Switch out llm for llama
This commit is contained in:
parent
5faa99158d
commit
cdd892d4e1
@ -1,22 +1,11 @@
|
||||
;; -*- lexical-binding: t; -*-

;; Local llama package: AI-assisted Emacs on top of llm.el + Ollama.
;; (Replaces the previous direct use-package of llm.)
(use-package llama
  ;; Vendored in the emacs config repo, not fetched from a remote.
  :straight `(:local-repo ,(expand-file-name "packages/llama" user-emacs-directory) :type nil)
  :load-path "packages/llama"
  :config
  ;; llm-ollama supplies the `make-llm-ollama' provider constructor.
  (require 'llm-ollama)
  (setq llama-llm-provider (make-llm-ollama :chat-model "llama3:latest")
        ;; Local Ollama models — the nonfree warning does not apply.
        llm-warn-on-nonfree nil))

(provide 'init-llm)
|
||||
|
170
emacs/.emacs.d/packages/llama/llama.el
Normal file
170
emacs/.emacs.d/packages/llama/llama.el
Normal file
@ -0,0 +1,170 @@
|
||||
;;; llama.el --- AI-assisted Emacs -*- lexical-binding: t; -*-
|
||||
|
||||
;; Copyright (C) 2024 Jeremy Isaac Dormitzer
|
||||
|
||||
;; Author: Jeremy Isaac Dormitzer <jeremy.dormitzer@gmail.com>
|
||||
;; Package-Requires: ((emacs "28.1") (llm "0.15") (markdown-mode "2.7") (s "1.13"))
|
||||
|
||||
;; This program is free software; you can redistribute it and/or modify
|
||||
;; it under the terms of the GNU General Public License as published by
|
||||
;; the Free Software Foundation, either version 3 of the License, or
|
||||
;; (at your option) any later version.
|
||||
|
||||
;; This program is distributed in the hope that it will be useful,
|
||||
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
;; GNU General Public License for more details.
|
||||
|
||||
;; You should have received a copy of the GNU General Public License
|
||||
;; along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
;;; Commentary:
|
||||
|
||||
;; Provides AI assistance features via llm.el.
|
||||
|
||||
;;; Code:
|
||||
(require 'cl-lib)                       ; cl-flet
(require 'llm)
(require 's)
;; for font-locking
(require 'markdown-mode)
|
||||
|
||||
(defvar llama-llm-provider nil
  "The llm.el provider to use.")

(defvar-local llama-current-chat-prompt nil
  "Chat prompt object for the current buffer.")

(defvar-local llama-user-input-begin-marker nil
  "Marker for the beginning of the user's input.")

(defvar-local llama-user-input-end-marker nil
  "Marker for the end of the user's input.")

(defvar-local llama-ai-response-begin-marker nil
  "Marker for the beginning of the AI's response.")

(defvar-local llama-ai-response-end-marker nil
  "Marker for the end of the AI's response.")
|
||||
|
||||
(defun llama-chat-streaming-to-chat-buffer (provider prompt buffer point finish-callback)
  "A version of `llm-chat-streaming-to-point' specialized to write to the chat buffer.

Stream the response to PROMPT from PROVIDER into BUFFER at POINT.
Each partial update replaces the previously inserted partial text, so
the buffer always shows the latest complete prefix of the response.
FINISH-CALLBACK is called (with no arguments) after the final text has
been inserted."
  (with-current-buffer buffer
    (save-excursion
      (let ((start (make-marker))
            (end (make-marker)))
        (set-marker start point)
        (set-marker end point)
        ;; START stays put while END advances with inserted text, so the
        ;; pair always brackets the partial response.
        (set-marker-insertion-type start nil)
        (set-marker-insertion-type end t)
        (cl-flet ((insert-text (text)
                    ;; Erase and insert the new text between the marker cons.
                    (with-current-buffer (marker-buffer start)
                      (let ((inhibit-read-only t))
                        (save-excursion
                          (goto-char start)
                          (delete-region start end)
                          (insert text))))))
          (llm-chat-streaming provider prompt
                              ;; Partial-response handler.
                              (lambda (text) (insert-text text))
                              ;; Final-response handler: insert, then notify.
                              (lambda (text) (insert-text text)
                                (funcall finish-callback))
                              ;; Error handler.
                              (lambda (_ msg) (error "Error calling the LLM: %s" msg))))))))
|
||||
|
||||
(defun llama-chat-mode-initialize ()
  "Set up a new chat buffer.

Creates the four buffer-local markers that track the user-input and
AI-response regions, and positions the response markers at the end of
the buffer, where the first response will stream in."
  (setq llama-user-input-begin-marker (make-marker))
  (setq llama-user-input-end-marker (make-marker))
  (setq llama-ai-response-begin-marker (make-marker))
  (setq llama-ai-response-end-marker (make-marker))
  ;; The end markers must advance when text is inserted at them, so the
  ;; regions grow as the user types / the response streams.
  (set-marker-insertion-type llama-user-input-end-marker t)
  (set-marker-insertion-type llama-ai-response-end-marker t)
  ;; First AI response goes at the end of the (fresh) buffer.
  (set-marker llama-ai-response-begin-marker (point-max))
  (set-marker llama-ai-response-end-marker (point-max)))
|
||||
|
||||
(define-derived-mode llama-chat-mode text-mode "Llama"
  "Major mode for chatting with the AI."
  :interactive nil
  :group 'llama
  ;; Use markdown-mode for font-locking without actually enabling
  ;; markdown-mode (which would bring its keymap and hooks along).
  (setq font-lock-defaults
        '(markdown-mode-font-lock-keywords
          nil nil nil nil
          (font-lock-multiline . t)
          (font-lock-syntactic-face-function . markdown-syntactic-face)
          ;; Let font-lock clean up the extra text properties markdown
          ;; font-locking applies.
          (font-lock-extra-managed-props
           . (composition display invisible keymap help-echo mouse-face))))
  (llama-chat-mode-initialize))
|
||||
|
||||
(defun llama-chat-buffer-name ()
  "Return the name of the default llama chat buffer."
  ;; Previously the sole string literal served as both docstring and
  ;; return value; give the function a real docstring.
  "*llama-chat*")
|
||||
|
||||
(defun llama-ai-response-finished-callback ()
  "Finalize a completed AI response and open a fresh input area.

Makes the just-streamed response read-only, appends a \"> \" input
prompt at the end of the buffer, collapses all four region markers to
the position after the prompt, and moves point there so the user can
type."
  ;; Freeze the response text so the user cannot edit chat history.
  (put-text-property llama-ai-response-begin-marker
                     llama-ai-response-end-marker
                     'read-only t)
  (let ((inhibit-read-only t))
    (save-excursion
      (goto-char (point-max))
      (insert (propertize "\n\n>" 'read-only t))
      ;; rear-nonsticky keeps text typed after the prompt space editable
      ;; even though the prompt itself is read-only.
      (insert (propertize " " 'read-only t 'rear-nonsticky '(read-only)))
      ;; All markers collapse to the start of the new input area; the
      ;; end markers (insertion type t) will advance from here.
      (set-marker llama-user-input-begin-marker (point))
      (set-marker llama-user-input-end-marker llama-user-input-begin-marker)
      (set-marker llama-ai-response-begin-marker (point))
      (set-marker llama-ai-response-end-marker llama-ai-response-begin-marker)))
  (goto-char llama-user-input-begin-marker))
|
||||
|
||||
(defun llama-chat-buffer (name prompt &optional provider)
  "Return a chat buffer named NAME primed with PROMPT.

If a buffer named NAME already exists in `llama-chat-mode', it is
returned unchanged and PROMPT and PROVIDER are ignored.  Otherwise the
buffer is switched to `llama-chat-mode' and the first AI response for
PROMPT is streamed in from PROVIDER, which defaults to
`llama-llm-provider'."
  (let ((buffer (get-buffer-create name)))
    (with-current-buffer buffer
      (unless (eq major-mode 'llama-chat-mode)
        (llama-chat-mode)
        (setq llama-current-chat-prompt prompt)
        (llama-chat-streaming-to-chat-buffer (or provider llama-llm-provider)
                                             llama-current-chat-prompt
                                             (current-buffer)
                                             llama-ai-response-begin-marker
                                             #'llama-ai-response-finished-callback)))
    buffer))
|
||||
|
||||
(defun llama-chat-send ()
  "Send the current user input to the AI and stream the reply.

Appends the trimmed input region to `llama-current-chat-prompt' and
streams the response after it.  Whitespace-only input is a no-op.

NOTE(review): streaming always uses `llama-llm-provider' here, even if
the buffer was created via `llama-chat-buffer' with a custom PROVIDER —
confirm whether the provider should be remembered per buffer."
  (interactive)
  (unless (eq major-mode 'llama-chat-mode)
    (error "Not in a llama-chat buffer"))
  (let ((input (s-trim
                (buffer-substring-no-properties llama-user-input-begin-marker
                                                llama-user-input-end-marker))))
    (when (s-present? input)
      (llm-chat-prompt-append-response llama-current-chat-prompt input)
      (save-excursion
        (let ((inhibit-read-only t))
          (goto-char llama-user-input-end-marker)
          ;; Separate the user's input from the upcoming response and
          ;; anchor the response region right after the separator.
          (insert (propertize "\n\n" 'read-only t))
          (set-marker llama-ai-response-begin-marker (point))))
      (llama-chat-streaming-to-chat-buffer llama-llm-provider
                                           llama-current-chat-prompt
                                           (current-buffer)
                                           llama-ai-response-begin-marker
                                           #'llama-ai-response-finished-callback))))
|
||||
|
||||
(defun llama-chat ()
  "Start a chat with the AI."
  (interactive)
  ;; Build the opening prompt first, then display the (possibly
  ;; pre-existing) chat buffer.
  (let ((prompt (llm-make-chat-prompt
                 "Briefly greet the user without mentioning your name and ask how you can help."
                 :context "You are a helpful AI assistant running inside the Emacs text editor.")))
    (pop-to-buffer (llama-chat-buffer "*llama-chat*" prompt))))
|
||||
|
||||
(defun llama-doctor ()
  "Start a psychotherapy session with the AI."
  ;; Fixes: docstring typo ("psycotherapy") and missing space after the
  ;; function name in the defun.
  (interactive)
  (pop-to-buffer (llama-chat-buffer
                  "*llama-doctor*"
                  (llm-make-chat-prompt
                   "Briefly greet the user without mentioning your name and ask how you can help."
                   :context "You are an empathetic therapist."))))
|
||||
|
||||
;; RET sends the current input instead of inserting a newline.
(define-key llama-chat-mode-map (kbd "RET") #'llama-chat-send)

(provide 'llama)

;;; llama.el ends here
|
Loading…
Reference in New Issue
Block a user