Remove eval, add filter mechanism

Jeremy Dormitzer 2024-05-29 14:36:09 -04:00
parent d36967e371
commit f1698e3247

llama.el

@@ -42,12 +42,12 @@
     :context "You are a helpful AI assistant running inside the Emacs text editor.")
   "The arguments for the default initial chat prompt.")
 
-(defvar-local llama-chat-allow-eval nil
-  "Whether to allow the AI to evaluate Emacs Lisp code.")
-
 (defvar-local llama-current-chat-prompt nil
   "Chat prompt object for the current buffer.")
 
+(defvar-local llama-current-chat-filter nil
+  "Filter function for the current chat buffer.")
+
 (defvar-local llama-user-input-begin-marker nil
   "Marker for the beginning of the user's input.")
@@ -61,7 +61,7 @@
   "Marker for the AI's response.")
 
 (defun llama-chat-streaming-to-chat-buffer (provider prompt buffer point finish-callback)
-  "A version of `llm-chat-streaming-to-point' specialized to write to the chat buffer"
+  "A version of `llm-chat-streaming-to-point' that sets inhibit-read-only to t in the insertion callback."
   (with-current-buffer buffer
     (save-excursion
       (let ((start (make-marker))
@@ -126,37 +126,11 @@
 (defun llama-chat-buffer-name ()
   "*llama-chat*")
 
-(defun llama-eval-expressions-in-ai-response (response)
-  (when llama-chat-allow-eval
-    (with-temp-buffer
-      (insert response)
-      (let ((exprs nil))
-        (goto-char (point-min))
-        (while (< (point) (point-max))
-          (if (search-forward "<EVAL>" nil t)
-              (let* ((start (point))
-                     (end (when (search-forward "</EVAL>" nil t) (- (point) 7)))
-                     (expr (when end (s-trim (buffer-substring-no-properties start end)))))
-                (when (s-present? expr)
-                  (push expr exprs)))
-            (goto-char (point-max))))
-        (let ((results nil))
-          (dolist (expr (reverse exprs) (reverse results))
-            (when (y-or-n-p (format "Evaluate expression? %s" expr))
-              (condition-case err
-                  (push (format "%s: %s" expr (eval (read expr))) results)
-                (error (progn
-                         (message "Error evaluating expression: %s" err)
-                         (push (format "%s: ERROR - %s" expr err) results)))))))))))
-
 (defun llama-ai-response-finished-callback ()
   (put-text-property llama-ai-response-begin-marker
                      llama-ai-response-end-marker
                      'read-only t)
-  (let ((results (llama-eval-expressions-in-ai-response (buffer-substring-no-properties
-                                                         llama-ai-response-begin-marker
-                                                         llama-ai-response-end-marker)))
-        (inhibit-read-only t))
+  (let ((inhibit-read-only t))
     (save-excursion
       (goto-char (point-max))
       (insert (propertize "\n\n" 'read-only t))
@@ -164,37 +138,47 @@
     (set-marker llama-user-input-begin-marker (point))
     (set-marker llama-user-input-end-marker llama-user-input-begin-marker)
     (set-marker llama-ai-response-begin-marker (point))
-    (set-marker llama-ai-response-end-marker llama-ai-response-begin-marker))
-  (when results
-    (let ((result-string (format "Eval results:\n%s" (s-join "\n" results))))
-      (llama-send-string-to-chat (current-buffer) result-string))))
+    (set-marker llama-ai-response-end-marker llama-ai-response-begin-marker)))
   (goto-char llama-user-input-begin-marker))
 
-(cl-defun llama-chat-buffer (name prompt &key provider callback allow-eval)
+(cl-defun llama-chat-send-prompt (name prompt &key filter)
+  "Send the PROMPT to the chat buffer named NAME.
+If FILTER is provided, it should be a function that accepts the raw AI response
+and two callback arguments `insert' and `send'. In the filter function, call
+`insert' to insert text into the chat buffer or `send' to send additional text
+to the AI (e.g. to provide function call results)."
+  (with-current-buffer name
+    (if filter
+        (cl-flet ((insert (text)
+                    (let ((inhibit-read-only t))
+                      (save-excursion
+                        (goto-char llama-ai-response-begin-marker)
+                        (insert text)))
+                    (llama-ai-response-finished-callback))
+                  (send (text)
+                    (llm-chat-prompt-append-response prompt text)
+                    (llama-chat-send-prompt name prompt :filter filter)))
+          (llm-chat-async llama-llm-provider prompt
+                          (lambda (response)
+                            (funcall filter response #'insert #'send))))
+      (llama-chat-streaming-to-chat-buffer llama-llm-provider
+                                           prompt
+                                           (current-buffer)
+                                           llama-ai-response-begin-marker
+                                           #'llama-ai-response-finished-callback))))
+
+(cl-defun llama-chat-buffer (name prompt &key provider filter)
   (let ((buffer (get-buffer-create name)))
     (with-current-buffer buffer
       (unless (eq major-mode 'llama-chat-mode)
         (llama-chat-mode)
-        (setq llama-chat-allow-eval allow-eval)
         (when provider
           (setq-local llama-llm-provider provider))
-        (when allow-eval
-          (let ((context (llm-chat-prompt-context prompt)))
-            (setf (llm-chat-prompt-context prompt)
-                  (concat context "
-You have the ability to evaluate Emacs Lisp code. To do so, output the Emacs Lisp expression you want to evaluate between the markers <EVAL> and </EVAL>. For example, to evaluate the expression (+ 1 2), you would write: <EVAL>(+ 1 2)</EVAL>. Do not evaluate code in your initial greeting."))))
+        (when filter
+          (setq-local llama-current-chat-filter filter))
         (setq llama-current-chat-prompt prompt)
-        (llama-chat-streaming-to-chat-buffer llama-llm-provider
-                                             llama-current-chat-prompt
-                                             (current-buffer)
-                                             llama-ai-response-begin-marker
-                                             (lambda ()
-                                               (let ((ai-response (buffer-substring-no-properties
-                                                                   llama-ai-response-begin-marker
-                                                                   llama-ai-response-end-marker)))
-                                                 (llama-ai-response-finished-callback)
-                                                 (when callback
-                                                   (funcall callback ai-response)))))))
+        (llama-chat-send-prompt name prompt :filter filter)))
    buffer))
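
For illustration, a filter can intercept the raw response and use `send' to feed data back to the model, or `insert' to display it. A minimal sketch, assuming a hypothetical <TIME> tag that the system prompt would ask the model to emit (the tag and function name below are illustrative, not part of this commit):

    (defun my-llama-time-filter (response insert send)
      ;; If the model asked for the time with a <TIME> tag, answer it via
      ;; SEND so the conversation continues; otherwise INSERT the response
      ;; into the chat buffer as-is.
      (if (string-match-p "<TIME>" response)
          (funcall send (format-time-string "The current time is %F %T"))
        (funcall insert response)))

    (llama-chat-buffer "*llama-chat*"
                       (apply #'llm-make-chat-prompt llama-chat-default-initial-prompt-args)
                       :filter #'my-llama-time-filter)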
 
 (defun llama-chat-send ()
@@ -211,11 +195,9 @@ You have the ability to evaluate Emacs Lisp code. To do so, output the Emacs Lis
       (goto-char llama-user-input-end-marker)
       (insert (propertize "\n\n" 'read-only t))
       (set-marker llama-ai-response-begin-marker (point))))
-    (llama-chat-streaming-to-chat-buffer llama-llm-provider
-                                         llama-current-chat-prompt
-                                         (current-buffer)
-                                         llama-ai-response-begin-marker
-                                         #'llama-ai-response-finished-callback))))
+    (llama-chat-send-prompt (current-buffer)
+                            llama-current-chat-prompt
+                            :filter llama-current-chat-filter))))
 
 (defun llama-chat-follow-link ()
   (interactive)
@@ -254,14 +236,12 @@ You have the ability to evaluate Emacs Lis
       (goto-char found)
       (beginning-of-line))))
 
-(defun llama-chat (&optional callback)
+(defun llama-chat ()
   "Start a chat with the AI."
   (interactive)
   (pop-to-buffer (llama-chat-buffer
                   llama-chat-default-name
-                  (apply #'llm-make-chat-prompt llama-chat-default-initial-prompt-args)
-                  :callback callback
-                  :allow-eval t)))
+                  (apply #'llm-make-chat-prompt llama-chat-default-initial-prompt-args))))
 
 (defun llama-doctor ()
   "Start a psychotherapy session with the AI."
@@ -286,42 +266,23 @@ You have the ability to evaluate Emacs Lis
     "[[" #'llama-chat-previous-prompt
     "]]" #'llama-chat-next-prompt))
 
-(cl-defun llama-send-string-to-chat (name string &key user-visible-string callback initial-prompt)
-  "Send STRING to the chat named NAME.
-If USER-VISIBLE-STRING is provided, display that in the chat buffer instead of the original string.
-If CALLBACK is provided, call it with the AI response when it's available.
-
-If a chat buffer with the given name isn't found, a new one is created using INITIAL-PROMPT, which defaults to one made using `llama-chat-default-initial-prompt-args'."
-  (cl-flet ((do-send (&rest _)
-              (with-current-buffer name
-                (delete-region llama-user-input-begin-marker llama-user-input-end-marker)
-                (save-excursion
-                  (goto-char llama-user-input-begin-marker)
-                  (insert (or user-visible-string string)))
-                (llm-chat-prompt-append-response llama-current-chat-prompt string)
-                (save-excursion
-                  (let ((inhibit-read-only t))
-                    (goto-char llama-user-input-end-marker)
-                    (insert (propertize "\n\n" 'read-only t))
-                    (set-marker llama-ai-response-begin-marker (point))))
-                (llama-chat-streaming-to-chat-buffer llama-llm-provider
-                                                     llama-current-chat-prompt
-                                                     (current-buffer)
-                                                     llama-ai-response-begin-marker
-                                                     (lambda ()
-                                                       (let ((ai-response (buffer-substring-no-properties
-                                                                           llama-ai-response-begin-marker
-                                                                           llama-ai-response-end-marker)))
-                                                         (llama-ai-response-finished-callback)
-                                                         (when callback
-                                                           (funcall callback ai-response))))))))
-    (if (get-buffer name)
-        (do-send)
-      (llama-chat-buffer name
-                         (or initial-prompt
-                             (apply #'llm-make-chat-prompt llama-chat-default-initial-prompt-args))
-                         :callback #'do-send))))
+(cl-defun llama-send-string-to-chat (name string &key user-visible-string initial-prompt)
+  "Send STRING to the chat named NAME.
+If USER-VISIBLE-STRING is provided, display that as the user input in the chat
+buffer instead of the original string."
+  (unless (get-buffer name)
+    (error "No chat buffer named %s" name))
+  (with-current-buffer name
+    (save-excursion
+      (let ((inhibit-read-only t))
+        (goto-char llama-user-input-begin-marker)
+        (insert (or user-visible-string string))
+        (goto-char llama-user-input-end-marker)
+        (insert (propertize "\n\n" 'read-only t))
+        (set-marker llama-ai-response-begin-marker (point))))
+    (llm-chat-prompt-append-response llama-current-chat-prompt string)
+    (llama-chat-send-prompt name llama-current-chat-prompt :filter llama-current-chat-filter)))
 
 (defun llama-ask-region (start end prompt &optional name)
   "Ask the AI in buffer NAME the PROMPT about the region between START and END.
@@ -354,88 +315,61 @@ NAME defaults to `llama-chat-default-name'."
       input)
     (display-buffer (or name llama-chat-default-name))))
 
-(defun llama-replace-in-region-with-context (start end prompt &optional name)
-  "Replace the region between START and END with the AI's response to PROMPT (require confirmation).
-
-Includes the surrounding buffer context in the prompt."
-  (interactive (list (region-beginning)
-                     (region-end)
-                     (read-string "Prompt: ")
-                     (if current-prefix-arg
-                         (completing-read "Chat buffer: " (mapcar (lambda (b) (buffer-name b))
-                                                                  (match-buffers (lambda (b)
-                                                                                   (with-current-buffer b
-                                                                                     (eq major-mode 'llama-chat-mode))))))
-                       "*llama-replace*")))
-  (let* ((buffer (current-buffer))
-         (context-begin (save-excursion
-                          (goto-char start)
-                          (vertical-motion -15)
-                          (point)))
-         (context-end (save-excursion
-                        (goto-char start)
-                        (vertical-motion 15)
-                        (point)))
-         (context (buffer-substring-no-properties context-begin context-end))
-         (input (format "Generate replacement text for the following INPUT given the PROMPT and the surrounding CONTEXT. In your response, delimit the suggested replacement with the markers <REPLACE> and </REPLACE>.
-CONTEXT: %s
-INPUT: %s
-PROMPT: %s"
-                        context
-                        (buffer-substring-no-properties start end)
-                        prompt)))
-    (llama-send-string-to-chat
-     (or name "*llama-replace*")
-     input
-     :callback (lambda (response)
-                 (let ((replacement (with-temp-buffer
-                                      (insert response)
-                                      (goto-char (point-min))
-                                      (let ((start (search-forward "<REPLACE>" nil t))
-                                            (end (search-forward "</REPLACE>" nil t)))
-                                        (when (and start end)
-                                          (buffer-substring-no-properties start (- end 10)))))))
-                   (if replacement
-                       (when (y-or-n-p (format "Replace region with AI suggestion? %s" replacement))
-                         (with-current-buffer buffer
-                           (delete-region start end)
-                           (insert replacement)))
-                     (message "AI did not generate a valid replacement.")))))))
-
-(defun llama-replace-in-region (start end prompt &optional name)
-  "Replace the region between START and END with the AI's response to PROMPT (require confirmation)."
-  (interactive (list (region-beginning)
-                     (region-end)
-                     (read-string "Prompt: ")
-                     (if current-prefix-arg
-                         (completing-read "Chat buffer: " (mapcar (lambda (b) (buffer-name b))
-                                                                  (match-buffers (lambda (b)
-                                                                                   (with-current-buffer b
-                                                                                     (eq major-mode 'llama-chat-mode))))))
-                       "*llama-replace*")))
-  (let* ((buffer (current-buffer))
-         (input (format "Generate replacement text for the following INPUT given the PROMPT. In your response, delimit the suggested replacement with the markers <REPLACE> and </REPLACE>.
-INPUT: %s
-PROMPT: %s"
-                        (buffer-substring-no-properties start end)
-                        prompt)))
-    (llama-send-string-to-chat
-     (or name "*llama-replace*")
-     input
-     :callback (lambda (response)
-                 (let ((replacement (with-temp-buffer
-                                      (insert response)
-                                      (goto-char (point-min))
-                                      (let ((start (search-forward "<REPLACE>" nil t))
-                                            (end (search-forward "</REPLACE>" nil t)))
-                                        (when (and start end)
-                                          (buffer-substring-no-properties start (- end 10)))))))
-                   (if replacement
-                       (when (y-or-n-p (format "Replace region with AI suggestion? %s" replacement))
-                         (with-current-buffer buffer
-                           (delete-region start end)
-                           (insert replacement)))
-                     (message "AI did not generate a valid replacement.")))))))
+(defun llama-replace-in-region (start end prompt)
+  "Replace the region between START and END with the AI's response to PROMPT. Requires confirmation."
+  (interactive "r\nsPrompt: ")
+  (let ((buffer (current-buffer))
+        (llm-prompt (llm-make-chat-prompt (format "PROMPT:\n%s\n\nINPUT:\n%s\n" prompt (buffer-substring-no-properties start end))
+                                          :context "You are an AI assistant tasked with generating replacement text based on some input text and a prompt. You will be given a PROMPT and an INPUT, and must produce a REPLACEMENT that replaces the original input and an EXPLANATION that explains why the replacement was chosen. Format your answer like this:
+EXPLANATION:
+<explanation>
+REPLACEMENT:
+<replacement>
+
+Do not include any additional notes or commentary outside of the explanation section - all text following the REPLACEMENT: label should be the verbatim replacement."
+                                          :examples '(("PROMPT:\nCan you fix the grammar in this sentence?\n\nINPUT:\nI loves to eat pizza!\n"
+                                                       .
+                                                       "EXPLANATION:\nThe correct conjugation for the verb \"love\" in first person singular is \"I love\".\nREPLACEMENT:\nI love to eat pizza!")
+                                                      ("PROMPT:\nLowercase all the keys of this JSON object\n\nINPUT:\n{\"Foo\": \"bar\", \"Baz\": \"qux\"}\n"
+                                                       .
+                                                       "EXPLANATION:\nI made all the keys of the JSON object lowercase\nREPLACEMENT:\n{\"foo\": \"bar\", \"baz\": \"qux\"}")
+                                                      ("PROMPT:\nRewrite this into a list of bullet points\n\nINPUT:\nWilliam Barry Wood, Jr. (May 4, 1910 – March 9, 1971) was an American football player and medical educator. Wood played quarterback for Harvard during the 1929–1931 seasons and was one of the most prominent football players of his time. He was elected to the College Football Hall of Fame in 1980.\n"
+                                                       .
+                                                       "EXPLANATION:\nHere is the rewritten text in a list of bullet points\nREPLACEMENT:\n• William Barry Wood, Jr. (May 4, 1910 – March 9, 1971) was an American football player and medical educator.
+• He played quarterback for Harvard University during the seasons:
++ 1929
++ 1930
++ 1931
+• He was one of the most prominent football players of his time.
+• Wood was elected to the College Football Hall of Fame in 1980.")))))
+    (llm-chat-async llama-llm-provider
+                    llm-prompt
+                    (lambda (response)
+                      (with-temp-buffer
+                        (insert response)
+                        (goto-char (point-min))
+                        (let* ((exp-start (save-excursion
+                                            (when (search-forward "EXPLANATION:" nil t)
+                                              (point))))
+                               (replace-start (save-excursion
+                                                (when (search-forward "REPLACEMENT:" nil t)
+                                                  (point))))
+                               (exp-end (when replace-start (- replace-start (length "REPLACEMENT:"))))
+                               (explanation (when (and exp-start exp-end)
+                                              (s-trim (buffer-substring-no-properties exp-start exp-end))))
+                               (replacement (when replace-start
+                                              (s-trim (buffer-substring-no-properties replace-start (point-max))))))
+                          (unless replacement
+                            (error "LLM did not return a valid replacement"))
+                          (when (y-or-n-p (format "Explanation:\n%s\n\nReplacement:\n%s\nAccept AI replacement?"
+                                                  explanation
+                                                  replacement))
+                            (with-current-buffer buffer
+                              (save-excursion
+                                (delete-region start end)
+                                (goto-char start)
+                                (insert replacement)))))))
+                    (lambda (_ msg) (error "Error calling the LLM: %s" msg)))))
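
The rewritten command is designed for interactive use on an active region (M-x llama-replace-in-region, which reads the prompt from the minibuffer), but it can also be driven from Lisp. A sketch, with the buffer name and region bounds assumed:

    (with-current-buffer "notes.txt"
      ;; Ask the AI to rewrite the region; the replacement is only applied
      ;; after the user confirms via y-or-n-p.
      (llama-replace-in-region (region-beginning) (region-end)
                               "Rewrite this paragraph in the active voice"))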
 
 (provide 'llama)
 ;;; llama.el ends here