Remove eval, add filter mechanism

Jeremy Dormitzer 2024-05-29 14:36:09 -04:00
parent d36967e371
commit f1698e3247


@@ -42,12 +42,12 @@
:context "You are a helpful AI assistant running inside the Emacs text editor.")
"The arguments for the default initial chat prompt.")
(defvar-local llama-chat-allow-eval nil
"Whether to allow the AI to evaluate Emacs Lisp code.")
(defvar-local llama-current-chat-prompt nil
"Chat prompt object for the current buffer.")
(defvar-local llama-current-chat-filter nil
"Filter function for the current chat buffer.")
(defvar-local llama-user-input-begin-marker nil
"Marker for the beginning of the user's input.")
@@ -61,7 +61,7 @@
"Marker for the AI's response.")
(defun llama-chat-streaming-to-chat-buffer (provider prompt buffer point finish-callback)
"A version of `llm-chat-streaming-to-point' specialized to write to the chat buffer"
"A version of `llm-chat-streaming-to-point' that sets inhibit-read-only to t in the insertion callback."
(with-current-buffer buffer
(save-excursion
(let ((start (make-marker))
@@ -126,37 +126,11 @@
(defun llama-chat-buffer-name ()
"*llama-chat*")
(defun llama-eval-expressions-in-ai-response (response)
(when llama-chat-allow-eval
(with-temp-buffer
(insert response)
(let ((exprs nil))
(goto-char (point-min))
(while (< (point) (point-max))
(if (search-forward "<EVAL>" nil t)
(let* ((start (point))
(end (when (search-forward "</EVAL>" nil t) (- (point) 7)))
(expr (when end (s-trim (buffer-substring-no-properties start end)))))
(when (s-present? expr)
(push expr exprs)))
(goto-char (point-max))))
(let ((results nil))
(dolist (expr (reverse exprs) (reverse results))
(when (y-or-n-p (format "Evaluate expression? %s" expr))
(condition-case err
(push (format "%s: %s" expr (eval (read expr))) results)
(error (progn
(message "Error evaluating expression: %s" err)
(push (format "%s: ERROR - %s" expr err) results)))))))))))
(defun llama-ai-response-finished-callback ()
(put-text-property llama-ai-response-begin-marker
llama-ai-response-end-marker
'read-only t)
(let ((results (llama-eval-expressions-in-ai-response (buffer-substring-no-properties
llama-ai-response-begin-marker
llama-ai-response-end-marker)))
(inhibit-read-only t))
(let ((inhibit-read-only t))
(save-excursion
(goto-char (point-max))
(insert (propertize "\n\n" 'read-only t))
@@ -164,37 +138,47 @@
(set-marker llama-user-input-begin-marker (point))
(set-marker llama-user-input-end-marker llama-user-input-begin-marker)
(set-marker llama-ai-response-begin-marker (point))
(set-marker llama-ai-response-end-marker llama-ai-response-begin-marker))
(when results
(let ((result-string (format "Eval results:\n%s" (s-join "\n" results))))
(llama-send-string-to-chat (current-buffer) result-string))))
(set-marker llama-ai-response-end-marker llama-ai-response-begin-marker)))
(goto-char llama-user-input-begin-marker))
(cl-defun llama-chat-buffer (name prompt &key provider callback allow-eval)
(cl-defun llama-chat-send-prompt (name prompt &key filter)
"Send the PROMPT to the chat buffer named NAME.
If FILTER is provided, it should be a function that accepts the raw AI response
and two callback arguments `insert' and `send'. In the filter function, call
`insert' to insert text into the chat buffer or `send' to send additional text
to the AI (e.g. to provide function call results)."
(with-current-buffer name
(if filter
(cl-flet ((insert (text)
(let ((inhibit-read-only t))
(save-excursion
(goto-char llama-ai-response-begin-marker)
(insert text)))
(llama-ai-response-finished-callback))
(send (text)
(llm-chat-prompt-append-response prompt text)
(llama-chat-send-prompt name prompt :filter filter)))
(llm-chat-async llama-llm-provider prompt
(lambda (response)
(funcall filter response #'insert #'send))))
(llama-chat-streaming-to-chat-buffer llama-llm-provider
prompt
(current-buffer)
llama-ai-response-begin-marker
#'llama-ai-response-finished-callback))))
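;; For illustration, a filter matching this signature might look like the
;; sketch below.  This is an assumption for demonstration only: the <CALC>
;; marker and the function name are illustrative, not part of this commit.
;; The filter answers arithmetic requests itself via `send' and passes
;; everything else through to the chat buffer via `insert'.
(defun my-llama-calc-filter (response insert send)
  (if (string-match "<CALC>\\(.*?\\)</CALC>" response)
      ;; Feed the computed result back to the AI as another prompt turn.
      (funcall send (format "Result: %s"
                            (calc-eval (match-string 1 response))))
    ;; No marker: just insert the raw response into the chat buffer.
    (funcall insert response)))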
(cl-defun llama-chat-buffer (name prompt &key provider filter)
(let ((buffer (get-buffer-create name)))
(with-current-buffer buffer
(unless (eq major-mode 'llama-chat-mode)
(llama-chat-mode)
(setq llama-chat-allow-eval allow-eval)
(when provider
(setq-local llama-llm-provider provider))
(when allow-eval
(let ((context (llm-chat-prompt-context prompt)))
(setf (llm-chat-prompt-context prompt)
(concat context "
You have the ability to evaluate Emacs Lisp code. To do so, output the Emacs Lisp expression you want to evaluate between the markers <EVAL> and </EVAL>. For example, to evaluate the expression (+ 1 2), you would write: <EVAL>(+ 1 2)</EVAL>. Do not evaluate code in your initial greeting."))))
(when filter
(setq-local llama-current-chat-filter filter))
(setq llama-current-chat-prompt prompt)
(llama-chat-streaming-to-chat-buffer llama-llm-provider
llama-current-chat-prompt
(current-buffer)
llama-ai-response-begin-marker
(lambda ()
(let ((ai-response (buffer-substring-no-properties
llama-ai-response-begin-marker
llama-ai-response-end-marker)))
(llama-ai-response-finished-callback)
(when callback
(funcall callback ai-response)))))))
(llama-chat-send-prompt name prompt :filter filter)))
buffer))
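;; Hypothetical usage sketch: open a chat buffer wired to the filter sketched
;; above.  The buffer name and prompt text are illustrative assumptions.
(llama-chat-buffer "*llama-calc*"
                   (llm-make-chat-prompt "Hello!"
                                         :context "You are a calculator assistant.")
                   :filter #'my-llama-calc-filter)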
(defun llama-chat-send ()
@@ -211,11 +195,9 @@ You have the ability to evaluate Emacs Lis
(goto-char llama-user-input-end-marker)
(insert (propertize "\n\n" 'read-only t))
(set-marker llama-ai-response-begin-marker (point))))
(llama-chat-streaming-to-chat-buffer llama-llm-provider
llama-current-chat-prompt
(current-buffer)
llama-ai-response-begin-marker
#'llama-ai-response-finished-callback))))
(llama-chat-send-prompt (current-buffer)
llama-current-chat-prompt
:filter llama-current-chat-filter))))
(defun llama-chat-follow-link ()
(interactive)
@@ -254,14 +236,12 @@ You have the ability to evaluate Emacs Lis
(goto-char found)
(beginning-of-line))))
(defun llama-chat (&optional callback)
(defun llama-chat ()
"Start a chat with the AI."
(interactive)
(pop-to-buffer (llama-chat-buffer
llama-chat-default-name
(apply #'llm-make-chat-prompt llama-chat-default-initial-prompt-args)
:callback callback
:allow-eval t)))
(apply #'llm-make-chat-prompt llama-chat-default-initial-prompt-args))))
(defun llama-doctor ()
"Start a psychotherapy session with the AI."
@@ -286,42 +266,23 @@ You have the ability to evaluate Emacs Lis
"[[" #'llama-chat-previous-prompt
"]]" #'llama-chat-next-prompt))
(cl-defun llama-send-string-to-chat (name string &key user-visible-string callback initial-prompt)
(cl-defun llama-send-string-to-chat (name string &key user-visible-string initial-prompt)
"Send STRING to the chat named NAME.
If USER-VISIBLE-STRING is provided, display that in the chat buffer instead of the original string.
If CALLBACK is provided, call it with the AI response when it's available.
If a chat buffer with the given name isn't found, a new one is created using INITIAL-PROMPT, which defaults to one made from `llama-chat-default-initial-prompt-args'.
(cl-flet ((do-send (&rest _)
(with-current-buffer name
(delete-region llama-user-input-begin-marker llama-user-input-end-marker)
(save-excursion
(goto-char llama-user-input-begin-marker)
(insert (or user-visible-string string)))
(llm-chat-prompt-append-response llama-current-chat-prompt string)
(save-excursion
(let ((inhibit-read-only t))
(goto-char llama-user-input-end-marker)
(insert (propertize "\n\n" 'read-only t))
(set-marker llama-ai-response-begin-marker (point))))
(llama-chat-streaming-to-chat-buffer llama-llm-provider
llama-current-chat-prompt
(current-buffer)
llama-ai-response-begin-marker
(lambda ()
(let ((ai-response (buffer-substring-no-properties
llama-ai-response-begin-marker
llama-ai-response-end-marker)))
(llama-ai-response-finished-callback)
(when callback
(funcall callback ai-response))))))))
(if (get-buffer name)
(do-send)
(llama-chat-buffer name
(or initial-prompt
(apply #'llm-make-chat-prompt llama-chat-default-initial-prompt-args))
:callback #'do-send))))
If USER-VISIBLE-STRING is provided, display that as the user input in the chat
buffer instead of the original string."
(unless (get-buffer name)
(error "No chat buffer named %s" name))
(with-current-buffer name
(save-excursion
(let ((inhibit-read-only t))
(goto-char llama-user-input-begin-marker)
(insert (or user-visible-string string))
(goto-char llama-user-input-end-marker)
(insert (propertize "\n\n" 'read-only t))
(set-marker llama-ai-response-begin-marker (point))))
(llm-chat-prompt-append-response llama-current-chat-prompt string)
(llama-chat-send-prompt name llama-current-chat-prompt :filter llama-current-chat-filter)))
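;; Usage sketch: push text into an existing chat programmatically while
;; showing the user a shorter label than what the model receives.  Note that
;; after this change the chat buffer must already exist; the function now
;; errors instead of creating one.
(llama-send-string-to-chat
 llama-chat-default-name
 (format "Summarize this text:\n%s" (buffer-string))
 :user-visible-string "Summarize this text")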
(defun llama-ask-region (start end prompt &optional name)
"Ask the AI in buffer NAME the PROMPT about the region between START and END.
@@ -354,88 +315,61 @@ NAME defaults to `llama-chat-default-name'.
input)
(display-buffer (or name llama-chat-default-name))))
(defun llama-replace-in-region-with-context (start end prompt &optional name)
"Replace the region between START and END with the AI's response to PROMPT (require confirmation).
(defun llama-replace-in-region (start end prompt)
"Replace the region between START and END with the AI's response to PROMPT. Requires confirmation."
(interactive "r\nsPrompt: ")
(let ((buffer (current-buffer))
(llm-prompt (llm-make-chat-prompt (format "PROMPT:\n%s\n\nINPUT:\n%s\n" prompt (buffer-substring-no-properties start end))
:context "You are an AI assistant tasked with generating replacement text based on some input text and a prompt. You will be given a PROMPT and an INPUT, and must produce a REPLACEMENT that replaces the original input and an EXPLANATION that explains why the replacement was chosen. Format your answer like this:
EXPLANATION:
<explanation>
REPLACEMENT:
<replacement>
Includes the surrounding buffer context in the prompt."
(interactive (list (region-beginning)
(region-end)
(read-string "Prompt: ")
(if current-prefix-arg
(completing-read "Chat buffer: " (mapcar (lambda (b) (buffer-name b))
(match-buffers (lambda (b)
(with-current-buffer b
(eq major-mode 'llama-chat-mode))))))
"*llama-replace*")))
(let* ((buffer (current-buffer))
(context-begin (save-excursion
(goto-char start)
(vertical-motion -15)
(point)))
(context-end (save-excursion
(goto-char start)
(vertical-motion 15)
(point)))
(context (buffer-substring-no-properties context-begin context-end))
(input (format "Generate replacement text for the following INPUT given the PROMPT and the surrounding CONTEXT. In your response, delimit the suggested replacement with the markers <REPLACE> and </REPLACE>.
CONTEXT: %s
INPUT: %s
PROMPT: %s"
context
(buffer-substring-no-properties start end)
prompt)))
(llama-send-string-to-chat
(or name "*llama-replace*")
input
:callback (lambda (response)
(let ((replacement (with-temp-buffer
(insert response)
(goto-char (point-min))
(let ((start (search-forward "<REPLACE>" nil t))
(end (search-forward "</REPLACE>" nil t)))
(when (and start end)
(buffer-substring-no-properties start (- end 10)))))))
(if replacement
(when (y-or-n-p (format "Replace region with AI suggestion? %s" replacement))
(with-current-buffer buffer
(delete-region start end)
(insert replacement)))
(message "AI did not generate a valid replacement.")))))))
(defun llama-replace-in-region (start end prompt &optional name)
"Replace the region between START and END with the AI's response to PROMPT (require confirmation)."
(interactive (list (region-beginning)
(region-end)
(read-string "Prompt: ")
(if current-prefix-arg
(completing-read "Chat buffer: " (mapcar (lambda (b) (buffer-name b))
(match-buffers (lambda (b)
(with-current-buffer b
(eq major-mode 'llama-chat-mode))))))
"*llama-replace*")))
(let* ((buffer (current-buffer))
(input (format "Generate replacement text for the following INPUT given the PROMPT. In your response, delimit the suggested replacement with the markers <REPLACE> and </REPLACE>.
INPUT: %s
PROMPT: %s"
(buffer-substring-no-properties start end)
prompt)))
(llama-send-string-to-chat
(or name "*llama-replace*")
input
:callback (lambda (response)
(let ((replacement (with-temp-buffer
(insert response)
(goto-char (point-min))
(let ((start (search-forward "<REPLACE>" nil t))
(end (search-forward "</REPLACE>" nil t)))
(when (and start end)
(buffer-substring-no-properties start (- end 10)))))))
(if replacement
(when (y-or-n-p (format "Replace region with AI suggestion? %s" replacement))
(with-current-buffer buffer
(delete-region start end)
(insert replacement)))
(message "AI did not generate a valid replacement.")))))))
Do not include any additional notes or commentary outside of the explanation section - all text following the REPLACEMENT: label should be the verbatim replacement."
:examples '(("PROMPT:\nCan you fix the grammar in this sentence?\n\nINPUT:\nI loves to eat pizza!\n"
.
"EXPLANATION:\nThe correct conjugation for the verb \"love\" in first person singular is \"I love\".\nREPLACEMENT:\nI love to eat pizza!")
("PROMPT:\nLowercase all the keys of this JSON object\n\nINPUT:\n{\"Foo\": \"bar\", \"Baz\": \"qux\"}\n"
.
"EXPLANATION:\nI made all the keys of the JSON object lowercase\nREPLACEMENT:\n{\"foo\": \"bar\", \"baz\": \"qux\"}")
("PROMPT:\nRewrite this into a list of bullet points\n\nINPUT:\nWilliam Barry Wood, Jr. (May 4, 1910 March 9, 1971) was an American football player and medical educator. Wood played quarterback for Harvard during the 19291931 seasons and was one of the most prominent football players of his time. He was elected to the College Football Hall of Fame in 1980.\n"
.
"EXPLANATION:\nHere is the rewritten text in a list of bullet points\nREPLACEMENT:\n• William Barry Wood, Jr. (May 4, 1910 March 9, 1971) was an American football player and medical educator.
He played quarterback for Harvard University during the seasons:
+ 1929
+ 1930
+ 1931
He was one of the most prominent football players of his time.
Wood was elected to the College Football Hall of Fame in 1980.")))))
(llm-chat-async llama-llm-provider
llm-prompt
(lambda (response)
(with-temp-buffer
(insert response)
(goto-char (point-min))
(let* ((exp-start (save-excursion
(when (search-forward "EXPLANATION:")
(point))))
(replace-start (save-excursion
(when (search-forward "REPLACEMENT:")
(point))))
(exp-end (when replace-start (- replace-start (length "REPLACEMENT:"))))
(explanation (when (and exp-start exp-end)
(s-trim (buffer-substring-no-properties exp-start exp-end))))
(replacement (when replace-start
(s-trim (buffer-substring-no-properties replace-start (point-max))))))
(unless replacement
(error "LLM did not return a valid replacement"))
(when (y-or-n-p (format "Explanation:\n%s\n\nReplacment:\n%s\nAccept AI replacement?"
explanation
replacement))
(with-current-buffer buffer
(save-excursion
(delete-region start end)
(goto-char start)
(insert replacement)))))))
(lambda (_ msg) (error "Error calling the LLM: %s" msg)))))
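;; Usage sketch: the rewritten command now talks to the LLM directly with a
;; structured EXPLANATION/REPLACEMENT prompt instead of routing through a
;; chat buffer.  Called from Lisp on the active region (interactively it
;; reads the prompt from the minibuffer):
(llama-replace-in-region (region-beginning) (region-end)
                         "Fix the grammar in this sentence")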
(provide 'llama)
;;; llama.el ends here