Give the AI the ability to evaluate elisp
This commit is contained in:
parent
cdd674fdcd
commit
6191654511
@ -42,6 +42,9 @@
|
||||
:context "You are a helpful AI assistant running inside the Emacs text editor.")
|
||||
"The arguments for the default initial chat prompt.")
|
||||
|
||||
(defvar-local llama-chat-allow-eval nil
|
||||
"Whether to allow the AI to evaluate Emacs Lisp code.")
|
||||
|
||||
(defvar-local llama-current-chat-prompt nil
|
||||
"Chat prompt object for the current buffer.")
|
||||
|
||||
@ -117,16 +120,43 @@
|
||||
(font-lock-extra-managed-props
|
||||
. (composition display invisible keymap help-echo mouse-face))))
|
||||
(add-hook 'eldoc-documentation-functions #'llama-chat-eldoc-function nil t)
|
||||
(setq-local window-point-insertion-type t)
|
||||
(llama-chat-mode-initialize))
|
||||
|
||||
(defun llama-chat-buffer-name ()
  "Return the name of the default llama chat buffer."
  "*llama-chat*")
(defun llama-eval-expressions-in-ai-response (response)
  "Extract and evaluate <EVAL>...</EVAL> expressions embedded in RESPONSE.

When the buffer-local `llama-chat-allow-eval' is non-nil, scan RESPONSE
for substrings delimited by the literal markers \"<EVAL>\" and
\"</EVAL>\", ask for confirmation of each one with `y-or-n-p', and
evaluate those the user approves.  Return a list of \"EXPR: RESULT\"
strings (or \"EXPR: ERROR - ...\" on failure) in the order the
expressions appeared, or nil when evaluation is disabled or no
expression was found.

SECURITY NOTE(review): this passes model-generated text to `eval'; the
only safeguard is the per-expression `y-or-n-p' confirmation.
NOTE(review): `read' parses only the first sexp between the markers;
anything after it in the same <EVAL> span is silently ignored."
  (when llama-chat-allow-eval
    (with-temp-buffer
      (insert response)
      (let ((exprs nil))
        (goto-char (point-min))
        ;; Collect every well-formed <EVAL>...</EVAL> span.  An opening
        ;; marker with no matching close contributes nothing.
        (while (< (point) (point-max))
          (if (search-forward "<EVAL>" nil t)
              (let* ((start (point))
                     ;; 7 = (length "</EVAL>"): step back over the close
                     ;; marker so END is just past the expression text.
                     (end (when (search-forward "</EVAL>" nil t) (- (point) 7)))
                     (expr (when end (s-trim (buffer-substring-no-properties start end)))))
                (when (s-present? expr)
                  (push expr exprs)))
            ;; No further markers: terminate the scan.
            (goto-char (point-max))))
        (let ((results nil))
          ;; EXPRS was built with `push', so reverse to restore document
          ;; order; `dolist's RESULT form returns the accumulated
          ;; results, likewise re-reversed into document order.
          (dolist (expr (reverse exprs) (reverse results))
            (when (y-or-n-p (format "Evaluate expression? %s" expr))
              (condition-case err
                  (push (format "%s: %s" expr (eval (read expr))) results)
                ;; Handler bodies have an implicit progn (the original
                ;; wrapped one explicitly); report and record the
                ;; failure instead of aborting the whole batch.
                (error
                 (message "Error evaluating expression: %s" err)
                 (push (format "%s: ERROR - %s" expr err) results))))))))))
(defun llama-ai-response-finished-callback ()
|
||||
(put-text-property llama-ai-response-begin-marker
|
||||
llama-ai-response-end-marker
|
||||
'read-only t)
|
||||
(let ((inhibit-read-only t))
|
||||
(let ((results (llama-eval-expressions-in-ai-response (buffer-substring-no-properties
|
||||
llama-ai-response-begin-marker
|
||||
llama-ai-response-end-marker)))
|
||||
(inhibit-read-only t))
|
||||
(save-excursion
|
||||
(goto-char (point-max))
|
||||
(insert (propertize "\n\n" 'read-only t))
|
||||
@ -134,16 +164,27 @@
|
||||
(set-marker llama-user-input-begin-marker (point))
|
||||
(set-marker llama-user-input-end-marker llama-user-input-begin-marker)
|
||||
(set-marker llama-ai-response-begin-marker (point))
|
||||
(set-marker llama-ai-response-end-marker llama-ai-response-begin-marker)))
|
||||
(set-marker llama-ai-response-end-marker llama-ai-response-begin-marker))
|
||||
(when results
|
||||
(let ((result-string (format "Eval results:\n%s" (s-join "\n" results))))
|
||||
(llama-send-string-to-chat (current-buffer) result-string))))
|
||||
(goto-char llama-user-input-begin-marker))
|
||||
|
||||
(cl-defun llama-chat-buffer (name prompt &key provider callback)
|
||||
(cl-defun llama-chat-buffer (name prompt &key provider callback allow-eval)
|
||||
(let ((buffer (get-buffer-create name)))
|
||||
(with-current-buffer buffer
|
||||
(unless (eq major-mode 'llama-chat-mode)
|
||||
(llama-chat-mode)
|
||||
(setq llama-chat-allow-eval allow-eval)
|
||||
(when provider
|
||||
(setq-local llama-llm-provider provider))
|
||||
(when allow-eval
|
||||
(let ((context (llm-chat-prompt-context prompt)))
|
||||
(setf (llm-chat-prompt-context prompt)
|
||||
(concat context "
|
||||
You have the ability to evaluate Emacs Lisp code. To do so, output the Emacs Lisp expression you want to evaluate between the markers <EVAL> and </EVAL>. For example, to evaluate the expression (+ 1 2), you would write: <EVAL>(+ 1 2)</EVAL>. Do not evaluate code in your initial greeting."))))
|
||||
(setq llama-current-chat-prompt prompt)
|
||||
(llama-chat-streaming-to-chat-buffer (or provider llama-llm-provider)
|
||||
(llama-chat-streaming-to-chat-buffer llama-llm-provider
|
||||
llama-current-chat-prompt
|
||||
(current-buffer)
|
||||
llama-ai-response-begin-marker
|
||||
@ -219,7 +260,8 @@
|
||||
(pop-to-buffer (llama-chat-buffer
|
||||
llama-chat-default-name
|
||||
(apply #'llm-make-chat-prompt llama-chat-default-initial-prompt-args)
|
||||
:callback callback)))
|
||||
:callback callback
|
||||
:allow-eval t)))
|
||||
|
||||
(defun llama-doctor()
|
||||
"Start a psycotherapy session with the AI."
|
||||
@ -340,8 +382,7 @@ PROMPT: %s" (buffer-substring-no-properties start end) prompt)))
|
||||
(with-current-buffer buffer
|
||||
(delete-region start end)
|
||||
(insert replacement)))
|
||||
(message "AI did not generate a valid replacement.")))))
|
||||
(display-buffer (or name llama-chat-default-name))))
|
||||
(message "AI did not generate a valid replacement.")))))))
|
||||
|
||||
(provide 'llama)
|
||||
;;; llama.el ends here
|
||||
|
Loading…
Reference in New Issue
Block a user