Fix llm output buffer sentinel formatting and add autoloads

Jeremy Dormitzer 2024-05-21 10:26:45 -04:00
parent df79eee678
commit 3d39b0c3df


@@ -86,7 +86,7 @@
     (goto-char (point-max))
     (newline)
     (newline)
-    (insert (format "[llm %s]" string))))
+    (insert (format "[llm %s]" (s-trim string)))))
 
 (defun llm--run-async (name buffer-name &rest llm-args)
   "Run llm with LLM-ARGS asynchronously.
@@ -221,6 +221,7 @@ The process is named NAME and runs in BUFFER-NAME."
                        (list "-m" llm-model))))
     (append (list "chat") model sys opts)))
 
+;;;###autoload
 (defun llm-chat (system-prompt &optional name)
   "Start a chat session with llm, prompting it with SYSTEM-PROMPT, naming the process and buffer NAME."
   (interactive (list (read-string "System prompt: " "You are a helpful AI assistant.")
@@ -243,6 +244,7 @@ The process is named NAME and runs in BUFFER-NAME."
     (when buffer
       (pop-to-buffer buffer))))
 
+;;;###autoload
 (defun llm-doctor ()
   "Start a psychotherapy session with llm."
   (interactive)
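
Note on the two new cookies: a ;;;###autoload comment before a defun tells Emacs's loaddefs machinery (loaddefs-generate in Emacs 29+, or the package manager's autoload step) to emit a stub so llm-chat and llm-doctor can be invoked from M-x before the library itself is loaded. The generated stubs would look roughly like this (a sketch; "llm" is assumed to be the library's file name, and the docstrings are abbreviated):

  (autoload 'llm-chat "llm" "Start a chat session with llm." t)
  (autoload 'llm-doctor "llm" "Start a psychotherapy session with llm." t)

The final t marks each stub as an interactive command, matching the (interactive) specs in the definitions above.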