diff --git a/emacs/.emacs.d/config/init-ai.el b/emacs/.emacs.d/config/init-ai.el
index f7b1c73..73617ec 100644
--- a/emacs/.emacs.d/config/init-ai.el
+++ b/emacs/.emacs.d/config/init-ai.el
@@ -28,6 +28,32 @@
                models-alist)))))
     (setq gptel-backend (car backend)
           gptel-model (cadr backend)))
+  (defcustom gptel-ollama-context-window 16384
+    "Number of tokens (num_ctx) in the context window for Ollama requests."
+    :type 'integer
+    :group 'gptel)
+  ;; Increase Ollama context window by overriding gptel--request-data
+  (cl-defmethod gptel--request-data ((_backend gptel-ollama) prompts)
+    "JSON encode PROMPTS for sending to Ollama."
+    (let ((prompts-plist
+           `(:model ,gptel-model
+             :messages [,@prompts]
+             :stream ,(or (and gptel-stream gptel-use-curl
+                               (gptel-backend-stream gptel-backend))
+                          :json-false)))
+          ;; num_ctx per gptel issue #330; customizable via `gptel-ollama-context-window'
+          (options-plist `(:num_ctx ,gptel-ollama-context-window)))
+      (when gptel-temperature
+        (setq options-plist
+              (plist-put options-plist :temperature
+                         gptel-temperature)))
+      (when gptel-max-tokens
+        (setq options-plist
+              (plist-put options-plist :num_predict
+                         gptel-max-tokens)))
+      (when options-plist
+        (plist-put prompts-plist :options options-plist))
+      prompts-plist))
   (add-to-list 'gptel-directives '(shell-command . "You are a command line helper. Generate shell commands that do what is requested, without any additional description or explanation. Reply in plain text with no Markdown or other syntax. Reply with the command only."))
   (add-to-list 'gptel-directives '(org-mode . "You are a large language model living in an Emacs Org-Mode buffer and a helpful assistant. You may evaluate Emacs Lisp, Python, and shell-script code when necessary by outputting an Org-Mode source block. You don't need to ask for confirmation before evaluating code. The user will execute the source block and display the results in the buffer. Respond concisely.