From: Andrew Hyatt
Subject: [elpa] externals/llm 9a3fc01cac 17/34: Switch from generic to per-provider sync solution
Date: Sat, 16 Sep 2023 01:32:48 -0400 (EDT)
branch: externals/llm
commit 9a3fc01cac06c17e00d36a48990a638217692238
Author: Andrew Hyatt <ahyatt@gmail.com>
Commit: Andrew Hyatt <ahyatt@gmail.com>
Switch from generic to per-provider sync solution
The previous method of converting async calls to sync had issues with
threading, even after some basic fixes to the method. It's more reliable
to handle this on a per-provider basis, by having all the providers
actually implement their own sync calls.
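
As a sketch of the pattern, annotated from the llm-openai changes in the
diff below: each provider's sync method now drives its own request with a
sync flag set, and collects the result through the same callbacks the
async path uses.

    (cl-defmethod llm-chat-response ((provider llm-openai) prompt)
      (let ((response))
        ;; The trailing t is the new SYNC argument; the helper forwards
        ;; it as :sync to `request', which blocks until the HTTP response
        ;; arrives, so the success callback has run before we return.
        (llm-openai--chat-response provider prompt
                                   (lambda (result) (setq response result))
                                   (lambda (_ msg) (error msg))
                                   nil t)  ; nil = no JSON spec, t = sync
        response))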
---
 llm-fake.el   | 29 ++++++++++++++----
 llm-openai.el | 98 ++++++++++++++++++++++++++++++++++++++---------------------
 llm-vertex.el | 89 +++++++++++++++++++++++++++++++++++------------------
 llm.el        | 36 ++++++----------------
 4 files changed, 155 insertions(+), 97 deletions(-)
diff --git a/llm-fake.el b/llm-fake.el
index 8a72ccebd1..f6142c0dec 100644
--- a/llm-fake.el
+++ b/llm-fake.el
@@ -46,7 +46,18 @@ either a vector response for the chat, or a signal symbol and
 message cons. If nil, the response will be a simple vector."
   output-to-buffer chat-action-func embedding-action-func)
 
+(defun llm-fake--chat-response (provider prompt)
+  "Produce a fake chat response.
+PROVIDER, PROMPT are as in `llm-chat-response.'"
+  )
+
 (cl-defmethod llm-chat-response-async ((provider llm-fake) prompt response-callback error-callback)
+  (condition-case err
+      (funcall response-callback (llm-chat-response provider prompt))
+    (t (funcall error-callback (car err) (cdr err))))
+  nil)
+
+(cl-defmethod llm-chat-response ((provider llm-fake) prompt)
   (when (llm-fake-output-to-buffer provider)
     (with-current-buffer (get-buffer-create (llm-fake-output-to-buffer provider))
       (goto-char (point-max))
@@ -55,12 +66,12 @@ message cons. If nil, the response will be a simple vector."
       (let* ((f (llm-fake-chat-action-func provider))
              (result (funcall f)))
         (pcase (type-of result)
-          ('string (funcall response-callback result))
-          ('cons (funcall error-callback (car result) (cdr result)))
+          ('string result)
+          ('cons (signal (car result) (cdr result)))
           (_ (error "Incorrect type found in `chat-action-func': %s" (type-of-result)))))
-    (funcall response-callback "Sample response from `llm-chat-response-async'")))
+    "Sample response from `llm-chat-response-async'"))
 
-(cl-defmethod llm-embedding-async ((provider llm-fake) string vector-callback error-callback)
+(cl-defmethod llm-embedding ((provider llm-fake) string)
   (when (llm-fake-output-to-buffer provider)
     (with-current-buffer (get-buffer-create (llm-fake-output-to-buffer provider))
       (goto-char (point-max))
@@ -70,8 +81,14 @@ message cons. If nil, the response will be a simple vector."
              (result (funcall f)))
         (pcase (type-of result)
           ('vector (funcall vector-callback result))
-          ('cons (funcall error-callback (car result) (cdr result)))
+          ('cons (signal (car result) (cdr result)))
           (_ (error "Incorrect type found in `chat-embedding-func': %s" (type-of-result)))))
-    (funcall vector-callback [0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9])))
+    [0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9]))
+
+(cl-defmethod llm-embedding-async ((provider llm-fake) string vector-callback error-callback)
+  (condition-case err
+      (funcall vector-callback (llm-embedding provider string))
+    (t (funcall error-callback (car err) (cdr err))))
+  nil)
(provide 'llm-fake)
diff --git a/llm-openai.el b/llm-openai.el
index 9478878322..199ee86f14 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -50,26 +50,42 @@ EMBEDDING-MODEL is the model to use for embeddings. If unset, it
 will use a reasonable default."
   key chat-model embedding-model)
 
-(cl-defmethod llm-embedding-async ((provider llm-openai) string vector-callback error-callback)
+(defun llm-openai--embedding-make-request (provider string vector-callback error-callback sync)
+  "Make a request to Open AI to get an embedding for STRING.
+PROVIDER, VECTOR-CALLBACK and ERROR-CALLBACK are as in the
+`llm-embedding-async' call. SYNC is non-nil when the request
+should wait until the response is received."
   (unless (llm-openai-key provider)
-    (error "To call Open AI API, provide the ekg-embedding-api-key"))
+    (error "To call Open AI API, add a key to the `llm-openai' provider."))
   (request "https://api.openai.com/v1/embeddings"
-    :type "POST"
-    :headers `(("Authorization" . ,(format "Bearer %s" ekg-embedding-api-key))
-               ("Content-Type" . "application/json"))
-    :data (json-encode `(("input" . ,string) ("model" . ,(or (llm-openai-embedding-model provider) "text-embedding-ada-002"))))
-    :parser 'json-read
-    :success (cl-function (lambda (&key data &allow-other-keys)
-                            (funcall vector-callback
-                                     (cdr (assoc 'embedding (aref (cdr (assoc 'data data)) 0))))))
-    :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
-                          (funcall error-callback 'error
-                                   (format "Problem calling Open AI: %s, type: %s message: %s"
-                                           (cdr error-thrown)
-                                           (assoc-default 'type (cdar data))
-                                           (assoc-default 'message (cdar data))))))))
-
-(defun llm-openai--chat-response (provider prompt response-callback error-callback &optional return-json-spec)
+    :type "POST"
+    :sync sync
+    :timeout 5
+    :headers `(("Authorization" . ,(format "Bearer %s" (llm-openai-key provider)))
+               ("Content-Type" . "application/json"))
+    :data (json-encode `(("input" . ,string) ("model" . ,(or (llm-openai-embedding-model provider) "text-embedding-ada-002"))))
+    :parser 'json-read
+    :success (cl-function (lambda (&key data &allow-other-keys)
+                            (funcall vector-callback
+                                     (cdr (assoc 'embedding (aref (cdr (assoc 'data data)) 0))))))
+    :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
+                          (funcall error-callback 'error
+                                   (format "Problem calling Open AI: %s, type: %s message: %s"
+                                           (cdr error-thrown)
+                                           (assoc-default 'type (cdar data))
+                                           (assoc-default 'message (cdar data))))))))
+
+(cl-defmethod llm-embedding-async ((provider llm-openai) string vector-callback error-callback)
+  (llm-openai--embedding-make-request provider string vector-callback error-callback nil))
+
+(cl-defmethod llm-embedding ((provider llm-openai) string)
+  (let ((response))
+    (llm-openai--embedding-make-request provider string
+                                        (lambda (vector) (setq response vector))
+                                        (lambda (_ error-message) (error error-message)) t)
+    response))
+
+(defun llm-openai--chat-response (provider prompt response-callback error-callback &optional return-json-spec sync)
   "Main method to send a PROMPT as a chat prompt to Open AI.
 RETURN-JSON-SPEC, if specified, is a JSON spec to return from the
 Open AI API.
@@ -79,7 +95,9 @@ PROVIDER is a `llm-openai' struct which holds the key and other options.
 RESPONSE-CALLBACK is a function to call with the LLM response.
 ERROR-CALLBACK is called if there is an error, with the error
-signal and message."
+signal and message.
+
+SYNC is non-nil when the request should wait until the response is received."
   (unless (llm-openai-key provider)
     (error "To call Open AI API, the key must have been set"))
   (let (request-alist system-prompt)
@@ -118,26 +136,36 @@ signal and message."
(push '("function_call" . (("name" . "output"))) request-alist))
(request "https://api.openai.com/v1/chat/completions"
- :type "POST"
- :headers `(("Authorization" . ,(format "Bearer %s"
(llm-openai-key provider)))
- ("Content-Type" . "application/json"))
- :data (json-encode request-alist)
- :parser 'json-read
- :success (cl-function
- (lambda (&key data &allow-other-keys)
- (let ((result (cdr (assoc 'content (cdr (assoc
'message (aref (cdr (assoc 'choices data)) 0))))))
- (func-result (cdr (assoc 'arguments (cdr
(assoc 'function_call (cdr (assoc 'message (aref (cdr (assoc 'choices data))
0)))))))))
- (funcall response-callback (or func-result
result)))))
- :error (cl-function (lambda (&key error-thrown data
&allow-other-keys)
- (funcall error-callback
- (format "Problem calling Open
AI: %s, type: %s message: %s"
- (cdr error-thrown)
- (assoc-default 'type
(cdar data))
- (assoc-default
'message (cdar data)))))))))
+ :type "POST"
+ :sync sync
+ :headers `(("Authorization" . ,(format "Bearer %s" (llm-openai-key
provider)))
+ ("Content-Type" . "application/json"))
+ :data (json-encode request-alist)
+ :parser 'json-read
+ :success (cl-function
+ (lambda (&key data &allow-other-keys)
+ (let ((result (cdr (assoc 'content (cdr (assoc 'message
(aref (cdr (assoc 'choices data)) 0))))))
+ (func-result (cdr (assoc 'arguments (cdr (assoc
'function_call (cdr (assoc 'message (aref (cdr (assoc 'choices data))
0)))))))))
+ (funcall response-callback (or func-result result)))))
+ :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
+ (funcall error-callback
+ 'error
+ (format "Problem calling Open AI: %s,
type: %s message: %s"
+ (cdr error-thrown)
+ (assoc-default 'type (cdar data))
+ (assoc-default 'message (cdar
data)))))))))
 (cl-defmethod llm-chat-response-async ((provider llm-openai) prompt response-callback error-callback)
   (llm-openai--chat-response provider prompt response-callback error-callback))
 
+(cl-defmethod llm-chat-response ((provider llm-openai) prompt)
+  (let ((response))
+    (llm-openai--chat-response provider prompt
+                               (lambda (result) (setq response result))
+                               (lambda (_ msg) (error msg))
+                               nil t)
+    response))
+
(provide 'llm-openai)
;;; llm-openai.el ends here
diff --git a/llm-vertex.el b/llm-vertex.el
index 41fd97d1e9..e51e9c8d3b 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -69,28 +69,46 @@ KEY-GENTIME keeps track of when the key was generated, because the key must be r
     (setf (llm-vertex-key provider) result))
   (setf (llm-vertex-key-gentime provider) (current-time))))
 
-(cl-defmethod llm-embedding-async ((provider llm-vertex) string vector-callback error-callback)
+(defun llm-vertex--embedding (provider string vector-callback error-callback sync)
+  "Get the embedding for STRING.
+PROVIDER, VECTOR-CALLBACK, ERROR-CALLBACK are all the same as `llm-embedding-async'.
+SYNC, when non-nil, will wait until the response is available to return."
   (llm-vertex-refresh-key provider)
   (request (format
             "https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
             llm-vertex-gcloud-region
            (llm-vertex-project provider)
            llm-vertex-gcloud-region
            (or (llm-vertex-embedding-model provider) "textembedding-gecko"))
-    :type "POST"
-    :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider)))
-               ("Content-Type" . "application/json"))
-    :data (json-encode `(("instances" . [(("content" . ,string))])))
-    :parser 'json-read
-    :success (cl-function
-              (lambda (&key data &allow-other-keys)
-                (funcall vector-callback
-                         (cdr (assoc 'values (cdr (assoc 'embeddings (aref (cdr (assoc 'predictions data)) 0))))))))
-    :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
-                          (funcall error-callback
-                                   (error (format "Problem calling GCloud AI: %s (%S)"
-                                                  (cdr error-thrown) data)))))))
+    :sync sync
+    :timeout 5
+    :type "POST"
+    :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider)))
+               ("Content-Type" . "application/json"))
+    :data (json-encode `(("instances" . [(("content" . ,string))])))
+    :parser 'json-read
+    :success (cl-function
+              (lambda (&key data &allow-other-keys)
+                (funcall vector-callback
+                         (cdr (assoc 'values (cdr (assoc 'embeddings (aref (cdr (assoc 'predictions data)) 0))))))))
+    :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
+                          (funcall error-callback
+                                   (error (format "Problem calling GCloud AI: %s (%S)"
+                                                  (cdr error-thrown) data)))))))
 
-(cl-defmethod llm-chat-response-async ((provider llm-vertex) prompt response-callback error-callback)
+(cl-defmethod llm-embedding-async ((provider llm-vertex) string vector-callback error-callback)
+  (llm-vertex--embedding provider string vector-callback error-callback nil))
+
+(cl-defmethod llm-embedding ((provider llm-vertex) string)
+  (let ((response))
+    (llm-vertex--embedding provider string
+                           (lambda (vector) (setq response vector))
+                           (lambda (_ error-message) (error error-message)) t)
+    response))
+
+(defun llm-vertex--chat-response (provider prompt response-callback error-callback sync)
+  "Get the chat response for PROMPT.
+PROVIDER, RESPONSE-CALLBACK, ERROR-CALLBACK are all the same as `llm-chat-response-async'.
+SYNC, when non-nil, will wait until the response is available to return."
   (llm-vertex-refresh-key provider)
   (let ((request-alist))
     (when (llm-chat-prompt-context prompt)
@@ -121,21 +139,32 @@ KEY-GENTIME keeps track of when the key was generated, because the key must be r
            (llm-vertex-project provider)
            llm-vertex-gcloud-region
            (or (llm-vertex-chat-model provider) "chat-bison"))
-    :type "POST"
-    :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider)))
-               ("Content-Type" . "application/json"))
-    :data (json-encode `(("instances" . [,request-alist])))
-    :parser 'json-read
-    :success (cl-function (lambda (&key data &allow-other-keys)
-                            (funcall response-callback
-                                     (cdr (assoc 'content (aref (cdr (assoc 'candidates (aref (cdr (assoc 'predictions data)) 0))) 0))))))
-    :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
-                          (funcall error-callback 'error
-                                   (error (format "Problem calling GCloud AI: %s, status: %s message: %s (%s)"
-                                                  (cdr error-thrown)
-                                                  (assoc-default 'status (assoc-default 'error data))
-                                                  (assoc-default 'message (assoc-default 'error data))
-                                                  data))))))))
+    :type "POST"
+    :sync sync
+    :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider)))
+               ("Content-Type" . "application/json"))
+    :data (json-encode `(("instances" . [,request-alist])))
+    :parser 'json-read
+    :success (cl-function (lambda (&key data &allow-other-keys)
+                            (funcall response-callback
+                                     (cdr (assoc 'content (aref (cdr (assoc 'candidates (aref (cdr (assoc 'predictions data)) 0))) 0))))))
+    :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
+                          (funcall error-callback 'error
+                                   (error (format "Problem calling GCloud AI: %s, status: %s message: %s (%s)"
+                                                  (cdr error-thrown)
+                                                  (assoc-default 'status (assoc-default 'error data))
+                                                  (assoc-default 'message (assoc-default 'error data))
+                                                  data))))))))
+
+(cl-defmethod llm-chat-response-async ((provider llm-vertex) prompt response-callback error-callback)
+  (llm-vertex--chat-response provider prompt response-callback error-callback nil))
+
+(cl-defmethod llm-chat-response ((provider llm-vertex) prompt)
+  (let ((response))
+    (llm-vertex--chat-response provider prompt
+                               (lambda (result) (setq response result))
+                               (lambda (_ error-message) (error error-message)) t)
+    response))
(provide 'llm-vertex)
diff --git a/llm.el b/llm.el
index 3c18ac8253..f01a130cf8 100644
--- a/llm.el
+++ b/llm.el
@@ -69,33 +69,18 @@ MAX-TOKENS is the maximum number of tokens to generate. This is optional.
 ROLE can a symbol, of either `user' or `assistant'."
   role content)
 
-(defun llm--run-async-as-sync (f &rest args)
-  "Call async function F, passing ARGS.
-Two args will be appended to the end; a success callback, and an
-error callback. This will block until the async function calls
-one of the callbacks.
-
-The return value will be the value passed into the success callback."
-  (let* ((mutex (make-mutex "llm-chat-response"))
-         (cv (make-condition-variable mutex))
-         (response))
-    (apply f (append args
-                     (list
-                      (lambda (result)
-                        (with-mutex mutex
-                          (setq response result)
-                          (condition-notify cv)))
-                      (lambda (type msg)
-                        (with-mutex mutex
-                          (message "async to sync, got error")
-                          (signal type msg)
-                          (condition-notify cv))))))
-    response))
+(defun llm-make-simple-chat-prompt (text)
+  "Create a `llm-chat-prompt' with TEXT sent to the LLM provider.
+This is a helper function for when you just need to send text to
+an LLM, and don't need the more advanced features that the
+`llm-chat-prompt' struct makes available."
+  (make-llm-chat-prompt :interactions (list (make-llm-chat-prompt-interaction :role 'user :content text))))
 
 (cl-defgeneric llm-chat-response (provider prompt)
   "Return a response to PROMPT from PROVIDER.
 PROMPT is a `llm-chat-prompt'. The response is a string."
-  (llm--run-async-as-sync #'llm-chat-response-async provider prompt))
+  (ignore provider prompt)
+  (signal 'not-implemented nil))
 
 (cl-defmethod llm-chat-response ((_ (eql nil)) _)
   (error "LLM provider was nil. Please set the provider in the application you are using."))
@@ -113,7 +98,8 @@ ERROR-CALLBACK receives the error response."
 (cl-defgeneric llm-embedding (provider string)
   "Return a vector embedding of STRING from PROVIDER."
-  (llm--run-async-as-sync #'llm-embedding-async provider string))
+  (ignore provider string)
+  (signal 'not-implemented nil))
 
 (cl-defmethod llm-embedding ((_ (eql nil)) _)
   (error "LLM provider was nil. Please set the provider in the application you are using."))
@@ -162,5 +148,3 @@ This should only be used for logging or debugging."
"")))
(provide 'llm)
-
-;;; llm.el ends here
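
For reference, here is an illustrative usage sketch of the resulting API
(not part of the commit's diff; it uses the fake provider from llm-fake.el
and the new llm-make-simple-chat-prompt helper added to llm.el above):

    ;; Synchronous: blocks until the provider produces a string.
    (llm-chat-response (make-llm-fake)
                       (llm-make-simple-chat-prompt "Hello"))

    ;; Asynchronous: returns immediately; one callback fires later
    ;; with either the response text or the error signal and message.
    (llm-chat-response-async (make-llm-fake)
                             (llm-make-simple-chat-prompt "Hello")
                             (lambda (text) (message "Response: %s" text))
                             (lambda (type msg) (message "Error %s: %s" type msg)))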