[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[elpa] externals/llm 414d25a625 09/34: Removed various unused things, and format fixes
From: |
Andrew Hyatt |
Subject: |
[elpa] externals/llm 414d25a625 09/34: Removed various unused things, and format fixes |
Date: |
Sat, 16 Sep 2023 01:32:48 -0400 (EDT) |
branch: externals/llm
commit 414d25a625201acc0f7b87f6fdb8eca2b48d5bc8
Author: Andrew Hyatt <ahyatt@gmail.com>
Commit: Andrew Hyatt <ahyatt@gmail.com>
Removed various unused things, and format fixes
This fixes all byte compile warnings, and notably fixes an incorrect error
message formatting in the vertex provider.
---
llm-openai.el | 4 ++--
llm-vertex.el | 16 ++++++++--------
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/llm-openai.el b/llm-openai.el
index 45dee5fc4d..9478878322 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -117,7 +117,7 @@ signal and message."
request-alist)
(push '("function_call" . (("name" . "output"))) request-alist))
- (let* ((resp (request "https://api.openai.com/v1/chat/completions"
+ (request "https://api.openai.com/v1/chat/completions"
:type "POST"
:headers `(("Authorization" . ,(format "Bearer %s"
(llm-openai-key provider)))
("Content-Type" . "application/json"))
@@ -133,7 +133,7 @@ signal and message."
(format "Problem calling Open
AI: %s, type: %s message: %s"
(cdr error-thrown)
(assoc-default 'type
(cdar data))
- (assoc-default
'message (cdar data))))))))))))
+ (assoc-default
'message (cdar data)))))))))
(cl-defmethod llm-chat-response-async ((provider llm-openai) prompt
response-callback error-callback)
(llm-openai--chat-response provider prompt response-callback error-callback))
diff --git a/llm-vertex.el b/llm-vertex.el
index cbbf165e18..41fd97d1e9 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -71,7 +71,7 @@ KEY-GENTIME keeps track of when the key was generated,
because the key must be r
(cl-defmethod llm-embedding-async ((provider llm-vertex) string
vector-callback error-callback)
(llm-vertex-refresh-key provider)
- (let ((resp (request (format
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
+ (request (format
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
llm-vertex-gcloud-region
(llm-vertex-project provider)
llm-vertex-gcloud-region
@@ -87,8 +87,8 @@ KEY-GENTIME keeps track of when the key was generated,
because the key must be r
(cdr (assoc 'values (cdr (assoc
'embeddings (aref (cdr (assoc 'predictions data)) 0))))))))
:error (cl-function (lambda (&key error-thrown data
&allow-other-keys)
(funcall error-callback
- (error (format "Problem calling
GCloud AI: %s"
- (cdr
error-thrown)))))))))))
+ (error (format "Problem calling
GCloud AI: %s (%S)"
+ (cdr error-thrown)
data)))))))
(cl-defmethod llm-chat-response-async ((provider llm-vertex) prompt
response-callback error-callback)
(llm-vertex-refresh-key provider)
@@ -116,7 +116,7 @@ KEY-GENTIME keeps track of when the key was generated,
because the key must be r
request-alist))
(when (llm-chat-prompt-max-tokens prompt)
(push `("max_tokens" . ,(llm-chat-prompt-max-tokens prompt))
request-alist))
- (let ((resp (request (format
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
+ (request (format
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
llm-vertex-gcloud-region
(llm-vertex-project provider)
llm-vertex-gcloud-region
@@ -132,10 +132,10 @@ KEY-GENTIME keeps track of when the key was generated,
because the key must be r
:error (cl-function (lambda (&key error-thrown data
&allow-other-keys)
(funcall error-callback 'error
(error (format "Problem
calling GCloud AI: %s, status: %s message: %s (%s)"
- 'error(cdr
error-thrown)
- (assoc-default
'status (assoc-default 'error data))
- (assoc-default
'message (assoc-default 'error data))
- data)))))))))))
+ (cdr
error-thrown)
+
(assoc-default 'status (assoc-default 'error data))
+
(assoc-default 'message (assoc-default 'error data))
+
data))))))))
(provide 'llm-vertex)
- [elpa] externals/llm b52958757a 18/34: Fix docstring wider than 80 characters in llm-vertex, (continued)
- [elpa] externals/llm b52958757a 18/34: Fix docstring wider than 80 characters in llm-vertex, Andrew Hyatt, 2023/09/16
- [elpa] externals/llm abbff2aa9d 23/34: Change method name to llm-chat (without "-response"), update README, Andrew Hyatt, 2023/09/16
- [elpa] externals/llm e94bc937c7 27/34: Fix issue with llm-chat before method having too many arguments, Andrew Hyatt, 2023/09/16
- [elpa] externals/llm 7edd36b2dc 28/34: Fix obsolete or incorrect function calls in llm-fake, Andrew Hyatt, 2023/09/16
- [elpa] externals/llm d4bbe9d84c 29/34: Fix incorrect requires in openai and vertex implementations, Andrew Hyatt, 2023/09/16
- [elpa] externals/llm 723c0b3786 31/34: Minor README whitespace and formatting fixes, Andrew Hyatt, 2023/09/16
- [elpa] externals/llm 8f30feb5c1 32/34: README improvements, including noting the nonfree llm warning, Andrew Hyatt, 2023/09/16
- [elpa] externals/llm 444850a981 24/34: Fix missing word in non-free warning message, Andrew Hyatt, 2023/09/16
- [elpa] externals/llm 0ed280c208 15/34: Add llm-fake, useful for developer testing using the llm methods, Andrew Hyatt, 2023/09/16
- [elpa] externals/llm c55ccf157a 03/34: Clean up package specifications in elisp files, Andrew Hyatt, 2023/09/16
- [elpa] externals/llm 414d25a625 09/34: Removed various unused things, and format fixes,
Andrew Hyatt <=
- [elpa] externals/llm 4e9be8183d 07/34: Merge branch 'async', Andrew Hyatt, 2023/09/16
- [elpa] externals/llm dd20d6353c 21/34: Fix bug on llm-fake's error response to chat-response, Andrew Hyatt, 2023/09/16
- [elpa] externals/llm 40151757de 26/34: Switch to a method of nonfree warnings easier for provider modules, Andrew Hyatt, 2023/09/16
- [elpa] externals/llm ba65755326 30/34: Improve the README with information on providers for end-users, Andrew Hyatt, 2023/09/16