From: ELPA Syncer
Subject: [elpa] externals/llm dc5b036194 2/2: Add function calling for ollama (#56)
Date: Sun, 28 Jul 2024 00:58:20 -0400 (EDT)

branch: externals/llm
commit dc5b03619493ad070b9fe625fd2a8826112df36c
Author: Andrew Hyatt <ahyatt@gmail.com>
Commit: GitHub <noreply@github.com>

    Add function calling for ollama (#56)
    
    * Add function calling to ollama
    
    * Note ollama function calling in the README
    
    * Add function calling to NEWS
    
    * Restrict the function calling capability to the correct ollama models
---
 NEWS.org      |  1 +
 README.org    |  2 +-
 llm-ollama.el | 35 ++++++++++++++++++++++++--
 llm-test.el   | 12 +++++++++
 llm-tester.el | 80 ++++++++++++++++++++++++++++++++---------------------------
 5 files changed, 91 insertions(+), 39 deletions(-)

diff --git a/NEWS.org b/NEWS.org
index 064e08387a..90d8aa7b4c 100644
--- a/NEWS.org
+++ b/NEWS.org
@@ -1,4 +1,5 @@
 * Version 0.17.1
+- Support Ollama function calling for models that support it.
 - Make sure every model, even unknown ones, returns some value for ~llm-chat-token-limit~.
 - Add token count for the llama3.1 model.
 * Version 0.17.0
diff --git a/README.org b/README.org
index 776161a68b..623beec925 100644
--- a/README.org
+++ b/README.org
@@ -61,7 +61,7 @@ In addition to the provider, which you may want multiple of (for example, to cha
 =:key=: The API key you get from [[https://console.anthropic.com/settings/keys][Claude's settings page]].  This is required.
 =:chat-model=: One of the [[https://docs.anthropic.com/claude/docs/models-overview][Claude models]].  Defaults to "claude-3-opus-20240229", the most powerful model.
 ** Ollama
-[[https://ollama.ai/][Ollama]] is a way to run large language models locally. There are [[https://ollama.ai/library][many different models]] you can use with it. You set it up with the following parameters:
+[[https://ollama.ai/][Ollama]] is a way to run large language models locally. There are [[https://ollama.ai/library][many different models]] you can use with it, and some of them support function calling. You set it up with the following parameters:
 - ~:scheme~: The scheme (http/https) for the connection to ollama.  This defaults to "http".
 - ~:host~: The host that ollama is run on.  This is optional and will default to localhost.
 - ~:port~: The port that ollama is run on.  This is optional and will default to the default ollama port.
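
For concreteness, a minimal sketch of setting up such a provider from Emacs Lisp (the host and port values are illustrative assumptions; "llama3.1" is one of the tool-capable models recognized by the capability check below, and 11434 is Ollama's usual default port):

    (require 'llm)
    (require 'llm-ollama)

    (defvar my-ollama-provider
      (make-llm-ollama :scheme "http" :host "localhost" :port 11434
                       :chat-model "llama3.1"))

    ;; With this patch, tool-capable models also report 'function-calls:
    (llm-capabilities my-ollama-provider)
    ;; => (streaming embeddings function-calls)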
diff --git a/llm-ollama.el b/llm-ollama.el
index ab29541de9..f89a8889e4 100644
--- a/llm-ollama.el
+++ b/llm-ollama.el
@@ -114,6 +114,12 @@ PROVIDER is the llm-ollama provider."
             messages))
     (push `("messages" . ,messages) request-alist)
     (push `("model" . ,(llm-ollama-chat-model provider)) request-alist)
+    (when (and streaming (llm-chat-prompt-functions prompt))
+      (signal 'not-implemented
+              "Ollama does not support streaming with function calls"))
+    (when (llm-chat-prompt-functions prompt)
+      (push `("tools" . ,(mapcar #'llm-provider-utils-openai-function-spec
+                                 (llm-chat-prompt-functions prompt))) request-alist))
     (push `("stream" . ,(if streaming t :json-false)) request-alist)
     (when (llm-chat-prompt-temperature prompt)
       (push `("temperature" . ,(llm-chat-prompt-temperature prompt)) options))
@@ -123,6 +129,23 @@ PROVIDER is the llm-ollama provider."
     (when options (push `("options" . ,options) request-alist))
     request-alist))
 
+(cl-defmethod llm-provider-extract-function-calls ((_ llm-ollama) response)
+  (mapcar (lambda (call)
+            (let ((function (cdar call)))
+              (make-llm-provider-utils-function-call
+               :name (assoc-default 'name function)
+               :args (assoc-default 'arguments function))))
+          (assoc-default 'tool_calls (assoc-default 'message response))))
+
+(cl-defmethod llm-provider-populate-function-calls ((_ llm-ollama) prompt calls)
+  (llm-provider-utils-append-to-prompt
+   prompt
+   (mapcar (lambda (call)
+             `((function (name . ,(llm-provider-utils-function-call-name call))
+                         (arguments . ,(json-encode
+                                        (llm-provider-utils-function-call-args call))))))
+           calls)))
+
 (cl-defmethod llm-provider-streaming-media-handler ((_ llm-ollama) msg-receiver _ _)
   (cons 'application/x-ndjson
         (plz-media-type:application/x-ndjson
@@ -138,8 +161,16 @@ PROVIDER is the llm-ollama provider."
 (cl-defmethod llm-chat-token-limit ((provider llm-ollama))
   (llm-provider-utils-model-token-limit (llm-ollama-chat-model provider)))
 
-(cl-defmethod llm-capabilities ((_ llm-ollama))
-  (list 'streaming 'embeddings))
+(cl-defmethod llm-capabilities ((provider llm-ollama))
+  (append (list 'streaming 'embeddings)
+          ;; see https://ollama.com/search?c=tools
+          (when (string-match
+                 (rx (or "llama3.1" "mistral-nemo" "mistral-large"
+                         "mistral" "mixtral" "command-r-plus"
+                         "llama3-groq-tool-use"
+                         "firefunction-v2"))
+                 (llm-ollama-chat-model provider))
+            (list 'function-calls))))
 
 (provide 'llm-ollama)
 
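
For reference, a sketch of the response shape the new extractor expects: Ollama reports tool calls under message.tool_calls, each wrapping a function alist with name and arguments, matching the assoc-default chain above. The concrete call below is invented for illustration:

    (llm-provider-extract-function-calls
     (make-llm-ollama :chat-model "llama3.1")
     ;; Hypothetical already-parsed response, shaped per the
     ;; assoc-default chain in the method above:
     '((message
        (tool_calls
         . (((function (name . "get_weather")
                       (arguments (city . "Paris")))))))))
    ;; => one llm-provider-utils-function-call struct with
    ;;    :name "get_weather" and :args ((city . "Paris"))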
diff --git a/llm-test.el b/llm-test.el
index 44b76c7b91..39c2a590d0 100644
--- a/llm-test.el
+++ b/llm-test.el
@@ -135,5 +135,17 @@
   (should (= 8192 (llm-chat-token-limit
                    (make-llm-gpt4all :chat-model "Mistral")))))
 
+(ert-deftest llm-test-ollama-function-calling-capabilities ()
+  ;; tests subject to change as models may get function calling
+  (cl-flet ((has-fc (model)
+              (member 'function-calls (llm-capabilities (make-llm-ollama :chat-model model)))))
+    (should (has-fc "llama3.1"))
+    (should (has-fc "llama3.1:8b-instruct-q8_0"))
+    (should (has-fc "mistral"))
+    (should-not (has-fc "gemma"))
+    (should-not (has-fc "gemma2"))
+    (should-not (has-fc "llama2"))
+    (should-not (has-fc "llama"))))
+
 (provide 'llm-test)
 ;;; llm-test.el ends here
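
The new test is ordinary ERT; one standard way to run just it (nothing package-specific here) is:

    (ert-run-tests-batch "llm-test-ollama-function-calling-capabilities")

or interactively via M-x ert RET llm-test-ollama-function-calling-capabilities RET.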
diff --git a/llm-tester.el b/llm-tester.el
index fa7c6f8afa..5dcfd0d522 100644
--- a/llm-tester.el
+++ b/llm-tester.el
@@ -54,8 +54,8 @@
                              (if (eq (type-of embedding) 'vector)
                                  (if (> (length embedding) 0)
                                     (llm-tester-log "SUCCESS: Provider %s provided an embedding of length %d.  First 10 values: %S" (type-of provider)
-                                              (length embedding)
-                                              (seq-subseq embedding 0 (min 10 (length embedding))))
+                                                     (length embedding)
+                                                     (seq-subseq embedding 0 (min 10 (length embedding))))
                                   (llm-tester-log "ERROR: Provider %s returned an empty embedding" (type-of provider))))
                           (llm-tester-log "ERROR: Provider %s did not return any embedding" (type-of provider))))
                        (lambda (type message)
@@ -69,20 +69,20 @@
         (if (eq (type-of embedding) 'vector)
             (if (> (length embedding) 0)
                (llm-tester-log "SUCCESS: Provider %s provided an embedding of length %d.  First 10 values: %S" (type-of provider)
-                         (length embedding)
-                         (seq-subseq embedding 0 (min 10 (length embedding))))
+                                (length embedding)
+                                (seq-subseq embedding 0 (min 10 (length embedding))))
              (llm-tester-log "ERROR: Provider %s returned an empty embedding" (type-of provider))))
      (llm-tester-log "ERROR: Provider %s did not return any embedding" (type-of provider)))))
 
 (defun llm-tester--tiny-prompt ()
   "Return prompt with a small amount of output, for testing purposes."
   (llm-make-chat-prompt
-      "Tell me a random cool feature of emacs."
-      :context "You must answer all questions as if you were the butler Jeeves from Jeeves and Wooster.  Start all interactions with the phrase, 'Very good, sir.'"
-      :examples '(("Tell me the capital of France." . "Very good, sir.  The capital of France is Paris, which I expect you to be familiar with, since you were just there last week with your Aunt Agatha.")
-                  ("Could you take me to my favorite place?" . "Very good, sir.  I believe you are referring to the Drone's Club, which I will take you to after you put on your evening attire."))
-      :temperature 0.5
-      :max-tokens 100))
+   "Tell me a random cool feature of emacs."
+   :context "You must answer all questions as if you were the butler Jeeves from Jeeves and Wooster.  Start all interactions with the phrase, 'Very good, sir.'"
+   :examples '(("Tell me the capital of France." . "Very good, sir.  The capital of France is Paris, which I expect you to be familiar with, since you were just there last week with your Aunt Agatha.")
+               ("Could you take me to my favorite place?" . "Very good, sir.  I believe you are referring to the Drone's Club, which I will take you to after you put on your evening attire."))
+   :temperature 0.5
+   :max-tokens 100))
 
 (defun llm-tester-chat-async (provider)
   "Test that PROVIDER can interact with the LLM chat."
@@ -92,15 +92,15 @@
      provider
      (llm-tester--tiny-prompt)
      (lambda (response)
-         (unless (eq buf (current-buffer))
-           (llm-tester-log "ERROR: Provider %s returned a response not in the original buffer" (type-of provider)))
-         (if response
-             (if (> (length response) 0)
-                 (llm-tester-log "SUCCESS: Provider %s provided a response %s" (type-of provider) response)
-               (llm-tester-log "ERROR: Provider %s returned an empty response" (type-of provider)))
-           (llm-tester-log "ERROR: Provider %s did not return any response" (type-of provider))))
-       (lambda (type message)
-         (llm-tester-log "ERROR: Provider %s returned an error of type %s with message %s" (type-of provider) type message)))))
+       (unless (eq buf (current-buffer))
+         (llm-tester-log "ERROR: Provider %s returned a response not in the original buffer" (type-of provider)))
+       (if response
+           (if (> (length response) 0)
+               (llm-tester-log "SUCCESS: Provider %s provided a response %s" (type-of provider) response)
+             (llm-tester-log "ERROR: Provider %s returned an empty response" (type-of provider)))
+         (llm-tester-log "ERROR: Provider %s did not return any response" (type-of provider))))
+     (lambda (type message)
+       (llm-tester-log "ERROR: Provider %s returned an error of type %s with message %s" (type-of provider) type message)))))
 
 (defun llm-tester-chat-sync (provider)
   "Test that PROVIDER can interact with the LLM chat."
@@ -134,9 +134,9 @@
       (llm-tester-log "SUCCESS: Provider %s provided a streamed response in %d parts:\n%s" (type-of provider) counter streamed)
       (when (and (member 'streaming (llm-capabilities provider))
                  (not (string= streamed text)))
-           (llm-tester-log "ERROR: Provider %s returned a streamed response that was not equal to the final response.  Streamed text:\n%sFinal response:\n%s" (type-of provider) streamed text))
+         (llm-tester-log "ERROR: Provider %s returned a streamed response that was not equal to the final response.  Streamed text:\n%sFinal response:\n%s" (type-of provider) streamed text))
       (when (and (member 'streaming (llm-capabilities provider)) (= 0 counter))
-           (llm-tester-log "WARNING: Provider %s returned no partial updates!" (type-of provider))))
+         (llm-tester-log "WARNING: Provider %s returned no partial updates!" (type-of provider))))
     (lambda (type message)
       (unless (eq buf (current-buffer))
         (llm-tester-log "ERROR: Provider %s returned a response not in the original buffer" (type-of provider)))
@@ -178,7 +178,7 @@
     (push (llm-chat provider prompt) outputs)
     (llm-tester-verify-prompt prompt)
     (llm-tester-log "SUCCESS: Provider %s provided a conversation with responses %s" (type-of provider)
-             (nreverse outputs))))
+                    (nreverse outputs))))
 
 (defun llm-tester-chat-conversation-async (provider)
   "Test that PROVIDER can handle a conversation."
@@ -262,10 +262,18 @@ of by calling the `describe_function' function."
 
 (defun llm-tester-function-calling-sync (provider)
   "Test that PROVIDER can call functions."
-  (let ((prompt (llm-tester-create-test-function-prompt)))
-    (llm-tester-log "SUCCESS: Provider %s called a function and got result %s"
-             (type-of provider)
-             (llm-chat provider prompt))))
+  (let* ((prompt (llm-tester-create-test-function-prompt))
+         (result (llm-chat provider prompt)))
+    (cond ((stringp result)
+           (llm-tester-log
+            "ERROR: Provider %s returned a string instead of a function result"
+            (type-of provider)))
+          ((and (listp result) (> (length result) 0))
+           (llm-tester-log "SUCCESS: Provider %s called a function and got a result %s"
+                           (type-of provider)
+                           result))
+          (t (llm-tester-log "ERROR: Provider %s returned a %s result: %s"
+                             (type-of provider) (type-of result) result)))))
 
 (defun llm-tester-function-calling-conversation-sync (provider)
   "Test that PROVIDER can call functions in a conversation."
@@ -280,8 +288,8 @@ of by calling the `describe_function' function."
     (push (llm-chat provider prompt) responses)
     (push (llm-chat provider prompt) responses)
     (llm-tester-log "SUCCESS: Provider %s had a function conversation and got results %s"
-             (type-of provider)
-             (nreverse responses))))
+                    (type-of provider)
+                    (nreverse responses))))
 
 (defun llm-tester-function-calling-async (provider)
   "Test that PROVIDER can call functions asynchronously."
@@ -289,10 +297,10 @@ of by calling the `describe_function' function."
     (llm-chat-async provider prompt
                     (lambda (result)
                      (llm-tester-log "SUCCESS: Provider %s called a function and got a result of %s"
-                               (type-of provider) result))
+                                      (type-of provider) result))
                     (lambda (type message)
                      (llm-tester-log "ERROR: Provider %s returned an error of type %s with message %s"
-                               (type-of provider) type message)))))
+                                      (type-of provider) type message)))))
 
 (defun llm-tester-function-calling-conversation-async (provider)
   "Test that PROVIDER can call functions in a conversation."
@@ -302,8 +310,8 @@ of by calling the `describe_function' function."
          (last-callback (lambda (result)
                           (push result responses)
                          (llm-tester-log "SUCCESS: Provider %s had an async function calling conversation, and got results %s"
-                                   (type-of provider)
-                                   (nreverse responses))))
+                                          (type-of provider)
+                                          (nreverse responses))))
          (third-callback (lambda (result) (push result responses)
                           (llm-chat-async provider prompt last-callback error-callback)))
          (second-callback (lambda (result) (push result responses)
@@ -323,12 +331,12 @@ of by calling the `describe_function' function."
        (cl-incf partial-counts))
      (lambda (text)
       (llm-tester-log "SUCCESS: Provider %s called a function and got a final result of %s"
-                (type-of provider) text)
+                       (type-of provider) text)
        (unless (= 0 partial-counts)
         (llm-tester-log "WARNING: Provider %s returned partial updates, but it shouldn't for function calling" (type-of provider))))
      (lambda (type message)
       (llm-tester-log "ERROR: Provider %s returned an error of type %s with message %s"
-                (type-of provider) type message)))))
+                       (type-of provider) type message)))))
 
 (defun llm-tester-cancel (provider)
   "Test that PROVIDER can do async which can be cancelled."
@@ -362,8 +370,8 @@ PROVIDER is the provider that is being tested."
      (llm-tester-log "ERROR: Provider %s returned an error on %s with a non-string message %s with type %s" (type-of provider) call message type))
     ((string-match-p "Unknown Error" message)
      (llm-tester-log "ERROR: Provider %s returned a message on %s with 'Unknown Error' instead of more specific error message" (type-of provider) call))
-      (t
-       (llm-tester-log "SUCCESS: Provider %s on %s returned an error of type %s with message %s" (type-of provider) call type message)))))
+     (t
+      (llm-tester-log "SUCCESS: Provider %s on %s returned an error of type %s with message %s" (type-of provider) call type message)))))
 
 (defun llm-tester-bad-provider-async (provider)
   "When PROVIDER is bad in some way, test error handling."

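Putting the pieces together, a hedged end-to-end sketch of the new capability (make-llm-function-call and make-llm-function-arg are the llm library's function calling API; the get_weather function, its body, and the model choice are illustrative assumptions):

    (require 'llm)
    (require 'llm-ollama)

    (let* ((provider (make-llm-ollama :chat-model "llama3.1"))
           (prompt (llm-make-chat-prompt
                    "What is the weather in Paris?"
                    :functions
                    (list (make-llm-function-call
                           :function (lambda (city)
                                       ;; Stand-in for a real lookup.
                                       (format "Sunny in %s" city))
                           :name "get_weather"
                           :description "Get the weather for a city."
                           :args (list (make-llm-function-arg
                                        :name "city"
                                        :type 'string
                                        :description "The city to look up."
                                        :required t)))))))
      ;; Note: per the llm-ollama change above, streaming with functions
      ;; signals not-implemented, so use llm-chat or llm-chat-async here.
      (llm-chat provider prompt))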