diff --git a/.github/workflows/chron-models.yaml b/.github/workflows/chron-models.yaml
index f890077..578ed61 100644
--- a/.github/workflows/chron-models.yaml
+++ b/.github/workflows/chron-models.yaml
@@ -1,4 +1,4 @@
-name: Commit Go Mod, Go Work, and Docs
+name: Commit Go Generated Content
on:
workflow_dispatch:
@@ -18,7 +18,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
- go-version: '1.23.1'
+ go-version: '1.23.2'
# Step 3: Run go mod download
- name: Run go mod download
diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml
index a3bf780..b5a75a8 100644
--- a/.github/workflows/coverage.yaml
+++ b/.github/workflows/coverage.yaml
@@ -8,17 +8,13 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.23.1'
+ go-version: '1.23.2'
- name: Check out code
uses: actions/checkout@v3
- name: Install dependencies
run: |
go mod download
- name: Run Integration tests
- env:
- GROQ_KEY: ${{ secrets.GROQ_KEY }}
- TOOLHOUSE_API_KEY: ${{ secrets.TOOLHOUSE_API_KEY }}
- E2B_API_KEY: ${{ secrets.E2B_API_KEY }}
run: |
go test -race -tags=integration ./...
- name: Run Unit tests
@@ -35,8 +31,3 @@ jobs:
env:
COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: goveralls -coverprofile=covprofile -service=github
- # or use shogo82148/actions-goveralls
- # - name: Send coverage
- # uses: shogo82148/actions-goveralls@v1
- # with:
- # path-to-profile: covprofile
diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml
index 44709e3..a5e38ff 100644
--- a/.github/workflows/lint.yaml
+++ b/.github/workflows/lint.yaml
@@ -25,7 +25,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
- go-version: '1.23.1'
+ go-version: '1.23.2'
cache: true
- name: Install requirements
id: install-lint-requirements
diff --git a/.github/workflows/unit.yaml b/.github/workflows/unit.yaml
new file mode 100644
index 0000000..c8a947c
--- /dev/null
+++ b/.github/workflows/unit.yaml
@@ -0,0 +1,32 @@
+name: Unit Tests
+on:
+ workflow_dispatch: {}
+jobs:
+ test:
+ name: Test with Coverage
+ runs-on: ubuntu-latest
+ steps:
+ - name: Set up Go
+ uses: actions/setup-go@v3
+ with:
+ go-version: '1.23.2'
+ - name: Check out code
+ uses: actions/checkout@v3
+ - name: Install dependencies
+ run: |
+ go mod download
+ - name: Run Integration tests
+ env:
+ GROQ_KEY: ${{ secrets.GROQ_KEY }}
+ TOOLHOUSE_API_KEY: ${{ secrets.TOOLHOUSE_API_KEY }}
+ E2B_API_KEY: ${{ secrets.E2B_API_KEY }}
+ run: |
+ go test -race -tags=integration ./...
+ - name: Run Unit tests
+ env:
+ GROQ_KEY: ${{ secrets.GROQ_KEY }}
+ TOOLHOUSE_API_KEY: ${{ secrets.TOOLHOUSE_API_KEY }}
+ E2B_API_KEY: ${{ secrets.E2B_API_KEY }}
+ UNIT: true
+ run: |
+ go test -race -covermode atomic -coverprofile=covprofile ./...
diff --git a/README.md b/README.md
index 572b111..9429ab8 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,13 @@
[![Coverage Status](https://coveralls.io/repos/github/conneroisu/groq-go/badge.svg?branch=main)](https://coveralls.io/github/conneroisu/groq-go?branch=main)
[![PhormAI](https://img.shields.io/badge/Phorm-Ask_AI-%23F2777A.svg?&logo=data:image/svg+xml)](https://www.phorm.ai/query?projectId=0634251d-5a98-4c37-ac2f-385b588ce3d3)
+
+
+
+
## Features
- Supports all models from [Groq](https://wow.groq.com/) in a type-safe way.
diff --git a/audio_test.go b/audio_test.go
index 22c7510..60094c6 100644
--- a/audio_test.go
+++ b/audio_test.go
@@ -1,6 +1,3 @@
-//go:build !test
-// +build !test
-
package groq
import (
diff --git a/chat.go b/chat.go
index 3e3b1b0..1a8ed46 100644
--- a/chat.go
+++ b/chat.go
@@ -13,6 +13,7 @@ import (
"time"
"github.com/conneroisu/groq-go/pkg/builders"
+ "github.com/conneroisu/groq-go/pkg/tools"
)
const (
@@ -29,7 +30,6 @@ const (
ChatCompletionResponseFormatTypeJSONObject ChatCompletionResponseFormatType = "json_object" // ChatCompletionResponseFormatTypeJSONObject is the json object chat completion response format type.
ChatCompletionResponseFormatTypeJSONSchema ChatCompletionResponseFormatType = "json_schema" // ChatCompletionResponseFormatTypeJSONSchema is the json schema chat completion response format type.
ChatCompletionResponseFormatTypeText ChatCompletionResponseFormatType = "text" // ChatCompletionResponseFormatTypeText is the text chat completion response format type.
- ToolTypeFunction ToolType = "function" // ToolTypeFunction is the function tool type.
FinishReasonStop FinishReason = "stop" // FinishReasonStop is the stop finish reason.
FinishReasonLength FinishReason = "length" // FinishReasonLength is the length finish reason.
FinishReasonFunctionCall FinishReason = "function_call" // FinishReasonFunctionCall is the function call finish reason.
@@ -69,26 +69,13 @@ type (
}
// ChatCompletionMessage represents the chat completion message.
ChatCompletionMessage struct {
- Name string `json:"name"` // Name is the name of the chat completion message.
- Role Role `json:"role"` // Role is the role of the chat completion message.
- Content string `json:"content"` // Content is the content of the chat completion message.
- MultiContent []ChatMessagePart `json:"-"` // MultiContent is the multi content of the chat completion message.
- FunctionCall *FunctionCall `json:"function_call,omitempty"` // FunctionCall setting for Role=assistant prompts this may be set to the function call generated by the model.
- ToolCalls []ToolCall `json:"tool_calls,omitempty"` // ToolCalls setting for Role=assistant prompts this may be set to the tool calls generated by the model, such as function calls.
- ToolCallID string `json:"tool_call_id,omitempty"` // ToolCallID is setting for Role=tool prompts this should be set to the ID given in the assistant's prior request to call a tool.
- }
- // ToolCall represents a tool call.
- ToolCall struct {
- // Index is not nil only in chat completion chunk object
- Index *int `json:"index,omitempty"` // Index is the index of the tool call.
- ID string `json:"id"` // ID is the id of the tool call.
- Type ToolType `json:"type"` // Type is the type of the tool call.
- Function FunctionCall `json:"function"` // Function is the function of the tool call.
- }
- // FunctionCall represents a function call.
- FunctionCall struct {
- Name string `json:"name,omitempty"` // Name is the name of the function call.
- Arguments string `json:"arguments,omitempty"` // Arguments is the arguments of the function call in JSON format.
+ Name string `json:"name"` // Name is the name of the chat completion message.
+ Role Role `json:"role"` // Role is the role of the chat completion message.
+ Content string `json:"content"` // Content is the content of the chat completion message.
+ MultiContent []ChatMessagePart `json:"-"` // MultiContent is the multi content of the chat completion message.
+ FunctionCall *tools.FunctionCall `json:"function_call,omitempty"` // FunctionCall setting for Role=assistant prompts this may be set to the function call generated by the model.
+ ToolCalls []tools.ToolCall `json:"tool_calls,omitempty"` // ToolCalls setting for Role=assistant prompts this may be set to the tool calls generated by the model, such as function calls.
+ ToolCallID string `json:"tool_call_id,omitempty"` // ToolCallID is setting for Role=tool prompts this should be set to the ID given in the assistant's prior request to call a tool.
}
// ChatCompletionResponseFormatType is the chat completion response format type.
//
@@ -138,29 +125,20 @@ type (
LogProbs bool `json:"logprobs,omitempty"` // LogProbs indicates whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message. This option is currently not available on the gpt-4-vision-preview model.
TopLogProbs int `json:"top_logprobs,omitempty"` // TopLogProbs is an integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
User string `json:"user,omitempty"` // User is the user of the chat completion request.
- Tools []Tool `json:"tools,omitempty"` // Tools is the tools of the chat completion request.
+ Tools []tools.Tool `json:"tools,omitempty"` // Tools is the tools of the chat completion request.
ToolChoice any `json:"tool_choice,omitempty"` // This can be either a string or an ToolChoice object.
StreamOptions *StreamOptions `json:"stream_options,omitempty"` // Options for streaming response. Only set this when you set stream: true.
ParallelToolCalls any `json:"parallel_tool_calls,omitempty"` // Disable the default behavior of parallel tool calls by setting it: false.
RetryDelay time.Duration `json:"-"` // RetryDelay is the delay between retries.
}
- // ToolType is the tool type.
- //
- // string
- ToolType string
- // Tool represents the tool.
- Tool struct {
- Type ToolType `json:"type"` // Type is the type of the tool.
- Function FunctionDefinition `json:"function,omitempty"` // Function is the tool's functional definition.
- }
- // ToolChoice represents the tool choice.
- ToolChoice struct {
- Type ToolType `json:"type"` // Type is the type of the tool choice.
- Function ToolFunction `json:"function,omitempty"` // Function is the function of the tool choice.
- }
- // ToolFunction represents the tool function.
- ToolFunction struct {
- Name string `json:"name"` // Name is the name of the tool function.
+ // LogProbs is the top-level structure containing the log probability information.
+ LogProbs struct {
+ Content []struct {
+ Token string `json:"token"` // Token is the token of the log prob.
+ LogProb float64 `json:"logprob"` // LogProb is the log prob of the log prob.
+ Bytes []byte `json:"bytes,omitempty"` // Omitting the field if it is null
+ TopLogProbs []TopLogProbs `json:"top_logprobs"` // TopLogProbs is a list of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.
+ } `json:"content"` // Content is a list of message content tokens with log probability information.
}
// TopLogProbs represents the top log probs.
TopLogProbs struct {
@@ -168,17 +146,6 @@ type (
LogProb float64 `json:"logprob"` // LogProb is the log prob of the top log probs.
Bytes []byte `json:"bytes,omitempty"` // Bytes is the bytes of the top log probs.
}
- // LogProb represents the probability information for a token.
- LogProb struct {
- Token string `json:"token"` // Token is the token of the log prob.
- LogProb float64 `json:"logprob"` // LogProb is the log prob of the log prob.
- Bytes []byte `json:"bytes,omitempty"` // Omitting the field if it is null
- TopLogProbs []TopLogProbs `json:"top_logprobs"` // TopLogProbs is a list of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.
- }
- // LogProbs is the top-level structure containing the log probability information.
- LogProbs struct {
- Content []LogProb `json:"content"` // Content is a list of message content tokens with log probability information.
- }
// FinishReason is the finish reason.
// string
FinishReason string
@@ -214,10 +181,10 @@ type (
}
// ChatCompletionStreamChoiceDelta represents a response structure for chat completion API.
ChatCompletionStreamChoiceDelta struct {
- Content string `json:"content,omitempty"`
- Role string `json:"role,omitempty"`
- FunctionCall *FunctionCall `json:"function_call,omitempty"`
- ToolCalls []ToolCall `json:"tool_calls,omitempty"`
+ Content string `json:"content,omitempty"`
+ Role string `json:"role,omitempty"`
+ FunctionCall *tools.FunctionCall `json:"function_call,omitempty"`
+ ToolCalls []tools.ToolCall `json:"tool_calls,omitempty"`
}
// ChatCompletionStreamChoice represents a response structure for chat completion API.
ChatCompletionStreamChoice struct {
@@ -225,10 +192,6 @@ type (
Delta ChatCompletionStreamChoiceDelta `json:"delta"`
FinishReason FinishReason `json:"finish_reason"`
}
- // PromptFilterResult represents a response structure for chat completion API.
- PromptFilterResult struct {
- Index int `json:"index"`
- }
streamer interface {
ChatCompletionStreamResponse
}
@@ -242,14 +205,16 @@ type (
}
// ChatCompletionStreamResponse represents a response structure for chat completion API.
ChatCompletionStreamResponse struct {
- ID string `json:"id"` // ID is the identifier for the chat completion stream response.
- Object string `json:"object"` // Object is the object type of the chat completion stream response.
- Created int64 `json:"created"` // Created is the creation time of the chat completion stream response.
- Model ChatModel `json:"model"` // Model is the model used for the chat completion stream response.
- Choices []ChatCompletionStreamChoice `json:"choices"` // Choices is the choices for the chat completion stream response.
- SystemFingerprint string `json:"system_fingerprint"` // SystemFingerprint is the system fingerprint for the chat completion stream response.
- PromptAnnotations []PromptAnnotation `json:"prompt_annotations,omitempty"` // PromptAnnotations is the prompt annotations for the chat completion stream response.
- PromptFilterResults []PromptFilterResult `json:"prompt_filter_results,omitempty"` // PromptFilterResults is the prompt filter results for the chat completion stream response.
+ ID string `json:"id"` // ID is the identifier for the chat completion stream response.
+ Object string `json:"object"` // Object is the object type of the chat completion stream response.
+ Created int64 `json:"created"` // Created is the creation time of the chat completion stream response.
+ Model ChatModel `json:"model"` // Model is the model used for the chat completion stream response.
+ Choices []ChatCompletionStreamChoice `json:"choices"` // Choices is the choices for the chat completion stream response.
+ SystemFingerprint string `json:"system_fingerprint"` // SystemFingerprint is the system fingerprint for the chat completion stream response.
+ PromptAnnotations []PromptAnnotation `json:"prompt_annotations,omitempty"` // PromptAnnotations is the prompt annotations for the chat completion stream response.
+ PromptFilterResults []struct {
+ Index int `json:"index"`
+ } `json:"prompt_filter_results,omitempty"` // PromptFilterResults is the prompt filter results for the chat completion stream response.
// Usage is an optional field that will only be present when you set stream_options: {"include_usage": true} in your request.
//
// When present, it contains a null value except for the last chunk which contains the token usage statistics
@@ -262,24 +227,6 @@ type (
ChatCompletionStream struct {
*streamReader[ChatCompletionStreamResponse]
}
- // FunctionDefinition represents the function definition.
- FunctionDefinition struct {
- Name string `json:"name"`
- Description string `json:"description"`
- Parameters ParameterDefinition `json:"parameters"`
- }
- // ParameterDefinition represents the parameter definition.
- ParameterDefinition struct {
- Type string `json:"type"`
- Properties map[string]PropertyDefinition `json:"properties"`
- Required []string `json:"required"`
- AdditionalProperties bool `json:"additionalProperties,omitempty"`
- }
- // PropertyDefinition represents the property definition.
- PropertyDefinition struct {
- Type string `json:"type"`
- Description string `json:"description"`
- }
)
// MarshalJSON method implements the json.Marshaler interface.
@@ -289,24 +236,24 @@ func (m ChatCompletionMessage) MarshalJSON() ([]byte, error) {
}
if len(m.MultiContent) > 0 {
msg := struct {
- Name string `json:"name,omitempty"`
- Role Role `json:"role"`
- Content string `json:"-"`
- MultiContent []ChatMessagePart `json:"content,omitempty"`
- FunctionCall *FunctionCall `json:"function_call,omitempty"`
- ToolCalls []ToolCall `json:"tool_calls,omitempty"`
- ToolCallID string `json:"tool_call_id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Role Role `json:"role"`
+ Content string `json:"-"`
+ MultiContent []ChatMessagePart `json:"content,omitempty"`
+ FunctionCall *tools.FunctionCall `json:"function_call,omitempty"`
+ ToolCalls []tools.ToolCall `json:"tool_calls,omitempty"`
+ ToolCallID string `json:"tool_call_id,omitempty"`
}(m)
return json.Marshal(msg)
}
msg := struct {
- Name string `json:"name,omitempty"`
- Role Role `json:"role"`
- Content string `json:"content"`
- MultiContent []ChatMessagePart `json:"-"`
- FunctionCall *FunctionCall `json:"function_call,omitempty"`
- ToolCalls []ToolCall `json:"tool_calls,omitempty"`
- ToolCallID string `json:"tool_call_id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Role Role `json:"role"`
+ Content string `json:"content"`
+ MultiContent []ChatMessagePart `json:"-"`
+ FunctionCall *tools.FunctionCall `json:"function_call,omitempty"`
+ ToolCalls []tools.ToolCall `json:"tool_calls,omitempty"`
+ ToolCallID string `json:"tool_call_id,omitempty"`
}(m)
return json.Marshal(msg)
}
@@ -318,9 +265,9 @@ func (m *ChatCompletionMessage) UnmarshalJSON(bs []byte) (err error) {
Role Role `json:"role"`
Content string `json:"content"`
MultiContent []ChatMessagePart
- FunctionCall *FunctionCall `json:"function_call,omitempty"`
- ToolCalls []ToolCall `json:"tool_calls,omitempty"`
- ToolCallID string `json:"tool_call_id,omitempty"`
+ FunctionCall *tools.FunctionCall `json:"function_call,omitempty"`
+ ToolCalls []tools.ToolCall `json:"tool_calls,omitempty"`
+ ToolCallID string `json:"tool_call_id,omitempty"`
}{}
err = json.Unmarshal(bs, &msg)
if err == nil {
@@ -331,10 +278,10 @@ func (m *ChatCompletionMessage) UnmarshalJSON(bs []byte) (err error) {
Name string `json:"name,omitempty"`
Role Role `json:"role"`
Content string
- MultiContent []ChatMessagePart `json:"content"`
- FunctionCall *FunctionCall `json:"function_call,omitempty"`
- ToolCalls []ToolCall `json:"tool_calls,omitempty"`
- ToolCallID string `json:"tool_call_id,omitempty"`
+ MultiContent []ChatMessagePart `json:"content"`
+ FunctionCall *tools.FunctionCall `json:"function_call,omitempty"`
+ ToolCalls []tools.ToolCall `json:"tool_calls,omitempty"`
+ ToolCallID string `json:"tool_call_id,omitempty"`
}{}
err = json.Unmarshal(bs, &multiMsg)
if err != nil {
diff --git a/examples/composio-github-star/README.md b/examples/composio-github-star/README.md
new file mode 100644
index 0000000..898ae8d
--- /dev/null
+++ b/examples/composio-github-star/README.md
@@ -0,0 +1,21 @@
+# composio-github-star
+
+Adapted from the [quickstart](https://docs.composio.dev/introduction/intro/quickstart) guide.
+
+Install the `composio` CLI and log in to your account (also add GitHub to your account if you haven't already)
+
+```bash
+pip install -U composio_core composio_openai
+
+composio login
+
+# Connect your GitHub so agents can use it
+composio add github
+```
+
+Congratulations! You’ve just:
+
+ 🔐 Authenticated your GitHub account with Composio
+    🛠 Fetched GitHub tools for the LLM
+ ⭐ Instructed the AI to star the conneroisu/groq-go repository
+ ✅ Successfully executed the action on GitHub
diff --git a/examples/composio-github-star/main.go b/examples/composio-github-star/main.go
new file mode 100644
index 0000000..19a230a
--- /dev/null
+++ b/examples/composio-github-star/main.go
@@ -0,0 +1,74 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "os"
+
+ "github.com/conneroisu/groq-go"
+ "github.com/conneroisu/groq-go/extensions/composio"
+ "github.com/conneroisu/groq-go/pkg/test"
+)
+
+func main() {
+ if err := run(context.Background()); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
+
+func run(
+ ctx context.Context,
+) error {
+ key, err := test.GetAPIKey("GROQ_KEY")
+ if err != nil {
+ return err
+ }
+ client, err := groq.NewClient(key)
+ if err != nil {
+ return err
+ }
+ key, err = test.GetAPIKey("COMPOSIO_API_KEY")
+ if err != nil {
+ return err
+ }
+ comp, err := composio.NewComposer(
+ key,
+ composio.WithLogger(slog.Default()),
+ )
+ if err != nil {
+ return err
+ }
+ tools, err := comp.GetTools(
+ ctx,
+ composio.WithApp("GITHUB"),
+ composio.WithUseCase("star-repo"),
+ )
+ if err != nil {
+ return err
+ }
+ chat, err := client.CreateChatCompletion(ctx, groq.ChatCompletionRequest{
+ Model: groq.ModelLlama3Groq70B8192ToolUsePreview,
+ Messages: []groq.ChatCompletionMessage{
+ {
+ Role: groq.ChatMessageRoleUser,
+ Content: `
+You are a github star bot. You will be given a repo name and you will star it.
+Star the repo conneroisu/groq-go on GitHub.
+`,
+ },
+ },
+ MaxTokens: 2000,
+ Tools: tools,
+ })
+ if err != nil {
+ return err
+ }
+ resp, err := comp.Run(ctx, chat)
+ if err != nil {
+ return err
+ }
+ fmt.Println(resp)
+ return nil
+}
diff --git a/examples/e2b-go-project/README.md b/examples/e2b-go-project/README.md
new file mode 100644
index 0000000..ecb18f5
--- /dev/null
+++ b/examples/e2b-go-project/README.md
@@ -0,0 +1,33 @@
+# e2b-go-project
+
+This is an example of using groq-go to create a simple Go project with the E2B and Groq APIs, powered by the groq-go library.
+
+## Usage
+
+Make sure you have a groq key set in the environment variable `GROQ_KEY`.
+Also, make sure that you have an E2B API key set in the environment variable `E2B_API_KEY`.
+
+```bash
+export GROQ_KEY=your-groq-key
+export E2B_API_KEY=your-e2b-api-key
+go run .
+```
+
+### System Prompt
+
+```txt
+Given the tools given to you, create a golang project with the following files:
+
+
+main.go
+utils.go
+
+
+The main function should call the `utils.run() error` function.
+
+The project should, when run, print the following:
+
+