openai

package module
v0.0.0-...-f0fa00b Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Mar 28, 2024 License: MPL-2.0 Imports: 13 Imported by: 4

README

OpenAI Go Reference Go Report Card

An unofficial community-maintained Go client package and CLI for OpenAI.

Installation

To use this package in your own Go project:

$ go get github.com/picatz/openai@latest

To use the openai CLI:

$ go install github.com/picatz/openai/cmd/openai@latest

[!IMPORTANT] To use the CLI you must have a valid OPENAI_API_KEY environment variable set. You can get one here.

[!TIP] You can customize which model is used by setting the OPENAI_MODEL environment variable. The default is gpt-4-turbo-preview today, but it may change in the future.

Usage

import "github.com/picatz/openai"

client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
Assistants API
assistant, _ := client.CreateAssistant(ctx, &openai.CreateAssistantRequest{
	Model:        openai.ModelGPT4TurboPreview,
	Instructions: "You are a helpful assistant for all kinds of tasks. Answer as concisely as possible.",
	Name:         "openai-cli-assistant",
	Description:  "A helpful assistant for all kinds of tasks.",
	Tools: []map[string]any{
		{
			"type": "code_interpreter",
		},
		{
			"type": "retrieval",
		},
		// {
		// 	"type": "function",
		//  ...
		// },
	},
})

thread, _ := client.CreateThread(ctx, nil)

client.CreateMessage(ctx, &openai.CreateMessageRequest{
	ThreadID: thread.ID,
	Role:     openai.ChatRoleUser,
	Content:  input,
})

runResp, _ := client.CreateRun(ctx, &openai.CreateRunRequest{
	ThreadID:    thread.ID,
	AssistantID: assistant.ID,
})

openai.WaitForRun(ctx, client, thread.ID, runResp.ID, 700*time.Millisecond)

listResp, _ := client.ListMessages(ctx, &openai.ListMessagesRequest{
	ThreadID: thread.ID,
	Limit:    1,
})

fmt.Println(listResp.Data[0].Content[0].Text())
Chat API
history := []openai.ChatMessage{
	{
		Role:    openai.ChatRoleSystem,
		Content: "You are a helpful assistant for this example.",
	},
	{
		Role:    openai.ChatRoleUser,
		Content: "Hello!", // Get input from user.
	},
}

resp, _ := client.CreateChat(ctx, &openai.CreateChatRequest{
	Model: openai.ModelGPT35Turbo,
	Messages: history,
})

fmt.Println(resp.Choices[0].Message.Content)
// Hello how may I help you today?

// Update history, summarize, forget, etc. Then repeat.
history = append(history, resp.Choices[0].Message)
openai CLI

Use OpenAI's chat, edit, and completion features on the command-line.

$ go install github.com/picatz/openai/cmd/openai@latest
Usage
$ openai --help
OpenAI CLI

Usage:
  openai [flags]
  openai [command]

Available Commands:
  assistant   Start an interactive assistant chat session
  chat        Chat with the OpenAI API
  completion  Generate the autocompletion script for the specified shell
  help        Help about any command
  image       Generate an image with DALL·E

Flags:
  -h, --help   help for openai

Use "openai [command] --help" for more information about a command.
$ openai assistant

Welcome to the OpenAI API CLI assistant mode!
                                             
WARNING: Messages and files disappear after exiting.

> Hello!

Hello there! How may I assist you today?

...

[!TIP] If no subcommand (like assistant or chat) is provided, the CLI will default to assistant mode.

$ openai chat

Welcome to the OpenAI API CLI chat mode. Type 'exit' to quit.

> Hello!

Hello there! How may I assist you today?

...

Documentation

Overview

Package openai provides a client for the OpenAI API.

https://beta.openai.com/docs/api-reference

Index

Examples

Constants

This section is empty.

Variables

View Source
var (
	FunctionCallAuto = FunctionCallControlAuto{}
	FunctionCallNone = FunctionCallControlNone{}
)
View Source
var RateLimits = NewRateLimiters()

RateLimits is the default rate limiters for the OpenAI API.

Multiple Organizations

If using multiple organizations, users should create their own rate limiters using the `NewRateLimiters()` function.

Functions

func FunctionCallArgumentValue

func FunctionCallArgumentValue[T any](name string, args FunctionCallArguments) (T, error)

FunctionCallArgumentValue returns the value of the argument with the given name.

func WaitForRun

func WaitForRun(ctx context.Context, client *Client, threadID, runID string, interval time.Duration) error

WaitForRun polls the API at the given interval until the run is completed, failed, cancelled, or expired.

It returns nil if the run completed successfully, or an error if the run failed, was cancelled, or expired.

Types

type Assistant

type Assistant struct {
	ID           string           `json:"id"`
	Object       string           `json:"object"`
	Created      int              `json:"created"`
	Name         string           `json:"name"`
	Description  string           `json:"description"`
	Model        string           `json:"model"`
	Instructions string           `json:"instructions"`
	Tools        []map[string]any `json:"tools"`
	FileIDs      []string         `json:"file_ids"`
	Metadata     map[string]any   `json:"metadata"`
}

https://platform.openai.com/docs/api-reference/assistants/object

type AssistantFile

type AssistantFile struct {
	ID          string `json:"id"`
	Object      string `json:"object"`
	Created     int    `json:"created"`
	AssistantID string `json:"assistant_id"`
}

https://platform.openai.com/docs/api-reference/assistants/file-object

type AssistantToolOutput

type AssistantToolOutput struct {
	CallID string `json:"tool_call_id,omitempty"`
	Output string `json:"output,omitempty"`
}

type AudioTranscriptableFile

type AudioTranscriptableFile interface {
	io.ReadCloser
	Name() string
}

func NewAudioTranscriptableFileFromReadCloser

func NewAudioTranscriptableFileFromReadCloser(rc io.ReadCloser, name string) AudioTranscriptableFile

type AudioTranscriptionFile

type AudioTranscriptionFile struct {
	ReadCloser *AudioTranscriptionFileReadCloser

	File *os.File
}

AudioTranscriptionFile is a file to be used in a CreateAudioTranscriptionRequest, allowing a caller to provide various types of file types.

Only provide one of the fields in this struct.

https://platform.openai.com/docs/api-reference/audio/create#audio/create-file

type AudioTranscriptionFileReadCloser

type AudioTranscriptionFileReadCloser struct {
	io.ReadCloser
	// contains filtered or unexported fields
}

func (*AudioTranscriptionFileReadCloser) Name

type CancelFineTuneRequest

type CancelFineTuneRequest struct {
	ID string `json:"id"`
}

https://platform.openai.com/docs/api-reference/fine-tunes/cancel

type CancelFineTuneResponse

type CancelFineTuneResponse struct {
	ID              string `json:"id"`
	Object          string `json:"object"`
	Model           string `json:"model"`
	CreatedAt       int    `json:"created_at"`
	Events          []any  `json:"events"`
	FineTunedModel  any    `json:"fine_tuned_model"`
	Hyperparams     any    `json:"hyperparams"`
	OrganizationID  string `json:"organization_id"`
	ResultFiles     []any  `json:"result_files"`
	Status          string `json:"status"`
	ValidationFiles []any  `json:"validation_files"`
	TrainingFiles   []struct {
		ID        string `json:"id"`
		Object    string `json:"object"`
		Bytes     int    `json:"bytes"`
		CreatedAt int    `json:"created_at"`
		Filename  string `json:"filename"`
		Purpose   string `json:"purpose"`
	} `json:"training_files"`
	UpdatedAt int `json:"updated_at"`
}

https://platform.openai.com/docs/api-reference/fine-tunes/cancel

type ChatMessage

type ChatMessage struct {
	// Role is the role of the message, e.g. "user" or "assistant".
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-role
	//
	// Required.
	Role string `json:"role"`

	// Content is the text of the message.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-content
	//
	// Optional.
	Content string `json:"content"`

	// Name is the author of this message. It is required if role is function,
	// and it should be the name of the function whose response is in the content.
	//
	// May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-name
	//
	// Optional.
	Name string `json:"name,omitempty"`

	// FunctionCall the name and arguments of a function that should be called,
	// as generated by the model.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-function_call
	//
	// Optional.
	FunctionCall *FunctionCall `json:"function_call,omitempty"`
}

type ChatMessageStreamChunk

type ChatMessageStreamChunk struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int    `json:"created"`
	Model   string `json:"model"`
	Choices []struct {
		// Delta is either for role or content.
		Delta struct {
			Role    *string `json:"role"`
			Content *string `json:"content"`
		} `json:"delta"`
		Index        int `json:"index"`
		FinishReason any `json:"finish_reason"`
	} `json:"choices"`
}

func (*ChatMessageStreamChunk) ContentDelta

func (c *ChatMessageStreamChunk) ContentDelta() bool

ContentDelta reports whether the chunk contains a content delta.

func (*ChatMessageStreamChunk) FirstChoice

func (c *ChatMessageStreamChunk) FirstChoice() (string, error)

FirstChoice returns the content of the first choice in the chunk, or an error if there are no choices.

type ChatRole

type ChatRole = string

ChatRole is a role that can be used in a chat session, either “system”, “user”, or “assistant”.

https://platform.openai.com/docs/guides/chat/introduction

const (
	// ChatRoleUser is a user role.
	ChatRoleUser ChatRole = "user"

	// ChatRoleSystem is a system role.
	ChatRoleSystem ChatRole = "system"

	// ChatRoleAssistant is an assistant role.
	ChatRoleAssistant ChatRole = "assistant"
)

type Client

type Client struct {
	// APIKey is the API key to use for requests.
	APIKey string

	// HTTPClient is the HTTP client to use for requests.
	HTTPClient *http.Client

	// Organization is the organization to use for requests.
	Organization string
}

Client is a client for the OpenAI API.

https://platform.openai.com/docs/api-reference

func NewClient

func NewClient(apiKey string, opts ...ClientOption) *Client

NewClient returns a new Client with the given API key.

Example

c := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

func (*Client) CreateAudioTranscription

CreateAudioTranscription transcribes audio into the input language.

https://platform.openai.com/docs/api-reference/audio/create

func (*Client) CreateChat

func (c *Client) CreateChat(ctx context.Context, req *CreateChatRequest) (*CreateChatResponse, error)

CreateChat sends a chat request to the API to obtain a chat response, creating a completion for the included chat messages (the conversation context and history).

Example

history := []openai.ChatMessage{
 	{
 		Role:    openai.ChatRoleSystem,
 		Content: "You are a helpful assistant for this example.",
 	},
 	{
 		Role:    openai.ChatRoleUser,
 		Content: "Hello!", // Get input from user.
  	},
 }

resp, _ := client.CreateChat(ctx, &openai.CreateChatRequest{
	Model: openai.ModelGPT35Turbo,
	Messages: history,
})

fmt.Println(resp.Choices[0].Message.Content)
// Hello how may I help you today?

// Update history, summarize, forget, etc. Then repeat.
history = append(history, resp.Choices[0].Message)

https://platform.openai.com/docs/api-reference/chat/create

Example
package main

import (
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/picatz/openai"
)

func main() {
	c := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

	ctx := context.Background()

	messages := []openai.ChatMessage{
		{
			Role:    openai.ChatRoleSystem,
			Content: "You are a helpful assistant familiar with children's stories, and answer in only single words.",
		},
		{
			Role:    "user",
			Content: "Clifford is a big dog, but what color is he?",
		},
	}

	resp, err := c.CreateChat(ctx, &openai.CreateChatRequest{
		Model:    openai.ModelGPT35Turbo,
		Messages: messages,
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(strings.ToLower(strings.TrimRight(strings.TrimSpace(resp.Choices[0].Message.Content), ".")))
}
Output:

red

func (*Client) CreateCompletion deprecated

func (c *Client) CreateCompletion(ctx context.Context, req *CreateCompletionRequest) (*CreateCompletionResponse, error)

CreateCompletion performs a "completion" request using the OpenAI API.

Warning

The completions API endpoint received its final update in July 2023 and has a different interface than the new chat completions endpoint. Instead of the input being a list of messages, the input is a freeform text string called a prompt.

Example

 resp, _ := client.CreateCompletion(ctx, &openai.CreateCompletionRequest{
	Model: openai.ModelDavinci,
	Prompt: []string{"Once upon a time"},
	MaxTokens: 16,
 })

Deprecated: github.com/picatz/openai.Client.CreateCompletion is deprecated (legacy). Use github.com/picatz/openai.Client.CreateChat instead.

https://platform.openai.com/docs/api-reference/completions/create

Example
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/picatz/openai"
)

func main() {
	c := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

	ctx := context.Background()

	resp, err := c.CreateCompletion(ctx, &openai.CreateCompletionRequest{
		Model:     openai.ModelDavinci,
		Prompt:    []string{"The cow jumped over the"},
		MaxTokens: 1,
		N:         1,
	})

	if err != nil {
		panic(err)
	}

	fmt.Println(resp.Choices[0].Text)
}
Output:

moon

func (*Client) CreateEdit deprecated

func (c *Client) CreateEdit(ctx context.Context, req *CreateEditRequest) (*CreateEditResponse, error)

CreateEdit performs an "edit" request using the OpenAI API.

Warning

Users of the Edits API and its associated models (e.g., text-davinci-edit-001 or code-davinci-edit-001) will need to migrate to GPT-3.5 Turbo by January 4, 2024.

Example

resp, _ := client.CreateEdit(ctx, &CreateEditRequest{
	Model:       openai.ModelTextDavinciEdit001,
	Instruction: "Change the word 'test' to 'example'",
	Input:       "This is a test",
})

Deprecated: github.com/picatz/openai.Client.CreateEdit is deprecated (legacy). Use github.com/picatz/openai.Client.CreateChat instead.

https://platform.openai.com/docs/api-reference/edits/create

Example
package main

import (
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/picatz/openai"
)

func main() {
	c := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

	ctx := context.Background()

	resp, err := c.CreateEdit(ctx, &openai.CreateEditRequest{
		Model:       openai.ModelTextDavinciEdit001,
		Instruction: "ONLY change the word 'test' to 'example', with no other changes",
		Input:       "This is a test",
	})

	if err != nil {
		panic(err)
	}

	// Get the words from the response.
	words := strings.Split(resp.Choices[0].Text, " ")

	fmt.Println(words[len(words)-1])
}
Output:

example

func (*Client) CreateEmbedding

func (c *Client) CreateEmbedding(ctx context.Context, req *CreateEmbeddingRequest) (*CreateEmbeddingResponse, error)

CreateEmbedding performs an "embedding" request using the OpenAI API.

Example

resp, _ := c.CreateEmbedding(ctx, &openai.CreateEmbeddingRequest{
	Model: openai.ModelTextEmbeddingAda002,
	Input: "The food was delicious and the waiter...",
})

https://platform.openai.com/docs/api-reference/embeddings

func (*Client) CreateImage

func (c *Client) CreateImage(ctx context.Context, req *CreateImageRequest) (*CreateImageResponse, error)

CreateImage performs an "image" request using the OpenAI API.

Example

resp, _ := c.CreateImage(ctx, &openai.CreateImageRequest{
	Prompt:         "Golang-style gopher mascot wearing an OpenAI t-shirt",
	N:              1,
	Size:           "256x256",
	ResponseFormat: "url",
})

https://platform.openai.com/docs/api-reference/images/create

func (*Client) CreateModeration

func (c *Client) CreateModeration(ctx context.Context, req *CreateModerationRequest) (*CreateModerationResponse, error)

CreateModeration performs a "moderation" request using the OpenAI API.

Example

resp, _ := c.CreateModeration(ctx, &openai.CreateModerationRequest{
	Input: "I want to kill them.",
})

https://platform.openai.com/docs/api-reference/moderations

func (*Client) DeleteAssistant

func (c *Client) DeleteAssistant(ctx context.Context, req *DeleteAssistantRequest) error

func (*Client) DeleteFile

func (c *Client) DeleteFile(ctx context.Context, req *DeleteFileRequest) (*DeleteFileResponse, error)

DeleteFile performs a "delete file" request using the OpenAI API.

Example

resp, _ := c.DeleteFile(ctx, &openai.DeleteFileRequest{
	ID: "file-123",
})

https://platform.openai.com/docs/api-reference/files/delete

func (*Client) GetAssistantFile

func (c *Client) GetAssistantFile(ctx context.Context, req *GetAssistantFileRequest) (*GetAssistantFileResponse, error)

func (*Client) GetFileContent

func (c *Client) GetFileContent(ctx context.Context, req *GetFileContentRequest) (*GetFileContentResponse, error)

GetFileContent performs a "get file content (retrieve content)" request using the OpenAI API.

Example

resp, _ := c.GetFileContent(ctx, &openai.GetFileContentRequest{
	ID: "file-123",
})

https://platform.openai.com/docs/api-reference/files/retrieve-content

func (*Client) GetFileInfo

func (c *Client) GetFileInfo(ctx context.Context, req *GetFileInfoRequest) (*GetFileInfoResponse, error)

GetFileInfo performs a "get file info (retrieve)" request using the OpenAI API.

Example

resp, _ := c.GetFileInfo(ctx, &openai.GetFileInfoRequest{
	ID: "file-123",
})

https://platform.openai.com/docs/api-reference/files/retrieve

func (*Client) GetMessage

func (c *Client) GetMessage(ctx context.Context, req *GetMessageRequest) (*GetMessageResponse, error)

func (*Client) GetMessageFile

func (c *Client) GetMessageFile(ctx context.Context, req *GetMessageFileRequest) (*GetMessageFileResponse, error)

func (*Client) GetThread

func (c *Client) GetThread(ctx context.Context, req *GetThreadRequest) (*GetThreadResponse, error)

func (*Client) ListFiles

func (c *Client) ListFiles(ctx context.Context, req *ListFilesRequest) (*ListFilesResponse, error)

ListFiles performs a "list files" request using the OpenAI API.

Example

resp, _ := c.ListFiles(ctx, &openai.ListFilesRequest{})

https://platform.openai.com/docs/api-reference/files

func (*Client) ListMessageFiles

func (c *Client) ListMessageFiles(ctx context.Context, req *ListMessageFilesRequest) (*ListMessageFilesResponse, error)

func (*Client) ListMessages

func (c *Client) ListMessages(ctx context.Context, req *ListMessagesRequest) (*ListMessagesResponse, error)

func (*Client) ListModels

func (c *Client) ListModels(ctx context.Context) (*Models, error)

ListModels list model identifiers that can be used with the OpenAI API.

Example

resp, _ := client.ListModels(ctx)

for _, model := range resp.Data {
   fmt.Println(model.ID)
}

https://platform.openai.com/docs/api-reference/models/list

func (*Client) UpdateAssistant

func (c *Client) UpdateAssistant(ctx context.Context, req *UpdateAssistantRequest) (*Assistant, error)

func (*Client) UpdateMessage

func (c *Client) UpdateMessage(ctx context.Context, req *UpdateMessageRequest) (*UpdateMessageResponse, error)

func (*Client) UpdateThread

func (c *Client) UpdateThread(ctx context.Context, req *UpdateThreadRequest) (*UpdateThreadResponse, error)

func (*Client) UploadFile

func (c *Client) UploadFile(ctx context.Context, req *UploadFileRequest) (*UploadFileResponse, error)

UploadFile performs an "upload file" request using the OpenAI API.

Example

resp, _ := c.UploadFile(ctx, &openai.UploadFileRequest{
	Name:    "fine-tune.jsonl",
	Purpose: "fine-tune",
})

https://platform.openai.com/docs/api-reference/files

type ClientOption

type ClientOption func(*Client)

ClientOption is a function that configures a Client.

func WithHTTPClient

func WithHTTPClient(c *http.Client) ClientOption

WithHTTPClient is a ClientOption that sets the HTTP client to use for requests.

If the client is nil, then http.DefaultClient is used.

func WithOrganization

func WithOrganization(org string) ClientOption

WithOrganization is a ClientOption that sets the organization to use for requests.

https://platform.openai.com/docs/api-reference/authentication

type CreateAudioTranscriptionResponse

type CreateAudioTranscriptionResponse interface {
	Text() string
}

https://platform.openai.com/docs/api-reference/audio/create

type CreateAudioTranscriptionResponseJSON

type CreateAudioTranscriptionResponseJSON struct {
	RawText string `json:"text"`
}

https://platform.openai.com/docs/api-reference/audio/create

type CreateChatRequest

type CreateChatRequest struct {
	// The model to use for the chat (e.g. "gpt-3.5-turbo" or "gpt-4").
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-model
	//
	// Required.
	Model string `json:"model,omitempty"`

	// The context window of the conversation, which is a list of messages.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-messages
	//
	// Required.
	Messages []ChatMessage `json:"messages,omitempty"`

	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-temperature
	//
	// Optional.
	Temperature float64 `json:"temperature,omitempty"`

	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-top_p
	//
	// Optional.
	TopP float64 `json:"top_p,omitempty"`

	// The number of responses to return, which is typically 1 (the default).
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-n
	//
	// Optional.
	N int `json:"n,omitempty"`

	// Enable streaming mode, which will return a stream instead of a list of
	// responses. This is useful for longer messages, where the caller can
	// process the response incrementally, instead of waiting for the entire
	// response to be returned.
	//
	// You can use this to enable a fun "typing" effect while the chat bot
	// is generating the response, or start transmitting the response as
	// soon as the first few tokens are available.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-stream
	//
	// Optional.
	Stream bool `json:"stream,omitempty"`

	// Up to 4 sequences where the API will stop generating further tokens.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-stop
	//
	// Optional.
	Stop []string `json:"stop,omitempty"`

	// The maximum number of tokens to generate in the chat completion.
	//
	// The total length of input tokens and generated tokens is limited
	// by the model's context length.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-max_tokens
	//
	// Optional.
	MaxTokens int `json:"max_tokens,omitempty"`

	// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether
	// they appear in the text so far, increasing the model's likelihood to talk about new topics.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-presence_penalty
	//
	// Optional.
	PresencePenalty float64 `json:"presence_penalty,omitempty"`

	// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
	// frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-frequency_penalty
	//
	// Optional.
	FrequencyPenalty float64 `json:"frequency_penalty,omitempty"`

	// Modify the likelihood of specified tokens appearing in the completion.
	//
	// This is a json object that maps tokens (specified by their token ID in the tokenizer)
	// to an associated bias value from -100 to 100. Mathematically, the bias is added to
	// the logits generated by the model prior to sampling. The exact effect will vary per
	// model, but values between -1 and 1 should decrease or increase likelihood of selection;
	// values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
	//
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
	//
	// Optional.
	LogitBias map[string]float64 `json:"logit_bias,omitempty"`

	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	//
	// https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-user
	//
	// Optional.
	User string `json:"user,omitempty"`

	// Functions are the functions that can be called by the model.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions
	//
	// Optional.
	Functions []*Function `json:"functions,omitempty"`

	// Controls how the model responds to function calls. "none" means the model does not
	// call a function, and responds to the end-user. "auto" means the model can pick
	// between an end-user or calling a function. Specifying a particular function
	// via {"name":\ "my_function"} forces the model to call that function. "none"
	// is the default when no functions are present. "auto" is the default if
	// functions are present.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-function_call
	//
	// Optional.
	FunctionCall FunctionCallControl `json:"function_call,omitempty"`
}

CreateChatRequest is sent to the API, which will return a chat response.

This is the substrate for the OpenAI chat API, which can be used for enabling "chat sessions". The API is designed to be used in a loop, where the response from the previous request is typically used as the input for the next request, specifically the `messages` field, which contains the current "context window" of the conversation that must be maintained by the caller.

This is where the art of building a chat bot comes in, as the caller must decide how to manage the context window, e.g. how to maintain the long term memory of the conversation; what to include in the next request, and what to discard; how to handle the "end of conversation" signal, etc.

To identify similar messages from past "memories", the caller can use the embedding API to obtain embeddings for the messages, and then use a similarity metric to identify similar messages; cosine similarity is often used, but it is not the only option.

https://platform.openai.com/docs/api-reference/chat/create

type CreateChatResponse

type CreateChatResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int    `json:"created"`
	Model   string `json:"model"`
	Usage   struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
		TotalTokens      int `json:"total_tokens"`
	} `json:"usage"`
	Choices []struct {
		Message      ChatMessage `json:"message"`
		FinishReason string      `json:"finish_reason"`
		Index        int         `json:"index"`
	} `json:"choices"`

	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-stream
	Stream io.ReadCloser `json:"-"`
}

CreateChatResponse is received in response to a chat request.

https://platform.openai.com/docs/api-reference/chat/create

func (*CreateChatResponse) FirstChoice

func (r *CreateChatResponse) FirstChoice() (*ChatMessage, error)

FirstChoice returns the first choice in the response, or an error if there are no choices.

func (*CreateChatResponse) RandomChoice

func (r *CreateChatResponse) RandomChoice() (*ChatMessage, error)

RandomChoice returns a random choice in the response, or an error if there are no choices.

func (*CreateChatResponse) ReadStream

func (r *CreateChatResponse) ReadStream(ctx context.Context, cb func(*ChatMessageStreamChunk) error) error

ReadStream reads the stream, applying the callback to each message.

Messages are sent via server-sent events (SSE).

type CreateCompletionRequest

type CreateCompletionRequest struct {
	// ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them.
	//
	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-model
	Model string `json:"model"`

	// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
	//
	// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model
	// will generate as if from the beginning of a new document.
	//
	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-prompt
	Prompt []string `json:"prompt"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-suffix
	Suffix string `json:"suffix,omitempty"`

	// The maximum number of tokens to generate in the completion.
	//
	// The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context
	// length of 2048 tokens (except for the newest models, which support 4096).
	//
	// Defaults to 16 if not specified.
	//
	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-max_tokens
	MaxTokens int `json:"max_tokens,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
	//
	// Defaults to 1 if not specified.
	Temperature float64 `json:"temperature,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-top_p
	//
	// Defaults to 1 if not specified.
	TopP float64 `json:"top_p,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-n
	//
	// Defaults to 1 if not specified.
	N int `json:"n,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-stream
	//
	// Defaults to false if not specified.
	Stream bool `json:"stream,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-logprobs
	//
	// Defaults to nil.
	LogProbs *int `json:"logprobs,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-echo
	//
	// Defaults to false if not specified.
	Echo bool `json:"echo,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-stop
	Stop []string `json:"stop,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-presence_penalty
	//
	// Defaults to 0 if not specified.
	PresencePenalty int `json:"presence_penalty,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-frequency_penalty
	//
	// Defaults to 0 if not specified.
	FrequencyPenalty int `json:"frequency_penalty,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-best_of
	//
	// Defaults to 1 if not specified.
	//
	// WARNING: Because this parameter generates many completions, it can quickly consume your token quota.
	//          Use carefully and ensure that you have reasonable settings for max_tokens and stop.
	BestOf int `json:"best_of,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-logit_bias
	//
	// Defaults to nil.
	LogitBias map[string]float64 `json:"logit_bias,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-user
	//
	// Defaults to nil.
	User string `json:"user,omitempty"`
}

CreateCompletionRequest contains information for a "completion" request to the OpenAI API. This is the fundamental request type for the API.

https://platform.openai.com/docs/api-reference/completions/create

type CreateCompletionResponse

type CreateCompletionResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int    `json:"created"`
	Model   string `json:"model"`
	Choices []struct {
		Text         string      `json:"text"`
		Index        int         `json:"index"`
		Logprobs     interface{} `json:"logprobs"`
		FinishReason string      `json:"finish_reason"`
	} `json:"choices"`
	Usage struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
		TotalTokens      int `json:"total_tokens"`
	} `json:"usage"`
}

CreateCompletionResponse is the response from a "completion" request to the OpenAI API.

https://platform.openai.com/docs/api-reference/completions/create

type CreateEditResponse

type CreateEditResponse struct {
	Object  string `json:"object"`
	Created int    `json:"created"`
	Choices []struct {
		Text  string `json:"text"`
		Index int    `json:"index"`
	} `json:"choices"`
	Usage struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
		TotalTokens      int `json:"total_tokens"`
	} `json:"usage"`
}

https://platform.openai.com/docs/api-reference/edits/create

type CreateEmbeddingResponse

type CreateEmbeddingResponse struct {
	Object string `json:"object"`
	Data   []struct {
		Object    string    `json:"object"`
		Embedding []float64 `json:"embedding"`
		Index     int       `json:"index"`
	} `json:"data"`
	Model string `json:"model"`
	Usage struct {
		PromptTokens int `json:"prompt_tokens"`
		TotalTokens  int `json:"total_tokens"`
	} `json:"usage"`
}

CreateEmbeddingResponse ...

https://platform.openai.com/docs/guides/embeddings/what-are-embeddings

type CreateFineTuneRequest

type CreateFineTuneRequest struct {
	// https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-training_file
	//
	// Required.
	TrainingFile string `json:"training_file"`

	// https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-validation_file
	//
	// Optional.
	ValidationFile string `json:"validation_file,omitempty"`

	// https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-model
	//
	// Optional. Defaults to "curie".
	Model string `json:"model,omitempty"`

	// https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-epochs
	//
	// Optional. Defaults to 4.
	Epochs int `json:"n_epochs,omitempty"`

	// https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-batch_size
	//
	// Optional. Defaults to 32.
	BatchSize int `json:"batch_size,omitempty"`

	// https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-learning_rate_multiplier
	//
	// Optional. Default depends on the batch size.
	LearningRateMultiplier float64 `json:"learning_rate_multiplier,omitempty"`

	// https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-prompt_loss_weight
	//
	// Optional. Defaults to 0.01
	PromptLossWeight float64 `json:"prompt_loss_weight,omitempty"`

	// https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-compute_classification_metrics
	//
	// Optional. Defaults to false.
	ComputeClassificationMetrics bool `json:"compute_classification_metrics,omitempty"`

	// https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_n_classes
	//
	// Optional, but required for multi-class classification.
	ClassificationNClasses int `json:"classification_n_classes,omitempty"`

	// https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_positive_class
	//
	// Optional, but required for binary classification.
	ClassificationPositiveClass string `json:"classification_positive_class,omitempty"`

	// https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_betas
	//
	// Optional, only used for binary classification.
	ClassificationBetas []float64 `json:"classification_betas,omitempty"`

	// https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-suffix
	//
	// A string of up to 40 characters that will be added to your fine-tuned model name.
	//
	// For example, a suffix of "custom-model-name" would produce a model name like
	// `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
	//
	// Optional.
	Suffix string `json:"suffix,omitempty"`
}

https://platform.openai.com/docs/api-reference/fine-tunes/create

type CreateFineTuneResponse

type CreateFineTuneResponse struct {
	ID        string `json:"id"`
	Object    string `json:"object"`
	Model     string `json:"model"`
	CreatedAt int    `json:"created_at"`
	Events    []struct {
		Object    string `json:"object"`
		CreatedAt int    `json:"created_at"`
		Level     string `json:"level"`
		Message   string `json:"message"`
	} `json:"events"`
	FineTunedModel interface{} `json:"fine_tuned_model"`
	Hyperparams    struct {
		BatchSize              int     `json:"batch_size"`
		LearningRateMultiplier float64 `json:"learning_rate_multiplier"`
		NEpochs                int     `json:"n_epochs"`
		PromptLossWeight       float64 `json:"prompt_loss_weight"`
	} `json:"hyperparams"`
	OrganizationID  string        `json:"organization_id"`
	ResultFiles     []interface{} `json:"result_files"`
	Status          string        `json:"status"`
	ValidationFiles []interface{} `json:"validation_files"`
	TrainingFiles   []struct {
		ID        string `json:"id"`
		Object    string `json:"object"`
		Bytes     int    `json:"bytes"`
		CreatedAt int    `json:"created_at"`
		Filename  string `json:"filename"`
		Purpose   string `json:"purpose"`
	} `json:"training_files"`
	UpdatedAt int `json:"updated_at"`
}

CreateFineTuneResponse is the response from a "create fine-tune" request.

https://platform.openai.com/docs/api-reference/fine-tunes/create

type CreateImageRequest

type CreateImageRequest struct {
	// https://platform.openai.com/docs/api-reference/images/create#images/create-prompt
	//
	// Required. Max of 1,000 characters.
	Prompt string `json:"prompt"`

	// https://platform.openai.com/docs/api-reference/images/create#images-create-model
	//
	// Optional. Defaults to "dall-e-2".
	Model string `json:"model,omitempty"`

	// https://platform.openai.com/docs/api-reference/completions/create#completions/create-n
	//
	// Number of images to generate. Defaults to 1 if not specified. Must be between 1 and 10.
	N int `json:"n,omitempty"`

	// https://platform.openai.com/docs/api-reference/images/create#images/create-size
	//
	// Size of the image to generate. Must be one of 256x256, 512x512, or 1024x1024.
	Size string `json:"size,omitempty"`

	// https://platform.openai.com/docs/api-reference/images/create#images/create-response_format
	//
	// Defaults to "url". The format in which the generated images are returned. Must be one of "url" or "b64_json".
	ResponseFormat string `json:"response_format,omitempty"`

	// https://platform.openai.com/docs/api-reference/images/create#images/create-user
	User string `json:"user,omitempty"`

	// https://platform.openai.com/docs/api-reference/images/create#images-create-quality
	//
	// Optional. Either "standard" or "hd", defaults to "standard".
	Quality string `json:"quality,omitempty"`

	// https://platform.openai.com/docs/api-reference/images/create#images-create-style
	//
	// Optional. Either "vivid" or "natural", defaults to "vivid". Only valid for "dall-e-3" model.
	Style string `json:"style,omitempty"`
}

https://platform.openai.com/docs/api-reference/images/create

type CreateImageResponse

type CreateImageResponse struct {
	Created int `json:"created"`
	Data    []struct {
		// One of the following: "url" or "b64_json"
		URL     *string `json:"url"`
		B64JSON *string `json:"b64_json"`

		// If there were any prompt revisions made by the API.
		// Use this to refine further.
		RevisedPrompt *string `json:"revised_prompt"`
	} `json:"data"`
}

CreateImageResponse ...

type CreateModerationRequest

type CreateModerationRequest struct {
	// https://platform.openai.com/docs/api-reference/moderations/create#moderations/create-model
	//
	// Optional. The model to use for moderation. Defaults to "text-moderation-latest".
	Model string `json:"model"`

	// https://platform.openai.com/docs/api-reference/moderations/create#moderations/create-input
	//
	// Required. The text to moderate.
	Input string `json:"input"`
}

https://platform.openai.com/docs/api-reference/moderations/create

type CreateModerationResponse

type CreateModerationResponse struct {
	ID      string `json:"id"`
	Model   string `json:"model"`
	Results []struct {
		Categories struct {
			Hate            bool `json:"hate"`
			HateThreatening bool `json:"hate/threatening"`
			SelfHarm        bool `json:"self-harm"`
			Sexual          bool `json:"sexual"`
			SexualMinors    bool `json:"sexual/minors"`
			Violence        bool `json:"violence"`
			ViolenceGraphic bool `json:"violence/graphic"`
		} `json:"categories"`
		CategoryScores struct {
			Hate            float64 `json:"hate"`
			HateThreatening float64 `json:"hate/threatening"`
			SelfHarm        float64 `json:"self-harm"`
			Sexual          float64 `json:"sexual"`
			SexualMinors    float64 `json:"sexual/minors"`
			Violence        float64 `json:"violence"`
			ViolenceGraphic float64 `json:"violence/graphic"`
		} `json:"category_scores"`
		Flagged bool `json:"flagged"`
	} `json:"results"`
}

CreateModerationResponse ...

https://platform.openai.com/docs/guides/moderations/what-are-moderations

type CreateRunRequest

type CreateRunRequest struct {
	// https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-thread_id
	//
	// Required.
	ThreadID string `json:"-"`

	// https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-assistant_id
	//
	// Required.
	AssistantID string `json:"assistant_id"`

	// https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-model
	//
	// Optional. Defaults to the model associated with the assistant.
	Model string `json:"model,omitempty"`

	// https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-instructions
	//
	// Optional. Defaults to the instructions associated with the assistant.
	Instructions string `json:"instructions,omitempty"`

	// https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-tools
	//
	// Optional. Defaults to the tools associated with the assistant.
	Tools []map[string]any `json:"tools,omitempty"`

	// https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-metadata
	//
	// Optional.
	Metadata map[string]any `json:"metadata,omitempty"`
}

https://platform.openai.com/docs/api-reference/runs/createRun

type CreateThreadAndRunRequest

type CreateThreadAndRunRequest struct {
	// https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-assistant_id
	//
	// Required.
	AssistantID string `json:"assistant_id"`

	// https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-thread
	//
	// Optional.
	Thread *CreateThreadAndRunRequestInitialThread `json:"thread,omitempty"`

	// https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-model
	//
	// Optional. Defaults to the model associated with the assistant.
	Model string `json:"model,omitempty"`

	// https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-instructions
	//
	// Optional. Defaults to the instructions associated with the assistant.
	Instructions string `json:"instructions,omitempty"`

	// https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-tools
	//
	// Optional. Defaults to the tools associated with the assistant.
	Tools []map[string]any `json:"tools,omitempty"`

	// https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-metadata
	//
	// Optional.
	Metadata map[string]any `json:"metadata,omitempty"`
}

https://platform.openai.com/docs/api-reference/runs/createThreadAndRun

type CreateThreadAndRunRequestInitialThread

type CreateThreadAndRunRequestInitialThread struct {
	Messages []*CreateThreadAndRunRequestInitialThreadMessage `json:"messages,omitempty"`
	Metadata map[string]any                                   `json:"metadata,omitempty"`
}

type CreateThreadAndRunRequestInitialThreadMessage

type CreateThreadAndRunRequestInitialThreadMessage struct {
	Role     string         `json:"role"`
	Content  string         `json:"content"`
	FilesIDs []string       `json:"file_ids,omitempty"`
	Metadata map[string]any `json:"metadata,omitempty"`
}

https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-thread

type DeleteFileRequest

type DeleteFileRequest struct {
	// ID of the file to delete.
	//
	// Required.
	ID string `json:"id"`
}

https://platform.openai.com/docs/api-reference/files/delete

type DeleteFileResponse

type DeleteFileResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
}

DeleteFileResponse ...

https://platform.openai.com/docs/api-reference/files/delete

type DeleteFineTuneModelResponse

type DeleteFineTuneModelResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
}

https://platform.openai.com/docs/api-reference/fine-tunes/delete-model

type Function

type Function struct {
	// Name is the name of the function.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-name
	//
	// Required.
	Name string `json:"name"`

	// Description is a description of the function.
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-description
	//
	// Optional.
	Description string `json:"description,omitempty"`

	// Parameters are the arguments to the function.
	//
	// The parameters the functions accepts, described as a JSON Schema object.
	// See the guide for examples, and the JSON Schema reference for documentation
	// about the format.
	//
	// https://json-schema.org/understanding-json-schema/
	//
	// https://platform.openai.com/docs/guides/gpt/function-calling
	//
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-parameters
	//
	// Required.
	Parameters *JSONSchema `json:"parameters,omitempty"`
}

Function is a logical function that can be called by the model.

type FunctionCall

type FunctionCall struct {
	Name      string                `json:"name"`
	Arguments FunctionCallArguments `json:"arguments"`
}

FunctionCall describes a function call.

func (*FunctionCall) MarshalJSON

func (f *FunctionCall) MarshalJSON() ([]byte, error)

MarshalJSON marshals the function call into a JSON string.

func (*FunctionCall) UnmarshalJSON

func (f *FunctionCall) UnmarshalJSON(b []byte) error

Implement custom JSON marshalling and unmarshalling to handle arguments, which come from a JSON string from the API directly.

We turn this into a map[string]any that is a little easier to work with.

type FunctionCallArguments

type FunctionCallArguments map[string]any

FunctionCallArguments is a map of argument name to value.

type FunctionCallControl

type FunctionCallControl interface {
	// contains filtered or unexported methods
}

FunctionCallControl is an option used to control the behavior of a function call in a chat request. It can be used to specify the name of the function to call, "none", or "auto" (the default).

https://platform.openai.com/docs/api-reference/chat/create#chat/create-function_call

type FunctionCallControlAuto

type FunctionCallControlAuto struct{}

FunctionCallControlAuto is a function call option that indicates that the function to call should be determined automatically.

func (FunctionCallControlAuto) MarshalJSON

func (FunctionCallControlAuto) MarshalJSON() ([]byte, error)

MarshalJSON marshals the function call option into a JSON string.

type FunctionCallControlName

type FunctionCallControlName string

FunctionCallControlName is a function call option that indicates that the function to call should be determined by the given name.

func FunctionCallName

func FunctionCallName(name string) FunctionCallControlName

func (FunctionCallControlName) MarshalJSON

func (f FunctionCallControlName) MarshalJSON() ([]byte, error)

MarshalJSON marshals the function call option into a JSON string.

type FunctionCallControlNone

type FunctionCallControlNone struct{}

FunctionCallControlNone is a function call option that indicates that no function should be called.

func (FunctionCallControlNone) MarshalJSON

func (FunctionCallControlNone) MarshalJSON() ([]byte, error)

MarshalJSON marshals the function call option into a JSON string.

type GetAssistantRequest

type GetAssistantRequest struct {
	// https://platform.openai.com/docs/api-reference/assistants/get#assistants/get-id
	//
	// Required.
	ID string `json:"assistant_id"`
}

type GetFileContentRequest

type GetFileContentRequest struct {
	// ID of the file to retrieve.
	//
	// Required.
	ID string `json:"id"`
}

https://platform.openai.com/docs/api-reference/files/retrieve-content

type GetFileContentResponse

type GetFileContentResponse struct {
	// Body is the file content returned by the OpenAI API.
	//
	// The caller is responsible for closing the body, and should do so as soon as possible.
	Body io.ReadCloser
}

GetFileContentResponse ...

https://platform.openai.com/docs/api-reference/files/retrieve-content

type GetFileInfoRequest

type GetFileInfoRequest struct {
	// ID of the file to retrieve.
	//
	// Required.
	ID string `json:"id"`
}

https://platform.openai.com/docs/api-reference/files/retrieve

type GetFileInfoResponse

type GetFileInfoResponse struct {
	ID        string `json:"id"`
	Object    string `json:"object"`
	Bytes     int    `json:"bytes"`
	CreatedAt int    `json:"created_at"`
	Filename  string `json:"filename"`
	Purpose   string `json:"purpose"`
}

GetFileInfoResponse ...

https://platform.openai.com/docs/api-reference/files/retrieve

type GetFineTuneResponse

type GetFineTuneResponse struct {
	ID        string `json:"id"`
	Object    string `json:"object"`
	Model     string `json:"model"`
	CreatedAt int    `json:"created_at"`
	Events    []struct {
		Object    string `json:"object"`
		CreatedAt int    `json:"created_at"`
		Level     string `json:"level"`
		Message   string `json:"message"`
	} `json:"events"`
	FineTunedModel string `json:"fine_tuned_model"`
	Hyperparams    struct {
		BatchSize              int     `json:"batch_size"`
		LearningRateMultiplier float64 `json:"learning_rate_multiplier"`
		NEpochs                int     `json:"n_epochs"`
		PromptLossWeight       float64 `json:"prompt_loss_weight"`
	} `json:"hyperparams"`
	OrganizationID string `json:"organization_id"`
	ResultFiles    []struct {
		ID        string `json:"id"`
		Object    string `json:"object"`
		Bytes     int    `json:"bytes"`
		CreatedAt int    `json:"created_at"`
		Filename  string `json:"filename"`
		Purpose   string `json:"purpose"`
	} `json:"result_files"`
	Status          string `json:"status"`
	ValidationFiles []any  `json:"validation_files"`
	TrainingFiles   []struct {
		ID        string `json:"id"`
		Object    string `json:"object"`
		Bytes     int    `json:"bytes"`
		CreatedAt int    `json:"created_at"`
		Filename  string `json:"filename"`
		Purpose   string `json:"purpose"`
	} `json:"training_files"`
	UpdatedAt int `json:"updated_at"`
}

https://platform.openai.com/docs/api-reference/fine-tunes/retrieve

type JSONSchema

type JSONSchema struct {
	// Type is the type of the schema.
	Type string `json:"type,omitempty"`

	// Description is the description of the schema.
	Description string `json:"description,omitempty"`

	// Properties is the properties of the schema.
	Properties map[string]*JSONSchema `json:"properties,omitempty"`

	// Required is the required properties of the schema.
	Required []string `json:"required,omitempty"`

	// Enum is the enum of the schema.
	Enum []string `json:"enum,omitempty"`

	// Items is the items of the schema.
	Items *JSONSchema `json:"items,omitempty"`

	// AdditionalProperties is the additional properties of the schema.
	AdditionalProperties *JSONSchema `json:"additionalProperties,omitempty"`

	// Ref is the ref of the schema.
	Ref string `json:"$ref,omitempty"`

	// AnyOf is the anyOf of the schema.
	AnyOf []*JSONSchema `json:"anyOf,omitempty"`

	// AllOf is the allOf of the schema.
	AllOf []*JSONSchema `json:"allOf,omitempty"`

	// OneOf is the oneOf of the schema.
	OneOf []*JSONSchema `json:"oneOf,omitempty"`

	// Default is the default of the schema.
	Default any `json:"default,omitempty"`

	// Pattern is the pattern of the schema.
	Pattern string `json:"pattern,omitempty"`

	// MinItems is the minItems of the schema.
	MinItems int `json:"minItems,omitempty"`

	// MaxItems is the maxItems of the schema.
	MaxItems int `json:"maxItems,omitempty"`

	// UniqueItems is the uniqueItems of the schema.
	UniqueItems bool `json:"uniqueItems,omitempty"`

	// MultipleOf is the multipleOf of the schema.
	MultipleOf int `json:"multipleOf,omitempty"`

	// Min is the minimum of the schema.
	Min int `json:"min,omitempty"`

	// Max is the maximum of the schema.
	Max int `json:"max,omitempty"`

	// ExclusiveMin is the exclusiveMinimum of the schema.
	ExclusiveMin bool `json:"exclusiveMinimum,omitempty"`

	// ExclusiveMax is the exclusiveMaximum of the schema.
	ExclusiveMax bool `json:"exclusiveMaximum,omitempty"`
}

JSONSchema is a JSON Schema.

https://json-schema.org/understanding-json-schema/reference/index.html

type ListFilesRequest

type ListFilesRequest struct {
	// https://platform.openai.com/docs/api-reference/files/list#files-list-purpose
	//
	// Optional. Filter to only list files with the specified purpose (assistants, fine-tune, etc).
	Purpose string `json:"purpose,omitempty"`
}

https://platform.openai.com/docs/api-reference/files/list

type ListFilesResponse

type ListFilesResponse struct {
	Data []struct {
		ID        string `json:"id"`
		Object    string `json:"object"`
		Bytes     int    `json:"bytes"`
		CreatedAt int    `json:"created_at"`
		Filename  string `json:"filename"`
		Purpose   string `json:"purpose"`
	} `json:"data"`
	Object string `json:"object"`
}

https://platform.openai.com/docs/api-reference/files/list

type ListFineTuneEventsResponse

type ListFineTuneEventsResponse struct {
	Object string `json:"object"`
	Data   []struct {
		Object    string `json:"object"`
		CreatedAt int    `json:"created_at"`
		Level     string `json:"level"`
		Message   string `json:"message"`
	} `json:"data"`

	// https://platform.openai.com/docs/api-reference/fine-tunes/events#fine-tunes/events-stream
	//
	// Only present if stream=true. Up to the caller to close the stream, e.g.: defer res.Stream.Close()
	Stream io.ReadCloser `json:"-"`
}

https://platform.openai.com/docs/api-reference/fine-tunes/events

type ListFineTunesResponse

type ListFineTunesResponse struct {
	Object string `json:"object"`
	Data   []struct {
		ID              string         `json:"id"`
		Object          string         `json:"object"`
		Model           string         `json:"model"`
		CreatedAt       int            `json:"created_at"`
		FineTunedModel  any            `json:"fine_tuned_model"`
		Hyperparams     map[string]any `json:"hyperparams"`
		OrganizationID  string         `json:"organization_id"`
		ResultFiles     []any          `json:"result_files"`
		Status          string         `json:"status"`
		ValidationFiles []any          `json:"validation_files"`
		TrainingFiles   []any          `json:"training_files"`
		UpdatedAt       int            `json:"updated_at"`
	} `json:"data"`
}

https://platform.openai.com/docs/api-reference/fine-tunes/list

type ListRunStepsResponse

type ListRunStepsResponse struct {
	Data []RunStep `json:"data"`
}

https://platform.openai.com/docs/api-reference/runs/listRunSteps

type MessageFile

type MessageFile struct {
	ID        string `json:"id"`
	Object    string `json:"object"`
	Created   int    `json:"created"`
	MessageID string `json:"message_id"`
}

https://platform.openai.com/docs/api-reference/messages/file-object

type Model

type Model = string

Model is a known OpenAI model identifier.

const (
	// ModelAda is the Ada model.
	//
	// Ada is usually the fastest model and can perform tasks like parsing text, address correction and certain kinds of classification
	// tasks that don’t require too much nuance. Ada’s performance can often be improved by providing more context.
	//
	// Good at: Parsing text, simple classification, address correction, keywords
	//
	// Note: Any task performed by a faster model like Ada can be performed by a more powerful model like Curie or Davinci.
	//
	// https://beta.openai.com/docs/models/ada
	ModelAda Model = "ada"

	// ModelBabbage is the Babbage model.
	//
	// Babbage can perform straightforward tasks like simple classification. It’s also quite capable when it comes to Semantic Search
	// ranking how well documents match up with search queries.
	//
	// Good at: Moderate classification, semantic search classification
	//
	// https://beta.openai.com/docs/models/babbage
	ModelBabbage Model = "babbage"

	// ModelCurie is the Curie model.
	//
	// Curie is extremely powerful, yet very fast. While Davinci is stronger when it comes to analyzing complicated text, Curie is
	// quite capable for many nuanced tasks like sentiment classification and summarization. Curie is also quite good at answering
	// questions and performing Q&A and as a general service chatbot.
	//
	// Good at: Language translation, complex classification, text sentiment, summarization
	//
	// https://beta.openai.com/docs/models/curie
	ModelCurie Model = "curie"

	// ModelDavinci is the Davinci model.
	//
	// Davinci is the most capable model family and can perform any task the other models can perform and often with less instruction.
	// For applications requiring a lot of understanding of the content, like summarization for a specific audience and creative content
	// generation, Davinci is going to produce the best results. These increased capabilities require more compute resources, so Davinci
	// costs more per API call and is not as fast as the other models.
	//
	// Another area where Davinci shines is in understanding the intent of text. Davinci is quite good at solving many kinds of logic problems
	// and explaining the motives of characters. Davinci has been able to solve some of the most challenging AI problems involving cause and effect.
	//
	// Good at: Complex intent, cause and effect, summarization for audience
	//
	// https://beta.openai.com/docs/models/davinci
	ModelDavinci Model = "davinci"

	// Most capable GPT-3 model. Can do any task the other models can do, often with higher quality, longer output and better instruction-following.
	// Also supports inserting completions within text.
	ModelTextDavinciEdit003 Model = "text-davinci-003"

	// Very capable, but faster and lower cost than Davinci.
	ModelTextCurie001 Model = "text-curie-001"

	// Capable of straightforward tasks, very fast, and lower cost.
	ModelBabbage001 Model = "text-babbage-001"

	// Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
	ModelAda001 Model = "text-ada-001"

	// Most capable Codex model. Particularly good at translating natural language to code. In addition to completing code, also supports inserting completions within code.
	ModelCodeDavinci002 Model = "code-davinci-002"

	// Almost as capable as Davinci Codex, but slightly faster. This speed advantage may make it preferable for real-time applications.
	ModelCodeCushman001 Model = "code-cushman-001"

	// Used for the CreateEdit API endpoint.
	ModelTextDavinciEdit001 Model = "text-davinci-edit-001"
	ModelCodeDavinciEdit001 Model = "code-davinci-edit-001"

	// https://platform.openai.com/docs/guides/embeddings/embedding-models
	ModelTextEmbeddingAda001 Model = "text-embedding-ada-001"

	// This is the previously recommend model for nearly all embedding use cases.
	//
	// https://openai.com/blog/new-and-improved-embedding-model
	ModelTextEmbeddingAda002 Model = "text-embedding-ada-002"

	// These models are the latest and greatest for embedding use cases.
	//
	// https://openai.com/blog/new-embedding-models-and-api-updates
	ModelTextEmbedding3Small Model = "text-embedding-3-small"
	ModelTextEmbedding3Large Model = "text-embedding-3-large"

	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-model
	ModelGPT35Turbo             Model = "gpt-3.5-turbo"
	ModelGPT35Turbo0301         Model = "gpt-3.5-turbo-0301"
	ModelGPT35Turbo0613         Model = "gpt-3.5-turbo-0613"
	ModelGPT35Turbo1106         Model = "gpt-3.5-turbo-1106"
	ModelGPT35Turbo16k          Model = "gpt-3.5-turbo-16k"
	ModelGPT35Turbo16k0613      Model = "gpt-3.5-turbo-16k-0613"
	ModelGPT35TurboInstruct     Model = "gpt-3.5-turbo-instruct"
	ModelGPT35TurboInstruct0914 Model = "gpt-3.5-turbo-instruct-0914"
	ModelGPT35Turbo0125         Model = "gpt-3.5-turbo-0125"

	ModelGPT4              Model = "gpt-4"
	ModelGPT40314          Model = "gpt-4-0314"
	ModelGPT40613          Model = "gpt-4-0613"
	ModelGPT432K           Model = "gpt-4-32k"
	ModelGPT432K0314       Model = "gpt-4-32k-0314"
	ModelGPT41106Previw    Model = "gpt-4-1106-preview"
	ModelGPT4VisionPreview Model = "gpt-4-vision-preview"
	ModelGPT40125Preview   Model = "gpt-4-0125-preview"
	ModelGPT4TurboPreview  Model = "gpt-4-turbo-preview"

	ModelWhisper1 Model = "whisper-1"

	ModelTTS1       Model = "tts-1"
	ModelTTS11106   Model = "tts-1-1106"
	ModelTTS1HD     Model = "tts-1-hd"
	ModelTTS1HD1106 Model = "tts-1-hd-1106"

	ModelTextModeration007    Model = "text-moderation-007"
	ModelTextModerationLatest Model = "text-moderation-latest"
	ModelTextModerationStable Model = "text-moderation-stable"

	ModelDallE2 Model = "dall-e-2"
	ModelDallE3 Model = "dall-e-3"
)

https://beta.openai.com/docs/models/finding-the-right-model

type Models

type Models struct {
	Object string `json:"object"`
	Data   []struct {
		ID         string `json:"id"`
		Object     string `json:"object"`
		Created    int    `json:"created"`
		OwnedBy    string `json:"owned_by"`
		Permission []struct {
			ID                 string      `json:"id"`
			Object             string      `json:"object"`
			Created            int         `json:"created"`
			AllowCreateEngine  bool        `json:"allow_create_engine"`
			AllowSampling      bool        `json:"allow_sampling"`
			AllowLogprobs      bool        `json:"allow_logprobs"`
			AllowSearchIndices bool        `json:"allow_search_indices"`
			AllowView          bool        `json:"allow_view"`
			AllowFineTuning    bool        `json:"allow_fine_tuning"`
			Organization       string      `json:"organization"`
			Group              interface{} `json:"group"`
			IsBlocking         bool        `json:"is_blocking"`
		} `json:"permission"`
		Root   string      `json:"root"`
		Parent interface{} `json:"parent"`
	} `json:"data"`
}

https://platform.openai.com/docs/api-reference/models/list

type RateLimiters

type RateLimiters struct {
	Chat struct {
		Requests *rate.Limiter
		Tokens   *rate.Limiter
	}
	Text struct {
		Requests *rate.Limiter
		Tokens   *rate.Limiter
	}
	Embedding struct {
		Requests *rate.Limiter
		Tokens   *rate.Limiter
	}
	Images struct {
		Requests *rate.Limiter
	}
	Audio struct {
		Requests *rate.Limiter
	}
}

RateLimiters is a struct that holds all of the rate limiters for the OpenAI API that can be used by clients to rate limit their requests.

These are not enforced by the client by default, but can be used to rate limit requests to the OpenAI API by calling the `Allow()` method on appropriate limiter before making a request.

Example

// If the rate limiter allows the request, make the request.
if openai.RateLimits.Chat.Requests.Allow() {
    resp, err := client.CreateChat(ctx, &openai.CreateChatRequest{
        ...
    })
    ...
}

 // Wait for the rate limiter to allow the request.
 if err := openai.RateLimits.Chat.Requests.Wait(ctx); err == nil {
     resp, err := client.CreateChat(ctx, &openai.CreateChatRequest{
         ...
     })
     ...
 }

func NewRateLimiters

func NewRateLimiters() *RateLimiters

NewRateLimiters returns a new set of rate limiters for the OpenAI API.

type Role

type Role = string

Role is the role of the user for a chat message.

const (
	// RoleSystem is a special role used to ground the model within the context of the conversation.
	//
	// For example, it may be used to provide a name for the assistant, or to provide other global information
	// or instructions that the model should know about.
	RoleSystem Role = "system"

	// RoleUser is the role of the user for a chat message.
	RoleUser Role = "user"

	// RoleAssistant is the role of the assistant for a chat message.
	RoleAssistant Role = "assistant"

	// RoleFunction is a special role used to represent a function call.
	RoleFunction Role = "function"
)

type Run

type Run struct {
	ID             string           `json:"id"`
	Object         string           `json:"object"`
	CreatedAt      int              `json:"created_at"`
	ThreadID       string           `json:"thread_id"`
	AssistantID    string           `json:"assistant_id"`
	Status         string           `json:"status"`
	RequiredAction string           `json:"required_action,omitempty"`
	LastError      map[string]any   `json:"last_error,omitempty"`
	ExpiresAt      int              `json:"expires_at"`
	StartedAt      int              `json:"started_at,omitempty"`
	CancelledAt    int              `json:"cancelled_at,omitempty"`
	FailedAt       int              `json:"failed_at,omitempty"`
	CompletedAt    int              `json:"completed_at,omitempty"`
	Model          string           `json:"model"`
	Instructions   string           `json:"instructions"`
	Tools          []map[string]any `json:"tools"`
	FileIDs        []string         `json:"file_ids"`
	Metadata       map[string]any   `json:"metadata"`
}

https://platform.openai.com/docs/api-reference/runs/object

type RunStatus

type RunStatus = string

https://platform.openai.com/docs/api-reference/runs/object#runs/object-status

const (
	RunStatusQueued         RunStatus = "queued"
	RunStatusInProgress     RunStatus = "in_progress"
	RunStatusRequiresAction RunStatus = "requires_action"
	RunStatusCancelling     RunStatus = "cancelling"
	RunStatusCancelled      RunStatus = "cancelled"
	RunStatusFailed         RunStatus = "failed"
	RunStatusCompleted      RunStatus = "completed"
	RunStatusExpired        RunStatus = "expired"
)

type RunStep

type RunStep struct {
	ID          string         `json:"id"`
	Object      string         `json:"object"`
	Created     int            `json:"created"`
	AssistantID string         `json:"assistant_id"`
	ThreadID    string         `json:"thread_id"`
	RunID       string         `json:"run_id"`
	Type        string         `json:"type"`
	Status      string         `json:"status"`
	StepDetails map[string]any `json:"step_details"`
	LastError   map[string]any `json:"last_error,omitempty"`
	ExpiredAt   int            `json:"expired_at,omitempty"`
	CanceledAt  int            `json:"canceled_at,omitempty"`
	FailedAt    int            `json:"failed_at,omitempty"`
	CompletedAt int            `json:"completed_at,omitempty"`
	Metadata    map[string]any `json:"metadata,omitempty"`
}

https://platform.openai.com/docs/api-reference/runs/step-object

type Thread

type Thread struct {
	ID       string         `json:"id"`
	Object   string         `json:"object"`
	Created  int            `json:"created"`
	Metadata map[string]any `json:"metadata"`
}

https://platform.openai.com/docs/api-reference/threads/object

type ThreadMessage

type ThreadMessage struct {
	ID          string                 `json:"id"`
	Object      string                 `json:"object"`
	CreatedAt   int                    `json:"created_at"`
	ThreadID    string                 `json:"thread_id"`
	Role        string                 `json:"role"`
	Content     []ThreadMessageContent `json:"content"`
	AssistantID string                 `json:"assistant_id,omitempty"`
	RunID       string                 `json:"run_id,omitempty"`
	FileIDs     []string               `json:"file_ids,omitempty"`
	Metadata    map[string]any         `json:"metadata,omitempty"`
}

https://platform.openai.com/docs/api-reference/messages/object

type ThreadMessageContent

type ThreadMessageContent map[string]any

https://platform.openai.com/docs/api-reference/messages/object

func (ThreadMessageContent) Text

func (t ThreadMessageContent) Text() string

Text returns the text value from the thread message content, or an empty string if the text value is not present.

type UpdateThreadResponse

type UpdateThreadResponse = Thread

type UploadFileRequest

type UploadFileRequest struct {
	// Name of the JSON Lines file to be uploaded.
	//
	// If the purpose is set to "fine-tune", each line is a JSON
	// record with "prompt" and "completion" fields representing
	// your training examples.
	//
	// Required.
	Name string `json:"name"`

	// Purpose of the uploaded documents.
	//
	// Use "fine-tune" for Fine-tuning. This allows us to validate
	// the format of the uploaded file.
	//
	// Required.
	Purpose string `json:"purpose"`

	// Body of the file to upload.
	//
	// Required.
	Body io.Reader `json:"file"` // TODO: how to handle this?
}

https://platform.openai.com/docs/api-reference/files/upload

type UploadFileResponse

type UploadFileResponse struct {
	ID        string `json:"id"`
	Object    string `json:"object"`
	Bytes     int    `json:"bytes"`
	CreatedAt int    `json:"created_at"`
	Filename  string `json:"filename"`
	Purpose   string `json:"purpose"`
}

UploadFileResponse is the response returned after uploading a file, describing the stored file's ID, size in bytes, creation time, filename, and purpose.

https://platform.openai.com/docs/api-reference/files/upload

Directories

Path Synopsis
cmd
Package embeddings provides utilities for working with embeddings.
Package embeddings provides utilities for working with embeddings.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL