Documentation ¶
Overview ¶
Package openai provides a client for the OpenAI API.
Index ¶
- Variables
- func FunctionCallArgumentValue[T any](name string, args FunctionCallArguments) (T, error)
- func WaitForRun(ctx context.Context, client *Client, threadID, runID string, ...) error
- type Assistant
- type AssistantFile
- type AssistantToolOutput
- type AudioTranscriptableFile
- type AudioTranscriptionFile
- type AudioTranscriptionFileReadCloser
- type CancelFineTuneRequest
- type CancelFineTuneResponse
- type CancelRunRequest
- type ChatMessage
- type ChatMessageStreamChunk
- type ChatRole
- type Client
- func (c *Client) CancelFineTune(ctx context.Context, req *CancelFineTuneRequest) (*CancelFineTuneResponse, error)
- func (c *Client) CancelRun(ctx context.Context, req *CancelRunRequest) error
- func (c *Client) CreateAssistant(ctx context.Context, req *CreateAssistantRequest) (*CreateAssistantResponse, error)
- func (c *Client) CreateAssistantFile(ctx context.Context, req *CreateAssistantFileRequest) (*CreateAssistantFileResponse, error)
- func (c *Client) CreateAudioTranscription(ctx context.Context, req *CreateAudioTranscriptionRequest) (CreateAudioTranscriptionResponse, error)
- func (c *Client) CreateChat(ctx context.Context, req *CreateChatRequest) (*CreateChatResponse, error)
- func (c *Client) CreateCompletion(ctx context.Context, req *CreateCompletionRequest) (*CreateCompletionResponse, error)deprecated
- func (c *Client) CreateEdit(ctx context.Context, req *CreateEditRequest) (*CreateEditResponse, error)deprecated
- func (c *Client) CreateEmbedding(ctx context.Context, req *CreateEmbeddingRequest) (*CreateEmbeddingResponse, error)
- func (c *Client) CreateFineTune(ctx context.Context, req *CreateFineTuneRequest) (*CreateFineTuneResponse, error)
- func (c *Client) CreateImage(ctx context.Context, req *CreateImageRequest) (*CreateImageResponse, error)
- func (c *Client) CreateMessage(ctx context.Context, req *CreateMessageRequest) (*CreateMessageResponse, error)
- func (c *Client) CreateModeration(ctx context.Context, req *CreateModerationRequest) (*CreateModerationResponse, error)
- func (c *Client) CreateRun(ctx context.Context, req *CreateRunRequest) (*CreateRunResponse, error)
- func (c *Client) CreateSpeech(ctx context.Context, req *CreateSpeechRequest) (io.ReadCloser, error)
- func (c *Client) CreateThread(ctx context.Context, req *CreateThreadRequest) (*CreateThreadResponse, error)
- func (c *Client) CreateThreadAndRun(ctx context.Context, req *CreateThreadAndRunRequest) (*CreateThreadAndRunResponse, error)
- func (c *Client) DeleteAssistant(ctx context.Context, req *DeleteAssistantRequest) error
- func (c *Client) DeleteAssistantFile(ctx context.Context, req *DeleteAssistantFileRequest) error
- func (c *Client) DeleteFile(ctx context.Context, req *DeleteFileRequest) (*DeleteFileResponse, error)
- func (c *Client) DeleteFineTuneModel(ctx context.Context, req *DeleteFineTuneModelRequest) (*DeleteFineTuneModelResponse, error)
- func (c *Client) DeleteThread(ctx context.Context, req *DeleteThreadRequest) error
- func (c *Client) GetAssistant(ctx context.Context, req *GetAssistantRequest) (*GetAssistantResponse, error)
- func (c *Client) GetAssistantFile(ctx context.Context, req *GetAssistantFileRequest) (*GetAssistantFileResponse, error)
- func (c *Client) GetFileContent(ctx context.Context, req *GetFileContentRequest) (*GetFileContentResponse, error)
- func (c *Client) GetFileInfo(ctx context.Context, req *GetFileInfoRequest) (*GetFileInfoResponse, error)
- func (c *Client) GetFineTune(ctx context.Context, req *GetFineTuneRequest) (*GetFineTuneResponse, error)
- func (c *Client) GetMessage(ctx context.Context, req *GetMessageRequest) (*GetMessageResponse, error)
- func (c *Client) GetMessageFile(ctx context.Context, req *GetMessageFileRequest) (*GetMessageFileResponse, error)
- func (c *Client) GetRun(ctx context.Context, req *GetRunRequest) (*GetRunResponse, error)
- func (c *Client) GetRunStep(ctx context.Context, req *GetRunStepRequest) (*GetRunStepResponse, error)
- func (c *Client) GetThread(ctx context.Context, req *GetThreadRequest) (*GetThreadResponse, error)
- func (c *Client) ListAssistantFiles(ctx context.Context, req *ListAssistantFilesRequest) (*ListAssistantFilesResponse, error)
- func (c *Client) ListAssistants(ctx context.Context, req *ListAssistantsRequest) (*ListAssistantsResponse, error)
- func (c *Client) ListFiles(ctx context.Context, req *ListFilesRequest) (*ListFilesResponse, error)
- func (c *Client) ListFineTuneEvents(ctx context.Context, req *ListFineTuneEventsRequest) (*ListFineTuneEventsResponse, error)
- func (c *Client) ListFineTunes(ctx context.Context, req *ListFineTunesRequest) (*ListFineTunesResponse, error)
- func (c *Client) ListMessageFiles(ctx context.Context, req *ListMessageFilesRequest) (*ListMessageFilesResponse, error)
- func (c *Client) ListMessages(ctx context.Context, req *ListMessagesRequest) (*ListMessagesResponse, error)
- func (c *Client) ListModels(ctx context.Context) (*Models, error)
- func (c *Client) ListRunSteps(ctx context.Context, req *ListRunStepsRequest) (*ListRunStepsResponse, error)
- func (c *Client) SubmitToolOutputs(ctx context.Context, req *SubmitToolOutputsRequest) (*SubmitToolOutputsResponse, error)
- func (c *Client) UpdateAssistant(ctx context.Context, req *UpdateAssistantRequest) (*Assistant, error)
- func (c *Client) UpdateMessage(ctx context.Context, req *UpdateMessageRequest) (*UpdateMessageResponse, error)
- func (c *Client) UpdateRun(ctx context.Context, req *UpdateRunRequest) (*UpdateRunResponse, error)
- func (c *Client) UpdateThread(ctx context.Context, req *UpdateThreadRequest) (*UpdateThreadResponse, error)
- func (c *Client) UploadFile(ctx context.Context, req *UploadFileRequest) (*UploadFileResponse, error)
- type ClientOption
- type CreateAssistantFileRequest
- type CreateAssistantFileResponse
- type CreateAssistantRequest
- type CreateAssistantResponse
- type CreateAudioTranscriptionRequest
- type CreateAudioTranscriptionResponse
- type CreateAudioTranscriptionResponseJSON
- type CreateChatRequest
- type CreateChatResponse
- type CreateCompletionRequest
- type CreateCompletionResponse
- type CreateEditRequest
- type CreateEditResponse
- type CreateEmbeddingRequest
- type CreateEmbeddingResponse
- type CreateFineTuneRequest
- type CreateFineTuneResponse
- type CreateImageRequest
- type CreateImageResponse
- type CreateMessageRequest
- type CreateMessageResponse
- type CreateModerationRequest
- type CreateModerationResponse
- type CreateRunRequest
- type CreateRunResponse
- type CreateSpeechRequest
- type CreateThreadAndRunRequest
- type CreateThreadAndRunRequestInitialThread
- type CreateThreadAndRunRequestInitialThreadMessage
- type CreateThreadAndRunResponse
- type CreateThreadRequest
- type CreateThreadResponse
- type DeleteAssistantFileRequest
- type DeleteAssistantRequest
- type DeleteFileRequest
- type DeleteFileResponse
- type DeleteFineTuneModelRequest
- type DeleteFineTuneModelResponse
- type DeleteThreadRequest
- type Function
- type FunctionCall
- type FunctionCallArguments
- type FunctionCallControl
- type FunctionCallControlAuto
- type FunctionCallControlName
- type FunctionCallControlNone
- type GetAssistantFileRequest
- type GetAssistantFileResponse
- type GetAssistantRequest
- type GetAssistantResponse
- type GetFileContentRequest
- type GetFileContentResponse
- type GetFileInfoRequest
- type GetFileInfoResponse
- type GetFineTuneRequest
- type GetFineTuneResponse
- type GetMessageFileRequest
- type GetMessageFileResponse
- type GetMessageRequest
- type GetMessageResponse
- type GetRunRequest
- type GetRunResponse
- type GetRunStepRequest
- type GetRunStepResponse
- type GetThreadRequest
- type GetThreadResponse
- type JSONSchema
- type ListAssistantFilesRequest
- type ListAssistantFilesResponse
- type ListAssistantsRequest
- type ListAssistantsResponse
- type ListFilesRequest
- type ListFilesResponse
- type ListFineTuneEventsRequest
- type ListFineTuneEventsResponse
- type ListFineTunesRequest
- type ListFineTunesResponse
- type ListMessageFilesRequest
- type ListMessageFilesResponse
- type ListMessagesRequest
- type ListMessagesResponse
- type ListRunStepsRequest
- type ListRunStepsResponse
- type ListRunsRequest
- type ListRunsResponse
- type MessageFile
- type Model
- type Models
- type RateLimiters
- type Role
- type Run
- type RunStatus
- type RunStep
- type SubmitToolOutputsRequest
- type SubmitToolOutputsResponse
- type Thread
- type ThreadMessage
- type ThreadMessageContent
- type UpdateAssistantRequest
- type UpdateMessageRequest
- type UpdateMessageResponse
- type UpdateRunRequest
- type UpdateRunResponse
- type UpdateThreadRequest
- type UpdateThreadResponse
- type UploadFileRequest
- type UploadFileResponse
Examples ¶
Constants ¶
This section is empty.
Variables ¶
var ( FunctionCallAuto = FunctionCallControlAuto{} FunctionCallNone = FunctionCallControlNone{} )
var RateLimits = NewRateLimiters()
RateLimits is the default rate limiters for the OpenAI API.
Multiple Organizations ¶
If using multiple organizations, users should create their own rate limiters using the `NewRateLimiters()` function.
Functions ¶
func FunctionCallArgumentValue ¶
func FunctionCallArgumentValue[T any](name string, args FunctionCallArguments) (T, error)
FunctionCallArgumentValue returns the value of the argument with the given name.
func WaitForRun ¶
func WaitForRun(ctx context.Context, client *Client, threadID, runID string, interval time.Duration) error
WaitForRun polls the API at the given interval until the run is completed, failed, cancelled, or expired.
It returns nil if the run completed successfully, or an error if the run failed, was cancelled, or expired.
Types ¶
type Assistant ¶
type Assistant struct { ID string `json:"id"` Object string `json:"object"` Created int `json:"created"` Name string `json:"name"` Description string `json:"description"` Model string `json:"model"` Instructions string `json:"instructions"` Tools []map[string]any `json:"tools"` FileIDs []string `json:"file_ids"` Metadata map[string]any `json:"metadata"` }
https://platform.openai.com/docs/api-reference/assistants/object
type AssistantFile ¶
type AssistantFile struct { ID string `json:"id"` Object string `json:"object"` Created int `json:"created"` AssistantID string `json:"assistant_id"` }
https://platform.openai.com/docs/api-reference/assistants/file-object
type AssistantToolOutput ¶
type AudioTranscriptableFile ¶
type AudioTranscriptableFile interface { io.ReadCloser Name() string }
func NewAudioTranscriptableFileFromReadCloser ¶
func NewAudioTranscriptableFileFromReadCloser(rc io.ReadCloser, name string) AudioTranscriptableFile
type AudioTranscriptionFile ¶
type AudioTranscriptionFile struct { ReadCloser *AudioTranscriptionFileReadCloser File *os.File }
AudioTranscriptionFile is a file to be used in a CreateAudioTranscriptionRequest, allowing a caller to provide various types of file types.
Only provide one of the fields in this struct.
https://platform.openai.com/docs/api-reference/audio/create#audio/create-file
type AudioTranscriptionFileReadCloser ¶
type AudioTranscriptionFileReadCloser struct { io.ReadCloser // contains filtered or unexported fields }
func (*AudioTranscriptionFileReadCloser) Name ¶
func (a *AudioTranscriptionFileReadCloser) Name() string
type CancelFineTuneRequest ¶
type CancelFineTuneRequest struct {
ID string `json:"id"`
}
https://platform.openai.com/docs/api-reference/fine-tunes/cancel
type CancelFineTuneResponse ¶
type CancelFineTuneResponse struct { ID string `json:"id"` Object string `json:"object"` Model string `json:"model"` CreatedAt int `json:"created_at"` Events []any `json:"events"` FineTunedModel any `json:"fine_tuned_model"` Hyperparams any `json:"hyperparams"` OrganizationID string `json:"organization_id"` ResultFiles []any `json:"result_files"` Status string `json:"status"` ValidationFiles []any `json:"validation_files"` TrainingFiles []struct { ID string `json:"id"` Object string `json:"object"` Bytes int `json:"bytes"` CreatedAt int `json:"created_at"` Filename string `json:"filename"` Purpose string `json:"purpose"` } `json:"training_files"` UpdatedAt int `json:"updated_at"` }
https://platform.openai.com/docs/api-reference/fine-tunes/cancel
type CancelRunRequest ¶
type CancelRunRequest struct { // https://platform.openai.com/docs/api-reference/runs/cancelRun#runs-cancelrun-thread_id // // Required. ThreadID string // https://platform.openai.com/docs/api-reference/runs/cancelRun#runs-cancelrun-run_id // // Required. RunID string }
https://platform.openai.com/docs/api-reference/runs/cancelRun
type ChatMessage ¶
type ChatMessage struct { // Role is the role of the message, e.g. "user" or "bot". // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-role // // Required. Role string `json:"role"` // Content is the text of the message. // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-content // // Optional. Content string `json:"content"` // Name is the author of this message. It is required if role is function, // and it should be the name of the function whose response is in the content. // // May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters. // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-name // // Optional. Name string `json:"name,omitempty"` // FunctionCall the name and arguments of a function that should be called, // as generated by the model. // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-function_call // // Optional. FunctionCall *FunctionCall `json:"function_call,omitempty"` }
type ChatMessageStreamChunk ¶
type ChatMessageStreamChunk struct { ID string `json:"id"` Object string `json:"object"` Created int `json:"created"` Model string `json:"model"` Choices []struct { // Delta is either for role or content. Delta struct { Role *string `json:"role"` Content *string `json:"content"` } `json:"delta"` Index int `json:"index"` FinishReason any `json:"finish_reason"` } `json:"choices"` }
func (*ChatMessageStreamChunk) ContentDelta ¶
func (c *ChatMessageStreamChunk) ContentDelta() bool
ContentDelta reports whether the chunk's delta contains content (rather than a role), or false if there are no choices.
func (*ChatMessageStreamChunk) FirstChoice ¶
func (c *ChatMessageStreamChunk) FirstChoice() (string, error)
FirstChoice returns the content of the message's first choice, or an error if there are no choices.
type ChatRole ¶
type ChatRole = string
ChatRole is a role that can be used in a chat session, either “system”, “user”, or “assistant”.
type Client ¶
type Client struct { // APIKey is the API key to use for requests. APIKey string // HTTPClient is the HTTP client to use for requests. HTTPClient *http.Client // Organization is the organization to use for requests. Organization string }
Client is a client for the OpenAI API.
https://platform.openai.com/docs/api-reference
func NewClient ¶
func NewClient(apiKey string, opts ...ClientOption) *Client
NewClient returns a new Client with the given API key.
Example ¶
c := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
func (*Client) CancelFineTune ¶
func (c *Client) CancelFineTune(ctx context.Context, req *CancelFineTuneRequest) (*CancelFineTuneResponse, error)
https://platform.openai.com/docs/api-reference/fine-tunes/cancel
func (*Client) CancelRun ¶
func (c *Client) CancelRun(ctx context.Context, req *CancelRunRequest) error
https://platform.openai.com/docs/api-reference/runs/cancelRun
func (*Client) CreateAssistant ¶
func (c *Client) CreateAssistant(ctx context.Context, req *CreateAssistantRequest) (*CreateAssistantResponse, error)
https://platform.openai.com/docs/api-reference/assistants/create
func (*Client) CreateAssistantFile ¶
func (c *Client) CreateAssistantFile(ctx context.Context, req *CreateAssistantFileRequest) (*CreateAssistantFileResponse, error)
https://platform.openai.com/docs/api-reference/assistants/createAssistantFile
func (*Client) CreateAudioTranscription ¶
func (c *Client) CreateAudioTranscription(ctx context.Context, req *CreateAudioTranscriptionRequest) (CreateAudioTranscriptionResponse, error)
CreateAudioTranscription transcribes audio into the input language.
func (*Client) CreateChat ¶
func (c *Client) CreateChat(ctx context.Context, req *CreateChatRequest) (*CreateChatResponse, error)
CreateChat sends a chat request to the API to obtain a chat response, creating a completion for the included chat messages (the conversation context and history).
Example ¶
history := []openai.ChatMessage{ { Role: openai.ChatRoleSystem, Content: "You are a helpful assistant for this example.", }, { Role: openai.ChatRoleUser, Content: "Hello!", // Get input from user. }, } resp, _ := client.CreateChat(ctx, &openai.CreateChatRequest{ Model: openai.ModelGPT35Turbo, Messages: history, }) fmt.Println(resp.Choices[0].Message.Content) // Hello how may I help you today? // Update history, summarize, forget, etc. Then repeat. history = append(history, resp.Choices[0].Message)
https://platform.openai.com/docs/api-reference/chat/create
Example ¶
package main import ( "context" "fmt" "os" "strings" "github.com/picatz/openai" ) func main() { c := openai.NewClient(os.Getenv("OPENAI_API_KEY")) ctx := context.Background() messages := []openai.ChatMessage{ { Role: openai.ChatRoleSystem, Content: "You are a helpful assistant familiar with children's stories, and answer in only single words.", }, { Role: "user", Content: "Clifford is a big dog, but what color is he?", }, } resp, err := c.CreateChat(ctx, &openai.CreateChatRequest{ Model: openai.ModelGPT35Turbo, Messages: messages, }) if err != nil { panic(err) } fmt.Println(strings.ToLower(strings.TrimRight(strings.TrimSpace(resp.Choices[0].Message.Content), "."))) }
Output: red
func (*Client) CreateCompletion
deprecated
func (c *Client) CreateCompletion(ctx context.Context, req *CreateCompletionRequest) (*CreateCompletionResponse, error)
CreateCompletion performs a "completion" request using the OpenAI API.
Warning ¶
The completions API endpoint received its final update in July 2023 and has a different interface than the new chat completions endpoint. Instead of the input being a list of messages, the input is a freeform text string called a prompt.
Example ¶
resp, _ := client.CreateCompletion(ctx, &openai.CreateCompletionRequest{ Model: openai.ModelDavinci, Prompt: []string{"Once upon a time"}, MaxTokens: 16, })
Deprecated: github.com/picatz/openai.Client.CreateCompletion is deprecated (legacy). Use github.com/picatz/openai.Client.CreateChat instead.
https://platform.openai.com/docs/api-reference/completions/create
Example ¶
package main import ( "context" "fmt" "os" "github.com/picatz/openai" ) func main() { c := openai.NewClient(os.Getenv("OPENAI_API_KEY")) ctx := context.Background() resp, err := c.CreateCompletion(ctx, &openai.CreateCompletionRequest{ Model: openai.ModelDavinci, Prompt: []string{"The cow jumped over the"}, MaxTokens: 1, N: 1, }) if err != nil { panic(err) } fmt.Println(resp.Choices[0].Text) }
Output: moon
func (*Client) CreateEdit
deprecated
func (c *Client) CreateEdit(ctx context.Context, req *CreateEditRequest) (*CreateEditResponse, error)
CreateEdit performs an "edit" request using the OpenAI API.
Warning ¶
Users of the Edits API and its associated models (e.g., text-davinci-edit-001 or code-davinci-edit-001) will need to migrate to GPT-3.5 Turbo by January 4, 2024.
Example ¶
resp, _ := client.CreateEdit(ctx, &openai.CreateEditRequest{ Model: openai.ModelTextDavinciEdit001, Instruction: "Change the word 'test' to 'example'", Input: "This is a test", })
Deprecated: github.com/picatz/openai.Client.CreateEdit is deprecated (legacy). Use github.com/picatz/openai.Client.CreateChat instead.
https://platform.openai.com/docs/api-reference/edits/create
Example ¶
package main import ( "context" "fmt" "os" "strings" "github.com/picatz/openai" ) func main() { c := openai.NewClient(os.Getenv("OPENAI_API_KEY")) ctx := context.Background() resp, err := c.CreateEdit(ctx, &openai.CreateEditRequest{ Model: openai.ModelTextDavinciEdit001, Instruction: "ONLY change the word 'test' to 'example', with no other changes", Input: "This is a test", }) if err != nil { panic(err) } // Get the words from the response. words := strings.Split(resp.Choices[0].Text, " ") fmt.Println(words[len(words)-1]) }
Output: example
func (*Client) CreateEmbedding ¶
func (c *Client) CreateEmbedding(ctx context.Context, req *CreateEmbeddingRequest) (*CreateEmbeddingResponse, error)
CreateEmbedding performs an "embedding" request using the OpenAI API.
Example ¶
resp, _ := c.CreateEmbedding(ctx, &openai.CreateEmbeddingRequest{ Model: openai.ModelTextEmbeddingAda002, Input: "The food was delicious and the waiter...", })
func (*Client) CreateFineTune ¶
func (c *Client) CreateFineTune(ctx context.Context, req *CreateFineTuneRequest) (*CreateFineTuneResponse, error)
https://platform.openai.com/docs/api-reference/fine-tunes/create
func (*Client) CreateImage ¶
func (c *Client) CreateImage(ctx context.Context, req *CreateImageRequest) (*CreateImageResponse, error)
CreateImage performs an "image" request using the OpenAI API.
Example ¶
resp, _ := c.CreateImage(ctx, &openai.CreateImageRequest{ Prompt: "Golang-style gopher mascot wearing an OpenAI t-shirt", N: 1, Size: "256x256", ResponseFormat: "url", })
https://platform.openai.com/docs/api-reference/images/create
func (*Client) CreateMessage ¶
func (c *Client) CreateMessage(ctx context.Context, req *CreateMessageRequest) (*CreateMessageResponse, error)
https://platform.openai.com/docs/api-reference/messages/createMessage
func (*Client) CreateModeration ¶
func (c *Client) CreateModeration(ctx context.Context, req *CreateModerationRequest) (*CreateModerationResponse, error)
CreateModeration performs a "moderation" request using the OpenAI API.
Example ¶
resp, _ := c.CreateModeration(ctx, &openai.CreateModerationRequest{ Input: "I want to kill them.", })
func (*Client) CreateRun ¶
func (c *Client) CreateRun(ctx context.Context, req *CreateRunRequest) (*CreateRunResponse, error)
https://platform.openai.com/docs/api-reference/runs/createRun
func (*Client) CreateSpeech ¶
func (c *Client) CreateSpeech(ctx context.Context, req *CreateSpeechRequest) (io.ReadCloser, error)
https://platform.openai.com/docs/api-reference/audio/createSpeech#audio-createspeech-response
func (*Client) CreateThread ¶
func (c *Client) CreateThread(ctx context.Context, req *CreateThreadRequest) (*CreateThreadResponse, error)
https://platform.openai.com/docs/api-reference/threads/createThread
func (*Client) CreateThreadAndRun ¶
func (c *Client) CreateThreadAndRun(ctx context.Context, req *CreateThreadAndRunRequest) (*CreateThreadAndRunResponse, error)
https://platform.openai.com/docs/api-reference/runs/createThreadAndRun
func (*Client) DeleteAssistant ¶
func (c *Client) DeleteAssistant(ctx context.Context, req *DeleteAssistantRequest) error
func (*Client) DeleteAssistantFile ¶
func (c *Client) DeleteAssistantFile(ctx context.Context, req *DeleteAssistantFileRequest) error
https://platform.openai.com/docs/api-reference/assistants/deleteAssistantFile
func (*Client) DeleteFile ¶
func (c *Client) DeleteFile(ctx context.Context, req *DeleteFileRequest) (*DeleteFileResponse, error)
DeleteFile performs a "delete file" request using the OpenAI API.
Example ¶
resp, _ := c.DeleteFile(ctx, &openai.DeleteFileRequest{ ID: "file-123", })
func (*Client) DeleteFineTuneModel ¶
func (c *Client) DeleteFineTuneModel(ctx context.Context, req *DeleteFineTuneModelRequest) (*DeleteFineTuneModelResponse, error)
https://platform.openai.com/docs/api-reference/fine-tunes/delete-model
func (*Client) DeleteThread ¶
func (c *Client) DeleteThread(ctx context.Context, req *DeleteThreadRequest) error
https://platform.openai.com/docs/api-reference/threads/deleteThread
func (*Client) GetAssistant ¶
func (c *Client) GetAssistant(ctx context.Context, req *GetAssistantRequest) (*GetAssistantResponse, error)
https://platform.openai.com/docs/api-reference/assistants/get#assistants/get-id
func (*Client) GetAssistantFile ¶
func (c *Client) GetAssistantFile(ctx context.Context, req *GetAssistantFileRequest) (*GetAssistantFileResponse, error)
func (*Client) GetFileContent ¶
func (c *Client) GetFileContent(ctx context.Context, req *GetFileContentRequest) (*GetFileContentResponse, error)
GetFileContent performs a "get file content (retrieve content)" request using the OpenAI API.
Example ¶
resp, _ := c.GetFileContent(ctx, &openai.GetFileContentRequest{ ID: "file-123", })
https://platform.openai.com/docs/api-reference/files/retrieve-content
func (*Client) GetFileInfo ¶
func (c *Client) GetFileInfo(ctx context.Context, req *GetFileInfoRequest) (*GetFileInfoResponse, error)
GetFileInfo performs a "get file info (retrieve)" request using the OpenAI API.
Example ¶
resp, _ := c.GetFileInfo(ctx, &openai.GetFileInfoRequest{ ID: "file-123", })
https://platform.openai.com/docs/api-reference/files/retrieve
func (*Client) GetFineTune ¶
func (c *Client) GetFineTune(ctx context.Context, req *GetFineTuneRequest) (*GetFineTuneResponse, error)
https://platform.openai.com/docs/api-reference/fine-tunes/retrieve
func (*Client) GetMessage ¶
func (c *Client) GetMessage(ctx context.Context, req *GetMessageRequest) (*GetMessageResponse, error)
func (*Client) GetMessageFile ¶
func (c *Client) GetMessageFile(ctx context.Context, req *GetMessageFileRequest) (*GetMessageFileResponse, error)
func (*Client) GetRun ¶
func (c *Client) GetRun(ctx context.Context, req *GetRunRequest) (*GetRunResponse, error)
func (*Client) GetRunStep ¶
func (c *Client) GetRunStep(ctx context.Context, req *GetRunStepRequest) (*GetRunStepResponse, error)
https://platform.openai.com/docs/api-reference/runs/getRunStep
func (*Client) GetThread ¶
func (c *Client) GetThread(ctx context.Context, req *GetThreadRequest) (*GetThreadResponse, error)
func (*Client) ListAssistantFiles ¶
func (c *Client) ListAssistantFiles(ctx context.Context, req *ListAssistantFilesRequest) (*ListAssistantFilesResponse, error)
https://platform.openai.com/docs/api-reference/assistants/listAssistantFiles
func (*Client) ListAssistants ¶
func (c *Client) ListAssistants(ctx context.Context, req *ListAssistantsRequest) (*ListAssistantsResponse, error)
https://platform.openai.com/docs/api-reference/assistants/listAssistants
func (*Client) ListFiles ¶
func (c *Client) ListFiles(ctx context.Context, req *ListFilesRequest) (*ListFilesResponse, error)
ListFiles performs a "list files" request using the OpenAI API.
Example ¶
resp, _ := c.ListFiles(ctx, &openai.ListFilesRequest{})
func (*Client) ListFineTuneEvents ¶
func (c *Client) ListFineTuneEvents(ctx context.Context, req *ListFineTuneEventsRequest) (*ListFineTuneEventsResponse, error)
https://platform.openai.com/docs/api-reference/fine-tunes/events
func (*Client) ListFineTunes ¶
func (c *Client) ListFineTunes(ctx context.Context, req *ListFineTunesRequest) (*ListFineTunesResponse, error)
https://platform.openai.com/docs/api-reference/fine-tunes/list
func (*Client) ListMessageFiles ¶
func (c *Client) ListMessageFiles(ctx context.Context, req *ListMessageFilesRequest) (*ListMessageFilesResponse, error)
func (*Client) ListMessages ¶
func (c *Client) ListMessages(ctx context.Context, req *ListMessagesRequest) (*ListMessagesResponse, error)
func (*Client) ListModels ¶
ListModels lists model identifiers that can be used with the OpenAI API.
Example ¶
resp, _ := client.ListModels(ctx) for _, model := range resp.Data { fmt.Println(model.ID) }
func (*Client) ListRunSteps ¶
func (c *Client) ListRunSteps(ctx context.Context, req *ListRunStepsRequest) (*ListRunStepsResponse, error)
https://platform.openai.com/docs/api-reference/runs/listRunSteps
func (*Client) SubmitToolOutputs ¶
func (c *Client) SubmitToolOutputs(ctx context.Context, req *SubmitToolOutputsRequest) (*SubmitToolOutputsResponse, error)
https://platform.openai.com/docs/api-reference/runs/submitToolOutputs
func (*Client) UpdateAssistant ¶
func (*Client) UpdateMessage ¶
func (c *Client) UpdateMessage(ctx context.Context, req *UpdateMessageRequest) (*UpdateMessageResponse, error)
func (*Client) UpdateRun ¶
func (c *Client) UpdateRun(ctx context.Context, req *UpdateRunRequest) (*UpdateRunResponse, error)
https://platform.openai.com/docs/api-reference/runs/modifyRun
func (*Client) UpdateThread ¶
func (c *Client) UpdateThread(ctx context.Context, req *UpdateThreadRequest) (*UpdateThreadResponse, error)
func (*Client) UploadFile ¶
func (c *Client) UploadFile(ctx context.Context, req *UploadFileRequest) (*UploadFileResponse, error)
UploadFile performs an "upload file" request using the OpenAI API.
Example ¶
resp, _ := c.UploadFile(ctx, &openai.UploadFileRequest{ Name: "fine-tune.jsonl", Purpose: "fine-tune", })
type ClientOption ¶
type ClientOption func(*Client)
ClientOption is a function that configures a Client.
func WithHTTPClient ¶
func WithHTTPClient(c *http.Client) ClientOption
WithHTTPClient is a ClientOption that sets the HTTP client to use for requests.
If the client is nil, then http.DefaultClient is used.
func WithOrganization ¶
func WithOrganization(org string) ClientOption
WithOrganization is a ClientOption that sets the organization to use for requests.
https://platform.openai.com/docs/api-reference/authentication
type CreateAssistantFileRequest ¶
type CreateAssistantFileRequest struct { // https://platform.openai.com/docs/api-reference/assistants/createAssistantFile#assistants-createassistantfile-assistant_id // // Required. AssistantID string `json:"assistant_id"` // https://platform.openai.com/docs/api-reference/assistants/createAssistantFile#assistants-createassistantfile-file // // Required. FileID string `json:"file_id"` }
https://platform.openai.com/docs/api-reference/assistants/createAssistantFile
type CreateAssistantFileResponse ¶
type CreateAssistantFileResponse = AssistantFile
type CreateAssistantRequest ¶
type CreateAssistantRequest struct { // https://platform.openai.com/docs/api-reference/assistants/createAssistant#assistants-createassistant-model // // Required. Model string `json:"model"` // https://platform.openai.com/docs/api-reference/assistants/createAssistant#assistants-createassistant-instructions // // Optional. Instructions string `json:"instructions,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/createAssistant#assistants-createassistant-name // // Optional. Name string `json:"name,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/createAssistant#assistants-createassistant-description // // Optional. Description string `json:"description,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/createAssistant#assistants-createassistant-tools // // Optional. Tools []map[string]any `json:"tools,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/createAssistant#assistants-createassistant-file_ids // // Optional. FileIDs []string `json:"file_ids,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/createAssistant#assistants-createassistant-metadata // // Optional. Metadata map[string]any `json:"metadata,omitempty"` }
https://platform.openai.com/docs/api-reference/assistants/create
type CreateAssistantResponse ¶
type CreateAssistantResponse = Assistant
https://platform.openai.com/docs/api-reference/assistants/create
type CreateAudioTranscriptionRequest ¶
type CreateAudioTranscriptionRequest struct { // https://platform.openai.com/docs/api-reference/audio/create#audio/create-file // // Required. File AudioTranscriptableFile // https://platform.openai.com/docs/api-reference/audio/create#audio/create-model // // Required. Model string // https://platform.openai.com/docs/api-reference/audio/create#audio/create-prompt // // Optional. Prompt string // The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. // // https://platform.openai.com/docs/api-reference/audio/create#audio/create-response_format // // Optional. Defaults to "json". ResponseFormat string // https://platform.openai.com/docs/api-reference/audio/create#audio/create-temperature // // Optional. Temperature float64 // https://platform.openai.com/docs/api-reference/audio/create#audio/create-language // // Optional. Language string }
type CreateAudioTranscriptionResponse ¶
type CreateAudioTranscriptionResponse interface {
Text() string
}
type CreateAudioTranscriptionResponseJSON ¶
type CreateAudioTranscriptionResponseJSON struct {
RawText string `json:"text"`
}
https://platform.openai.com/docs/api-reference/audio/create
func (*CreateAudioTranscriptionResponseJSON) Text ¶
func (a *CreateAudioTranscriptionResponseJSON) Text() string
type CreateChatRequest ¶
type CreateChatRequest struct { // The model to use for the chat (e.g. "gpt3.5-turbo" or "gpt4"). // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-model // // Required. Model string `json:"model,omitempty"` // The context window of the conversation, which is a list of messages. // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-messages // // Required. Messages []ChatMessage `json:"messages,omitempty"` // https://platform.openai.com/docs/api-reference/chat/create#chat/create-temperature // // Optional. Temperature float64 `json:"temperature,omitempty"` // https://platform.openai.com/docs/api-reference/chat/create#chat/create-top_p // // Optional. TopP float64 `json:"top_p,omitempty"` // The number of responses to return, which is typically 1 (the default). // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-n // // Optional. N int `json:"n,omitempty"` // Enable streaming mode, which will return a stream instead of a list of // responses. This is useful for longer messages, where the caller can // process the response incrementally, instead of waiting for the entire // response to be returned. // // You can use this to enable a fun "typing" effect while the chat bot // is generating the response, or start transmitting the response as // soon as the first few tokens are available. // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-stream // // Optional. Stream bool `json:"stream,omitempty"` // Up to 4 sequences where the API will stop generating further tokens. // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-stop // // Optional. Stop []string `json:"stop,omitempty"` // The maximum number of tokens to generate in the chat completion. // // The total length of input tokens and generated tokens is limited // by the model's context length. 
// // https://platform.openai.com/docs/api-reference/chat/create#chat/create-max_tokens // // Optional. MaxTokens int `json:"max_tokens,omitempty"` // Number between -2.0 and 2.0. Positive values penalize new tokens based on whether // they appear in the text so far, increasing the model's likelihood to talk about new topics. // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-presence_penalty // // Optional. PresencePenalty float64 `json:"presence_penalty,omitempty"` // Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing // frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-frequency_penalty // // Optional. FrequencyPenalty float64 `json:"frequency_penalty,omitempty"` // Modify the likelihood of specified tokens appearing in the completion. // // This is a json object that maps tokens (specified by their token ID in the tokenizer) // to an associated bias value from -100 to 100. Mathematically, the bias is added to // the logits generated by the model prior to sampling. The exact effect will vary per // model, but values between -1 and 1 should decrease or increase likelihood of selection; // values like -100 or 100 should result in a ban or exclusive selection of the relevant token. // // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias // // Optional. LogitBias map[string]float64 `json:"logit_bias,omitempty"` // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. // // https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-user // // Optional. User string `json:"user,omitempty"` // Functions are the functions that can be called by the model. 
// // https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions // // Optional. Functions []*Function `json:"functions,omitempty"` // Controls how the model responds to function calls. "none" means the model does not // call a function, and responds to the end-user. "auto" means the model can pick // between an end-user or calling a function. Specifying a particular function // via {"name":\ "my_function"} forces the model to call that function. "none" // is the default when no functions are present. "auto" is the default if // functions are present. // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-function_call // // Optional. FunctionCall FunctionCallControl `json:"function_call,omitempty"` }
CreateChatRequest is sent to the API, which will return a chat response.
This is the substrate for the OpenAI chat API, which can be used for enabling "chat sessions". The API is designed to be used in a loop, where the response from the previous request is typically used as the input for the next request, specifically the `messages` field, which contains the current "context window" of the conversation that must be maintained by the caller.
This is where the art of building a chat bot comes in, as the caller must decide how to manage the context window, e.g. how to maintain the long term memory of the conversation; what to include in the next request, and what to discard; how to handle the "end of conversation" signal, etc.
To identify similar messages from past "memories", the caller can use the embedding API to obtain embeddings for the messages, and then use a similarity metric to identify similar messages; cosine similarity is often used, but it is not the only option.
type CreateChatResponse ¶
type CreateChatResponse struct { ID string `json:"id"` Object string `json:"object"` Created int `json:"created"` Model string `json:"model"` Usage struct { PromptTokens int `json:"prompt_tokens"` CompletionTokens int `json:"completion_tokens"` TotalTokens int `json:"total_tokens"` } `json:"usage"` Choices []struct { Message ChatMessage `json:"message"` FinishReason string `json:"finish_reason"` Index int `json:"index"` } `json:"choices"` // https://platform.openai.com/docs/api-reference/chat/create#chat/create-stream Stream io.ReadCloser `json:"-"` }
CreateChatResponse is received in response to a chat request.
https://platform.openai.com/docs/api-reference/chat/create
func (*CreateChatResponse) FirstChoice ¶
func (r *CreateChatResponse) FirstChoice() (*ChatMessage, error)
FirstChoice returns the first choice in the response, or an error if there are no choices.
func (*CreateChatResponse) RandomChoice ¶
func (r *CreateChatResponse) RandomChoice() (*ChatMessage, error)
RandomChoice returns a random choice in the response, or an error if there are no choices.
func (*CreateChatResponse) ReadStream ¶
func (r *CreateChatResponse) ReadStream(ctx context.Context, cb func(*ChatMessageStreamChunk) error) error
ReadStream reads the stream, applying the callback to each message.
Messages are sent via server-sent events (SSE).
type CreateCompletionRequest ¶
type CreateCompletionRequest struct { // ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them. // // https://platform.openai.com/docs/api-reference/completions/create#completions/create-model Model string `json:"model"` // The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. // // Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model // will generate as if from the beginning of a new document. // // https://platform.openai.com/docs/api-reference/completions/create#completions/create-prompt Prompt []string `json:"prompt"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-suffix Suffix string `json:"suffix,omitempty"` // The maximum number of tokens to generate in the completion. // // The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context // length of 2048 tokens (except for the newest models, which support 4096). // // Defaults to 16 if not specified. // // https://platform.openai.com/docs/api-reference/completions/create#completions/create-max_tokens MaxTokens int `json:"max_tokens,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature // // Defaults to 1 if not specified. Temperature float64 `json:"temperature,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-top_p // // Defaults to 1 if not specified. TopP float64 `json:"top_p,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-n // // Defaults to 1 if not specified. N int `json:"n,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-stream // // Defaults to false if not specified. 
Stream bool `json:"stream,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-logprobs // // Defaults to nil. LogProbs *int `json:"logprobs,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-echo // // Defaults to false if not specified. Echo bool `json:"echo,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-stop Stop []string `json:"stop,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-presence_penalty // // Defaults to 0 if not specified. PresencePenalty int `json:"presence_penalty,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-frequency_penalty // // Defaults to 0 if not specified. FrequencyPenalty int `json:"frequency_penalty,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-best_of // // Defaults to 1 if not specified. // // WARNING: Because this parameter generates many completions, it can quickly consume your token quota. // Use carefully and ensure that you have reasonable settings for max_tokens and stop. BestOf int `json:"best_of,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-logit_bias // // Defaults to nil. LogitBias map[string]float64 `json:"logit_bias,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-user // // Defaults to nil. User string `json:"user,omitempty"` }
CreateCompletionRequest contains information for a "completion" request to the OpenAI API. This is the fundamental request type for the API.
https://platform.openai.com/docs/api-reference/completions/create
type CreateCompletionResponse ¶
type CreateCompletionResponse struct { ID string `json:"id"` Object string `json:"object"` Created int `json:"created"` Model string `json:"model"` Choices []struct { Text string `json:"text"` Index int `json:"index"` Logprobs interface{} `json:"logprobs"` FinishReason string `json:"finish_reason"` } `json:"choices"` Usage struct { PromptTokens int `json:"prompt_tokens"` CompletionTokens int `json:"completion_tokens"` TotalTokens int `json:"total_tokens"` } `json:"usage"` }
CreateCompletionResponse is the response from a "completion" request to the OpenAI API.
https://platform.openai.com/docs/api-reference/completions/create
type CreateEditRequest ¶
type CreateEditRequest struct { // https://platform.openai.com/docs/api-reference/edits/create#edits/create-model // // Required. Model string `json:"model"` // https://platform.openai.com/docs/api-reference/edits/create#edits/create-instruction // // Required. Instruction string `json:"instruction"` // https://platform.openai.com/docs/api-reference/edits/create#edits/create-input Input string `json:"input"` // https://platform.openai.com/docs/api-reference/edits/create#edits/create-n N int `json:"n,omitempty"` // https://platform.openai.com/docs/api-reference/edits/create#edits/create-temperature Temperature float64 `json:"temperature,omitempty"` // https://platform.openai.com/docs/api-reference/edits/create#edits/create-top-p TopP float64 `json:"top_p,omitempty"` }
CreateEditRequest is the request for an "edit" request to the OpenAI API.
type CreateEditResponse ¶
type CreateEditResponse struct { Object string `json:"object"` Created int `json:"created"` Choices []struct { Text string `json:"text"` Index int `json:"index"` } `json:"choices"` Usage struct { PromptTokens int `json:"prompt_tokens"` CompletionTokens int `json:"completion_tokens"` TotalTokens int `json:"total_tokens"` } `json:"usage"` }
type CreateEmbeddingRequest ¶
type CreateEmbeddingRequest struct { // https://platform.openai.com/docs/api-reference/embeddings/create#embeddings/create-model // // Required. ID of the model to use. Model string `json:"model"` // https://platform.openai.com/docs/api-reference/embeddings/create#embeddings/create-input // // Required. The text to embed. Input string `json:"input"` // https://platform.openai.com/docs/api-reference/embeddings/create#embeddings/create-user User string `json:"user,omitempty"` }
type CreateEmbeddingResponse ¶
type CreateEmbeddingResponse struct { Object string `json:"object"` Data []struct { Object string `json:"object"` Embedding []float64 `json:"embedding"` Index int `json:"index"` } `json:"data"` Model string `json:"model"` Usage struct { PromptTokens int `json:"prompt_tokens"` TotalTokens int `json:"total_tokens"` } `json:"usage"` }
CreateEmbeddingResponse is the response from an "embedding" request to the OpenAI API.
https://platform.openai.com/docs/guides/embeddings/what-are-embeddings
type CreateFineTuneRequest ¶
type CreateFineTuneRequest struct { // https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-training_file // // Required. TrainingFile string `json:"training_file"` // https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-validation_file // // Optional. ValidationFile string `json:"validation_file,omitempty"` // https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-model // // Optional. Defaults to "curie". Model string `json:"model,omitempty"` // https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-epochs // // Optional. Defaults to 4. Epochs int `json:"n_epochs,omitempty"` // https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-batch_size // // Optional. Defaults to 32. BatchSize int `json:"batch_size,omitempty"` // https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-learning_rate_multiplier // // Optional. Default depends on the batch size. LearningRateMultiplier float64 `json:"learning_rate_multiplier,omitempty"` // https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-prompt_loss_weight // // Optional. Defaults to 0.01 PromptLossWeight float64 `json:"prompt_loss_weight,omitempty"` // https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-compute_classification_metrics // // Optional. Defaults to false. ComputeClassificationMetrics bool `json:"compute_classification_metrics,omitempty"` // https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_n_classes // // Optional, but required for multi-class classification. ClassificationNClasses int `json:"classification_n_classes,omitempty"` // https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_positive_class // // Optional, but required for binary classification. 
ClassificationPositiveClass string `json:"classification_positive_class,omitempty"` // https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_betas // // Optional, only used for binary classification. ClassificationBetas []float64 `json:"classification_betas,omitempty"` // https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-suffix // // A string of up to 40 characters that will be added to your fine-tuned model name. // // For example, a suffix of "custom-model-name" would produce a model name like // `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. // // Optional. Suffix string `json:"suffix,omitempty"` }
https://platform.openai.com/docs/api-reference/fine-tunes/create
type CreateFineTuneResponse ¶
type CreateFineTuneResponse struct { ID string `json:"id"` Object string `json:"object"` Model string `json:"model"` CreatedAt int `json:"created_at"` Events []struct { Object string `json:"object"` CreatedAt int `json:"created_at"` Level string `json:"level"` Message string `json:"message"` } `json:"events"` FineTunedModel interface{} `json:"fine_tuned_model"` Hyperparams struct { BatchSize int `json:"batch_size"` LearningRateMultiplier float64 `json:"learning_rate_multiplier"` NEpochs int `json:"n_epochs"` PromptLossWeight float64 `json:"prompt_loss_weight"` } `json:"hyperparams"` OrganizationID string `json:"organization_id"` ResultFiles []interface{} `json:"result_files"` Status string `json:"status"` ValidationFiles []interface{} `json:"validation_files"` TrainingFiles []struct { ID string `json:"id"` Object string `json:"object"` Bytes int `json:"bytes"` CreatedAt int `json:"created_at"` Filename string `json:"filename"` Purpose string `json:"purpose"` } `json:"training_files"` UpdatedAt int `json:"updated_at"` }
CreateFineTuneResponse is the response from a "create fine-tune" request.
https://platform.openai.com/docs/api-reference/fine-tunes/create
type CreateImageRequest ¶
type CreateImageRequest struct { // https://platform.openai.com/docs/api-reference/images/create#images/create-prompt // // Required. Max of 1,000 characters. Prompt string `json:"prompt"` // https://platform.openai.com/docs/api-reference/images/create#images-create-model // // Optional. Defaults to "dall-e-2". Model string `json:"model,omitempty"` // https://platform.openai.com/docs/api-reference/completions/create#completions/create-n // // Number of images to generate. Defaults to 1 if not specified. Must be between 1 and 10. N int `json:"n,omitempty"` // https://platform.openai.com/docs/api-reference/images/create#images/create-size // // Size of the image to generate. Must be one of 256x256, 512x512, or 1024x1024. Size string `json:"size,omitempty"` // https://platform.openai.com/docs/api-reference/images/create#images/create-response_format // // Defaults to "url". The format in which the generated images are returned. Must be one of "url" or "b64_json". ResponseFormat string `json:"response_format,omitempty"` // https://platform.openai.com/docs/api-reference/images/create#images/create-user User string `json:"user,omitempty"` // https://platform.openai.com/docs/api-reference/images/create#images-create-quality // // Optional. Either "standard" or "hd", defaults to "standard". Quality string `json:"quality,omitempty"` // https://platform.openai.com/docs/api-reference/images/create#images-create-style // // Optional. Either "vivid" or "natural", defaults to "vivid". Only valid for "dall-e-3" model. Style string `json:"style,omitempty"` }
https://platform.openai.com/docs/api-reference/images/create
type CreateImageResponse ¶
type CreateImageResponse struct { Created int `json:"created"` Data []struct { // One of the following: "url" or "b64_json" URL *string `json:"url"` B64JSON *string `json:"b64_json"` // If there were any prompt revisions made by the API. // Use this to refine further. RevisedPrompt *string `json:"revised_prompt"` } `json:"data"` }
CreateImageResponse is the response from an image generation request to the OpenAI API.
type CreateMessageRequest ¶
type CreateMessageRequest struct { // https://platform.openai.com/docs/api-reference/messages/createMessage#messages-createmessage-thread_id // // Required. ThreadID string `json:"-"` // https://platform.openai.com/docs/api-reference/messages/createMessage#messages-createmessage-role // // Required. Role string `json:"role"` // https://platform.openai.com/docs/api-reference/messages/createMessage#messages-createmessage-content // // Required. Content string `json:"content"` // https://platform.openai.com/docs/api-reference/messages/createMessage#messages-createmessage-file_ids // // Optional. FileIDs []string `json:"file_ids,omitempty"` // https://platform.openai.com/docs/api-reference/messages/createMessage#messages-createmessage-metadata // // Optional. Metadata map[string]any `json:"metadata,omitempty"` }
https://platform.openai.com/docs/api-reference/messages/createMessage
type CreateMessageResponse ¶
type CreateMessageResponse = ThreadMessage
https://platform.openai.com/docs/api-reference/messages/createMessage
type CreateModerationRequest ¶
type CreateModerationRequest struct { // https://platform.openai.com/docs/api-reference/moderations/create#moderations/create-model // // Optional. The model to use for moderation. Defaults to "text-moderation-latest". Model string `json:"model"` // https://platform.openai.com/docs/api-reference/moderations/create#moderations/create-input // // Required. The text to moderate. Input string `json:"input"` }
https://platform.openai.com/docs/api-reference/moderations/create
type CreateModerationResponse ¶
type CreateModerationResponse struct { ID string `json:"id"` Model string `json:"model"` Results []struct { Categories struct { Hate bool `json:"hate"` HateThreatening bool `json:"hate/threatening"` SelfHarm bool `json:"self-harm"` Sexual bool `json:"sexual"` SexualMinors bool `json:"sexual/minors"` Violence bool `json:"violence"` ViolenceGraphic bool `json:"violence/graphic"` } `json:"categories"` CategoryScores struct { Hate float64 `json:"hate"` HateThreatening float64 `json:"hate/threatening"` SelfHarm float64 `json:"self-harm"` Sexual float64 `json:"sexual"` SexualMinors float64 `json:"sexual/minors"` Violence float64 `json:"violence"` ViolenceGraphic float64 `json:"violence/graphic"` } `json:"category_scores"` Flagged bool `json:"flagged"` } `json:"results"` }
CreateModerationResponse is the response from a moderation request to the OpenAI API.
https://platform.openai.com/docs/guides/moderations/what-are-moderations
type CreateRunRequest ¶
type CreateRunRequest struct { // https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-thread_id // // Required. ThreadID string `json:"-"` // https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-assistant_id // // Required. AssistantID string `json:"assistant_id"` // https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-model // // Optional. Defaults to the model associated with the assistant. Model string `json:"model,omitempty"` // https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-instructions // // Optional. Defaults to the instructions associated with the assistant. Instructions string `json:"instructions,omitempty"` // https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-tools // // Optional. Defaults to the tools associated with the assistant. Tools []map[string]any `json:"tools,omitempty"` // https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-metadata // // Optional. Metadata map[string]any `json:"metadata,omitempty"` }
https://platform.openai.com/docs/api-reference/runs/createRun
type CreateRunResponse ¶
type CreateRunResponse = Run
https://platform.openai.com/docs/api-reference/runs/createRun
type CreateSpeechRequest ¶
type CreateSpeechRequest struct { // https://platform.openai.com/docs/api-reference/audio/createSpeech#audio-createspeech-model // // Required. Model string `json:"model"` // https://platform.openai.com/docs/api-reference/audio/createSpeech#audio-createspeech-input // // Required. Input string `json:"input"` // https://platform.openai.com/docs/api-reference/audio/createSpeech#audio-createspeech-voice // // Required. Voice string `json:"voice,omitempty"` // https://platform.openai.com/docs/api-reference/audio/createSpeech#audio-createspeech-response_format // // Optional. Defaults to "mp3". ResponseFormat string `json:"response_format,omitempty"` // https://platform.openai.com/docs/api-reference/audio/createSpeech#audio-createspeech-speed // // Optional. Defaults to 1. Speed float64 `json:"speed,omitempty"` }
https://platform.openai.com/docs/api-reference/audio/createSpeech
type CreateThreadAndRunRequest ¶
type CreateThreadAndRunRequest struct { // https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-assistant_id // // Required. AssistantID string `json:"assistant_id"` // https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-thread // // Optional. Thread *CreateThreadAndRunRequestInitialThread `json:"thread,omitempty"` // https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-model // // Optional. Defaults to the model associated with the assistant. Model string `json:"model,omitempty"` // https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-instructions // // Optional. Defaults to the instructions associated with the assistant. Instructions string `json:"instructions,omitempty"` // https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-tools // // Optional. Defaults to the tools associated with the assistant. Tools []map[string]any `json:"tools,omitempty"` // https://platform.openai.com/docs/api-reference/runs/createThreadAndRun#runs-createthreadandrun-metadata // // Optional. Metadata map[string]any `json:"metadata,omitempty"` }
https://platform.openai.com/docs/api-reference/runs/createThreadAndRun
type CreateThreadAndRunRequestInitialThread ¶
type CreateThreadAndRunRequestInitialThread struct { Messages []*CreateThreadAndRunRequestInitialThreadMessage `json:"messages,omitempty"` Metadata map[string]any `json:"metadata,omitempty"` }
type CreateThreadAndRunRequestInitialThreadMessage ¶
type CreateThreadAndRunResponse ¶
type CreateThreadAndRunResponse = Run
https://platform.openai.com/docs/api-reference/runs/createThreadAndRun
type CreateThreadRequest ¶
type CreateThreadRequest struct { // https://platform.openai.com/docs/api-reference/threads/createThread#threads-createthread-messages // // Optional. Messages []*ChatMessage `json:"messages,omitempty"` // https://platform.openai.com/docs/api-reference/threads/createThread#threads-createthread-metadata // // Optional. Metadata map[string]any `json:"metadata,omitempty"` }
https://platform.openai.com/docs/api-reference/threads/createThread
type CreateThreadResponse ¶
type CreateThreadResponse = Thread
https://platform.openai.com/docs/api-reference/threads/createThread
type DeleteAssistantFileRequest ¶
type DeleteAssistantFileRequest struct { // https://platform.openai.com/docs/api-reference/assistants/deleteAssistantFile#assistants-deleteassistantfile-assistant_id // // Required. AssistantID string `json:"assistant_id"` // https://platform.openai.com/docs/api-reference/assistants/deleteAssistantFile#assistants-deleteassistantfile-file_id // // Required. FileID string `json:"file_id"` }
https://platform.openai.com/docs/api-reference/assistants/deleteAssistantFile
type DeleteAssistantRequest ¶
type DeleteAssistantRequest struct { // https://platform.openai.com/docs/api-reference/assistants/delete#assistants/delete-id // // Required. ID string `json:"assistant_id"` }
https://platform.openai.com/docs/api-reference/assistants/deleteAssistant
type DeleteFileRequest ¶
type DeleteFileRequest struct { // ID of the file to delete. // // Required. ID string `json:"id"` }
type DeleteFileResponse ¶
type DeleteFileResponse struct { ID string `json:"id"` Object string `json:"object"` Deleted bool `json:"deleted"` }
DeleteFileResponse is the response from a "delete file" request to the OpenAI API.
type DeleteFineTuneModelRequest ¶
type DeleteFineTuneModelRequest struct { // https://platform.openai.com/docs/api-reference/fine-tunes/delete-model#fine-tunes/delete-model-model // // Required. ID string `json:"model"` }
https://platform.openai.com/docs/api-reference/fine-tunes/delete-model
type DeleteFineTuneModelResponse ¶
type DeleteFineTuneModelResponse struct { ID string `json:"id"` Object string `json:"object"` Deleted bool `json:"deleted"` }
https://platform.openai.com/docs/api-reference/fine-tunes/delete-model
type DeleteThreadRequest ¶
type DeleteThreadRequest struct { // https://platform.openai.com/docs/api-reference/threads/deleteThread#threads-deletethread-id // // Required. ID string `json:"thread_id"` }
https://platform.openai.com/docs/api-reference/threads/deleteThread
type Function ¶
type Function struct { // Name is the name of the function. // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-name // // Required. Name string `json:"name"` // Description is a description of the function. // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-description // // Optional. Description string `json:"description,omitempty"` // Parameters are the arguments to the function. // // The parameters the functions accepts, described as a JSON Schema object. // See the guide for examples, and the JSON Schema reference for documentation // about the format. // // https://json-schema.org/understanding-json-schema/ // // https://platform.openai.com/docs/guides/gpt/function-calling // // https://platform.openai.com/docs/api-reference/chat/create#chat/create-parameters // // Required. Parameters *JSONSchema `json:"parameters,omitempty"` }
Function is a logical function that can be called by the model.
type FunctionCall ¶
type FunctionCall struct { Name string `json:"name"` Arguments FunctionCallArguments `json:"arguments"` }
FunctionCall describes a function call.
func (*FunctionCall) MarshalJSON ¶
func (f *FunctionCall) MarshalJSON() ([]byte, error)
MarshalJSON marshals the function call into a JSON string.
func (*FunctionCall) UnmarshalJSON ¶
func (f *FunctionCall) UnmarshalJSON(b []byte) error
Implement custom JSON marshalling and unmarshalling to handle arguments, which come from a JSON string from the API directly.
We turn this into a map[string]any that is a little easier to work with.
type FunctionCallArguments ¶
FunctionCallArguments is a map of argument name to value.
type FunctionCallControl ¶
type FunctionCallControl interface {
// contains filtered or unexported methods
}
FunctionCallControl is an option used to control the behavior of a function call in a chat request. It can be used to specify the name of the function to call, "none", or "auto" (the default).
https://platform.openai.com/docs/api-reference/chat/create#chat/create-function_call
type FunctionCallControlAuto ¶
type FunctionCallControlAuto struct{}
FunctionCallControlAuto is a function call option that indicates that the function to call should be determined automatically.
func (FunctionCallControlAuto) MarshalJSON ¶
func (FunctionCallControlAuto) MarshalJSON() ([]byte, error)
MarshalJSON marshals the function call option into a JSON string.
type FunctionCallControlName ¶
type FunctionCallControlName string
FunctionCallControlName is a function call option that indicates that the function to call should be determined by the given name.
func FunctionCallName ¶
func FunctionCallName(name string) FunctionCallControlName
func (FunctionCallControlName) MarshalJSON ¶
func (f FunctionCallControlName) MarshalJSON() ([]byte, error)
MarshalJSON marshals the function call option into a JSON string.
type FunctionCallControlNone ¶
type FunctionCallControlNone struct{}
FunctionCallControlNone is a function call option that indicates that no function should be called.
func (FunctionCallControlNone) MarshalJSON ¶
func (FunctionCallControlNone) MarshalJSON() ([]byte, error)
MarshalJSON marshals the function call option into a JSON string.
type GetAssistantFileRequest ¶
type GetAssistantFileRequest struct { // https://platform.openai.com/docs/api-reference/assistants/getAssistantFile#assistants-getassistantfile-assistant_id // // Required. AssistantID string `json:"assistant_id"` // https://platform.openai.com/docs/api-reference/assistants/getAssistantFile#assistants-getassistantfile-file_id // // Required. FileID string `json:"file_id"` }
https://platform.openai.com/docs/api-reference/assistants/getAssistantFile
type GetAssistantFileResponse ¶
type GetAssistantFileResponse = AssistantFile
type GetAssistantRequest ¶
type GetAssistantRequest struct { // https://platform.openai.com/docs/api-reference/assistants/get#assistants/get-id // // Required. ID string `json:"assistant_id"` }
type GetAssistantResponse ¶
type GetAssistantResponse = Assistant
https://platform.openai.com/docs/api-reference/assistants/get#assistants/get-id
type GetFileContentRequest ¶
type GetFileContentRequest struct { // ID of the file to retrieve. // // Required. ID string `json:"id"` }
https://platform.openai.com/docs/api-reference/files/retrieve-content
type GetFileContentResponse ¶
type GetFileContentResponse struct { // Body is the file content returned by the OpenAI API. // // The caller is responsible for closing the body, and should do so as soon as possible. Body io.ReadCloser }
GetFileContentResponse ...
https://platform.openai.com/docs/api-reference/files/retrieve-content
type GetFileInfoRequest ¶
type GetFileInfoRequest struct { // ID of the file to retrieve. // // Required. ID string `json:"id"` }
https://platform.openai.com/docs/api-reference/files/retrieve
type GetFileInfoResponse ¶
type GetFileInfoResponse struct { ID string `json:"id"` Object string `json:"object"` Bytes int `json:"bytes"` CreatedAt int `json:"created_at"` Filename string `json:"filename"` Purpose string `json:"purpose"` }
GetFileInfoResponse ...
https://platform.openai.com/docs/api-reference/files/retrieve
type GetFineTuneRequest ¶
type GetFineTuneRequest struct {
ID string `json:"id"`
}
https://platform.openai.com/docs/api-reference/fine-tunes/retrieve
type GetFineTuneResponse ¶
type GetFineTuneResponse struct { ID string `json:"id"` Object string `json:"object"` Model string `json:"model"` CreatedAt int `json:"created_at"` Events []struct { Object string `json:"object"` CreatedAt int `json:"created_at"` Level string `json:"level"` Message string `json:"message"` } `json:"events"` FineTunedModel string `json:"fine_tuned_model"` Hyperparams struct { BatchSize int `json:"batch_size"` LearningRateMultiplier float64 `json:"learning_rate_multiplier"` NEpochs int `json:"n_epochs"` PromptLossWeight float64 `json:"prompt_loss_weight"` } `json:"hyperparams"` OrganizationID string `json:"organization_id"` ResultFiles []struct { ID string `json:"id"` Object string `json:"object"` Bytes int `json:"bytes"` CreatedAt int `json:"created_at"` Filename string `json:"filename"` Purpose string `json:"purpose"` } `json:"result_files"` Status string `json:"status"` ValidationFiles []any `json:"validation_files"` TrainingFiles []struct { ID string `json:"id"` Object string `json:"object"` Bytes int `json:"bytes"` CreatedAt int `json:"created_at"` Filename string `json:"filename"` Purpose string `json:"purpose"` } `json:"training_files"` UpdatedAt int `json:"updated_at"` }
https://platform.openai.com/docs/api-reference/fine-tunes/retrieve
type GetMessageFileRequest ¶
type GetMessageFileRequest struct { // https://platform.openai.com/docs/api-reference/messages/getMessageFile#messages-getmessagefile-thread_id // // Required. ThreadID string `json:"thread_id"` // https://platform.openai.com/docs/api-reference/messages/getMessageFile#messages-getmessagefile-message_id // // Required. MessageID string `json:"message_id"` // https://platform.openai.com/docs/api-reference/messages/getMessageFile#messages-getmessagefile-file_id // // Required. FileID string `json:"file_id"` }
https://platform.openai.com/docs/api-reference/messages/getMessageFile
type GetMessageFileResponse ¶
type GetMessageFileResponse = MessageFile
type GetMessageRequest ¶
type GetMessageRequest struct { // https://platform.openai.com/docs/api-reference/messages/getMessage#messages-getmessage-thread_id // // Required. ThreadID string `json:"thread_id"` // https://platform.openai.com/docs/api-reference/messages/getMessage#messages-getmessage-message_id // // Required. MessageID string `json:"message_id"` }
https://platform.openai.com/docs/api-reference/messages/getMessage
type GetMessageResponse ¶
type GetMessageResponse = ThreadMessage
https://platform.openai.com/docs/api-reference/messages/getMessage#messages-getmessage-response
type GetRunRequest ¶
type GetRunRequest struct { // https://platform.openai.com/docs/api-reference/runs/getRun#runs-getrun-thread_id // // Required. ThreadID string // https://platform.openai.com/docs/api-reference/runs/getRun#runs-getrun-run_id // // Required. RunID string }
type GetRunResponse ¶
type GetRunResponse = Run
type GetRunStepRequest ¶
type GetRunStepRequest struct { // https://platform.openai.com/docs/api-reference/runs/getRunStep#runs-getrunstep-thread_id // // Required. ThreadID string // https://platform.openai.com/docs/api-reference/runs/getRunStep#runs-getrunstep-run_id // // Required. RunID string // https://platform.openai.com/docs/api-reference/runs/getRunStep#runs-getrunstep-step_id // // Required. StepID string }
https://platform.openai.com/docs/api-reference/runs/getRunStep
type GetRunStepResponse ¶
type GetRunStepResponse = RunStep
https://platform.openai.com/docs/api-reference/runs/getRunStep
type GetThreadRequest ¶
type GetThreadRequest struct { // https://platform.openai.com/docs/api-reference/threads/getThread#threads-getthread-id // // Required. ID string `json:"thread_id"` }
https://platform.openai.com/docs/api-reference/threads/getThread
type GetThreadResponse ¶
type GetThreadResponse = Thread
https://platform.openai.com/docs/api-reference/threads/getThread#threads-getthread-response
type JSONSchema ¶
type JSONSchema struct { // Type is the type of the schema. Type string `json:"type,omitempty"` // Description is the description of the schema. Description string `json:"description,omitempty"` // Properties is the properties of the schema. Properties map[string]*JSONSchema `json:"properties,omitempty"` // Required is the required properties of the schema. Required []string `json:"required,omitempty"` // Enum is the enum of the schema. Enum []string `json:"enum,omitempty"` // Items is the items of the schema. Items *JSONSchema `json:"items,omitempty"` // AdditionalProperties is the additional properties of the schema. AdditionalProperties *JSONSchema `json:"additionalProperties,omitempty"` // Ref is the ref of the schema. Ref string `json:"$ref,omitempty"` // AnyOf is the anyOf of the schema. AnyOf []*JSONSchema `json:"anyOf,omitempty"` // AllOf is the allOf of the schema. AllOf []*JSONSchema `json:"allOf,omitempty"` // OneOf is the oneOf of the schema. OneOf []*JSONSchema `json:"oneOf,omitempty"` // Default is the default of the schema. Default any `json:"default,omitempty"` // Pattern is the pattern of the schema. Pattern string `json:"pattern,omitempty"` // MinItems is the minItems of the schema. MinItems int `json:"minItems,omitempty"` // MaxItems is the maxItems of the schema. MaxItems int `json:"maxItems,omitempty"` // UniqueItems is the uniqueItems of the schema. UniqueItems bool `json:"uniqueItems,omitempty"` // MultipleOf is the multipleOf of the schema. MultipleOf int `json:"multipleOf,omitempty"` // Min is the minimum of the schema. Min int `json:"min,omitempty"` // Max is the maximum of the schema. Max int `json:"max,omitempty"` // ExclusiveMin is the exclusiveMinimum of the schema. ExclusiveMin bool `json:"exclusiveMinimum,omitempty"` // ExclusiveMax is the exclusiveMaximum of the schema. ExclusiveMax bool `json:"exclusiveMaximum,omitempty"` }
JSONSchema is a JSON Schema.
https://json-schema.org/understanding-json-schema/reference/index.html
type ListAssistantFilesRequest ¶
type ListAssistantFilesRequest struct { // https://platform.openai.com/docs/api-reference/assistants/listAssistantFiles#assistants-listassistantfiles-assistant_id // // Required. AssistantID string `json:"-"` // https://platform.openai.com/docs/api-reference/assistants/listAssistantFiles#assistants-listassistantfiles-limit // // Optional. Defaults to 20. Limit int `json:"limit,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/listAssistantFiles#assistants-listassistantfiles-order // // Optional. Defaults to "desc". Order string `json:"order,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/listAssistantFiles#assistants-listassistantfiles-after // // Optional. After string `json:"after,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/listAssistantFiles#assistants-listassistantfiles-before // // Optional. Before string `json:"before,omitempty"` }
https://platform.openai.com/docs/api-reference/assistants/listAssistantFiles
type ListAssistantFilesResponse ¶
type ListAssistantFilesResponse struct {
Data []AssistantFile `json:"data"`
}
type ListAssistantsRequest ¶
type ListAssistantsRequest struct { // https://platform.openai.com/docs/api-reference/assistants/listAssistants#assistants-listassistants-limit // // Optional. Defaults to 20. Limit int `json:"limit,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/listAssistants#assistants-listassistants-order // // Optional. Defaults to "desc". Order string `json:"order,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/listAssistants#assistants-listassistants-after // // Optional. After string `json:"after,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/listAssistants#assistants-listassistants-before // // Optional. Before string `json:"before,omitempty"` }
type ListAssistantsResponse ¶
type ListAssistantsResponse struct {
Data []Assistant `json:"data"`
}
type ListFilesRequest ¶
type ListFilesRequest struct { // https://platform.openai.com/docs/api-reference/files/list#files-list-purpose // // Optional. Filter to only list files with the specified purpose (assistants, fine-tune, etc). Purpose string `json:"purpose,omitempty"` }
type ListFilesResponse ¶
type ListFineTuneEventsRequest ¶
type ListFineTuneEventsRequest struct { // https://platform.openai.com/docs/api-reference/fine-tunes/events#fine-tunes/events-fine_tune_id // // Required. ID string `json:"id"` // https://platform.openai.com/docs/api-reference/fine-tunes/events#fine-tunes/events-stream // // Optional. Stream bool `json:"stream"` }
https://platform.openai.com/docs/api-reference/fine-tunes/events
type ListFineTuneEventsResponse ¶
type ListFineTuneEventsResponse struct { Object string `json:"object"` Data []struct { Object string `json:"object"` CreatedAt int `json:"created_at"` Level string `json:"level"` Message string `json:"message"` } `json:"data"` // https://platform.openai.com/docs/api-reference/fine-tunes/events#fine-tunes/events-stream // // Only present if stream=true. Up to the caller to close the stream, e.g.: defer res.Stream.Close() Stream io.ReadCloser `json:"-"` }
https://platform.openai.com/docs/api-reference/fine-tunes/events
type ListFineTunesRequest ¶
type ListFineTunesRequest struct { }
https://platform.openai.com/docs/api-reference/fine-tunes/list
type ListFineTunesResponse ¶
type ListFineTunesResponse struct { Object string `json:"object"` Data []struct { ID string `json:"id"` Object string `json:"object"` Model string `json:"model"` CreatedAt int `json:"created_at"` FineTunedModel any `json:"fine_tuned_model"` Hyperparams map[string]any `json:"hyperparams"` OrganizationID string `json:"organization_id"` ResultFiles []any `json:"result_files"` Status string `json:"status"` ValidationFiles []any `json:"validation_files"` TrainingFiles []any `json:"training_files"` UpdatedAt int `json:"updated_at"` } `json:"data"` }
https://platform.openai.com/docs/api-reference/fine-tunes/list
type ListMessageFilesRequest ¶
type ListMessageFilesRequest struct { // https://platform.openai.com/docs/api-reference/messages/listMessageFiles#messages-listmessagefiles-thread_id // // Required. ThreadID string `json:"thread_id"` // https://platform.openai.com/docs/api-reference/messages/listMessageFiles#messages-listmessagefiles-message_id // // Required. MessageID string `json:"message_id"` // https://platform.openai.com/docs/api-reference/messages/listMessageFiles#messages-listmessagefiles-limit // // Optional. Defaults to 20. Limit int `json:"limit,omitempty"` // https://platform.openai.com/docs/api-reference/messages/listMessageFiles#messages-listmessagefiles-order // // Optional. Defaults to "desc". Order string `json:"order,omitempty"` // https://platform.openai.com/docs/api-reference/messages/listMessageFiles#messages-listmessagefiles-after // // Optional. After string `json:"after,omitempty"` // https://platform.openai.com/docs/api-reference/messages/listMessageFiles#messages-listmessagefiles-before // // Optional. Before string `json:"before,omitempty"` }
https://platform.openai.com/docs/api-reference/messages/listMessageFiles
type ListMessageFilesResponse ¶
type ListMessageFilesResponse struct {
Data []MessageFile `json:"data"`
}
type ListMessagesRequest ¶
type ListMessagesRequest struct { // https://platform.openai.com/docs/api-reference/messages/listMessages#messages-listmessages-thread_id // // Required. ThreadID string `json:"thread_id"` // https://platform.openai.com/docs/api-reference/messages/listMessages#messages-listmessages-limit // // Optional. Defaults to 20. Limit int `json:"limit,omitempty"` // https://platform.openai.com/docs/api-reference/messages/listMessages#messages-listmessages-order // // Optional. Defaults to "desc". Order string `json:"order,omitempty"` // https://platform.openai.com/docs/api-reference/messages/listMessages#messages-listmessages-after // // Optional. After string `json:"after,omitempty"` // https://platform.openai.com/docs/api-reference/messages/listMessages#messages-listmessages-before // // Optional. Before string `json:"before,omitempty"` }
https://platform.openai.com/docs/api-reference/messages/listMessages
type ListMessagesResponse ¶
type ListMessagesResponse struct {
Data []ThreadMessage `json:"data"`
}
https://platform.openai.com/docs/api-reference/messages/listMessages#messages-listmessages-response
type ListRunStepsRequest ¶
type ListRunStepsRequest struct { // https://platform.openai.com/docs/api-reference/runs/listRunSteps#runs-listrunsteps-thread_id // // Required. ThreadID string // https://platform.openai.com/docs/api-reference/runs/listRunSteps#runs-listrunsteps-run_id // // Required. RunID string // https://platform.openai.com/docs/api-reference/runs/listRunSteps#runs-listrunsteps-limit // // Optional. Defaults to 20. Limit int // https://platform.openai.com/docs/api-reference/runs/listRunSteps#runs-listrunsteps-order // // Optional. Defaults to "desc". Order string // https://platform.openai.com/docs/api-reference/runs/listRunSteps#runs-listrunsteps-after // // Optional. After string // https://platform.openai.com/docs/api-reference/runs/listRunSteps#runs-listrunsteps-before // // Optional. Before string }
https://platform.openai.com/docs/api-reference/runs/listRunSteps
type ListRunStepsResponse ¶
type ListRunStepsResponse struct {
Data []RunStep `json:"data"`
}
https://platform.openai.com/docs/api-reference/runs/listRunSteps
type ListRunsRequest ¶
type ListRunsRequest struct { // https://platform.openai.com/docs/api-reference/runs/listRuns#runs-listruns-thread_id // // Required. ThreadID string `json:"thread_id"` // https://platform.openai.com/docs/api-reference/runs/listRuns#runs-listruns-limit // // Optional. Defaults to 20. Limit int `json:"limit,omitempty"` // https://platform.openai.com/docs/api-reference/runs/listRuns#runs-listruns-order // // Optional. Defaults to "desc". Order string `json:"order,omitempty"` // https://platform.openai.com/docs/api-reference/runs/listRuns#runs-listruns-after // // Optional. After string `json:"after,omitempty"` // https://platform.openai.com/docs/api-reference/runs/listRuns#runs-listruns-before // // Optional. Before string `json:"before,omitempty"` }
https://platform.openai.com/docs/api-reference/runs/listRuns
type ListRunsResponse ¶
type ListRunsResponse struct {
Data []Run `json:"data"`
}
https://platform.openai.com/docs/api-reference/runs/listRuns#runs-listruns-response
type MessageFile ¶
type MessageFile struct { ID string `json:"id"` Object string `json:"object"` Created int `json:"created"` MessageID string `json:"message_id"` }
https://platform.openai.com/docs/api-reference/messages/file-object
type Model ¶
type Model = string
Model is a known OpenAI model identifier.
const ( // ModelAda is the Ada model. // // Ada is usually the fastest model and can perform tasks like parsing text, address correction and certain kinds of classification // tasks that don’t require too much nuance. Ada’s performance can often be improved by providing more context. // // Good at: Parsing text, simple classification, address correction, keywords // // Note: Any task performed by a faster model like Ada can be performed by a more powerful model like Curie or Davinci. // // https://beta.openai.com/docs/models/ada ModelAda Model = "ada" // ModelBabbage is the Babbage model. // // Babbage can perform straightforward tasks like simple classification. It’s also quite capable when it comes to Semantic Search // ranking how well documents match up with search queries. // // Good at: Moderate classification, semantic search classification // // https://beta.openai.com/docs/models/babbage ModelBabbage Model = "babbage" // ModelCurie is the Curie model. // // Curie is extremely powerful, yet very fast. While Davinci is stronger when it comes to analyzing complicated text, Curie is // quite capable for many nuanced tasks like sentiment classification and summarization. Curie is also quite good at answering // questions and performing Q&A and as a general service chatbot. // // Good at: Language translation, complex classification, text sentiment, summarization // // https://beta.openai.com/docs/models/curie ModelCurie Model = "curie" // ModelDavinci is the Davinci model. // // Davinci is the most capable model family and can perform any task the other models can perform and often with less instruction. // For applications requiring a lot of understanding of the content, like summarization for a specific audience and creative content // generation, Davinci is going to produce the best results. These increased capabilities require more compute resources, so Davinci // costs more per API call and is not as fast as the other models.
// // Another area where Davinci shines is in understanding the intent of text. Davinci is quite good at solving many kinds of logic problems // and explaining the motives of characters. Davinci has been able to solve some of the most challenging AI problems involving cause and effect. // // Good at: Complex intent, cause and effect, summarization for audience // // https://beta.openai.com/docs/models/davinci ModelDavinci Model = "davinci" // Most capable GPT-3 model. Can do any task the other models can do, often with higher quality, longer output and better instruction-following. // Also supports inserting completions within text. ModelTextDavinciEdit003 Model = "text-davinci-003" // Very capable, but faster and lower cost than Davinci. ModelTextCurie001 Model = "text-curie-001" // Capable of straightforward tasks, very fast, and lower cost. ModelBabbage001 Model = "text-babbage-001" // Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost. ModelAda001 Model = "text-ada-001" // Most capable Codex model. Particularly good at translating natural language to code. In addition to completing code, also supports inserting completions within code. ModelCodeDavinci002 Model = "code-davinci-002" // Almost as capable as Davinci Codex, but slightly faster. This speed advantage may make it preferable for real-time applications. ModelCodeCushman001 Model = "code-cushman-001" // Used for the CreateEdit API endpoint. ModelTextDavinciEdit001 Model = "text-davinci-edit-001" ModelCodeDavinciEdit001 Model = "code-davinci-edit-001" // https://platform.openai.com/docs/guides/embeddings/embedding-models ModelTextEmbeddingAda001 Model = "text-embedding-ada-001" // This is the previously recommend model for nearly all embedding use cases. // // https://openai.com/blog/new-and-improved-embedding-model ModelTextEmbeddingAda002 Model = "text-embedding-ada-002" // These models are the latest and greatest for embedding use cases. 
// // https://openai.com/blog/new-embedding-models-and-api-updates ModelTextEmbedding3Small Model = "text-embedding-3-small" ModelTextEmbedding3Large Model = "text-embedding-3-large" // https://platform.openai.com/docs/api-reference/chat/create#chat/create-model ModelGPT35Turbo Model = "gpt-3.5-turbo" ModelGPT35Turbo0301 Model = "gpt-3.5-turbo-0301" ModelGPT35Turbo0613 Model = "gpt-3.5-turbo-0613" ModelGPT35Turbo1106 Model = "gpt-3.5-turbo-1106" ModelGPT35Turbo16k Model = "gpt-3.5-turbo-16k" ModelGPT35Turbo16k0613 Model = "gpt-3.5-turbo-16k-0613" ModelGPT35TurboInstruct Model = "gpt-3.5-turbo-instruct" ModelGPT35TurboInstruct0914 Model = "gpt-3.5-turbo-instruct-0914" ModelGPT35Turbo0125 Model = "gpt-3.5-turbo-0125" ModelGPT4 Model = "gpt-4" ModelGPT40314 Model = "gpt-4-0314" ModelGPT40613 Model = "gpt-4-0613" ModelGPT432K Model = "gpt-4-32k" ModelGPT432K0314 Model = "gpt-4-32k-0314" ModelGPT41106Previw Model = "gpt-4-1106-preview" ModelGPT4VisionPreview Model = "gpt-4-vision-preview" ModelGPT40125Preview Model = "gpt-4-0125-preview" ModelGPT4TurboPreview Model = "gpt-4-turbo-preview" ModelWhisper1 Model = "whisper-1" ModelTTS1 Model = "tts-1" ModelTTS11106 Model = "tts-1-1106" ModelTTS1HD Model = "tts-1-hd" ModelTTS1HD1106 Model = "tts-1-hd-1106" ModelTextModeration007 Model = "text-moderation-007" ModelTextModerationLatest Model = "text-moderation-latest" ModelTextModerationStable Model = "text-moderation-stable" ModelDallE2 Model = "dall-e-2" ModelDallE3 Model = "dall-e-3" )
type Models ¶
type Models struct { Object string `json:"object"` Data []struct { ID string `json:"id"` Object string `json:"object"` Created int `json:"created"` OwnedBy string `json:"owned_by"` Permission []struct { ID string `json:"id"` Object string `json:"object"` Created int `json:"created"` AllowCreateEngine bool `json:"allow_create_engine"` AllowSampling bool `json:"allow_sampling"` AllowLogprobs bool `json:"allow_logprobs"` AllowSearchIndices bool `json:"allow_search_indices"` AllowView bool `json:"allow_view"` AllowFineTuning bool `json:"allow_fine_tuning"` Organization string `json:"organization"` Group interface{} `json:"group"` IsBlocking bool `json:"is_blocking"` } `json:"permission"` Root string `json:"root"` Parent interface{} `json:"parent"` } `json:"data"` }
type RateLimiters ¶
type RateLimiters struct { Chat struct { Requests *rate.Limiter Tokens *rate.Limiter } Text struct { Requests *rate.Limiter Tokens *rate.Limiter } Embedding struct { Requests *rate.Limiter Tokens *rate.Limiter } Images struct { Requests *rate.Limiter } Audio struct { Requests *rate.Limiter } }
RateLimiters is a struct that holds all of the rate limiters for the OpenAI API that can be used by clients to rate limit their requests.
These are not enforced by the client by default, but can be used to rate limit requests to the OpenAI API by calling the `Allow()` method on appropriate limiter before making a request.
Example ¶
// If the rate limiter allows the request, make the request. if openai.RateLimits.Chat.Requests.Allow() { resp, err := client.CreateChat(ctx, &openai.CreateChatRequest{ ... }) ... } // Wait for the rate limiter to allow the request. for openai.RateLimits.Chat.Requests.Wait(ctx) { resp, err := client.CreateChat(ctx, &openai.CreateChatRequest{ ... }) ... }
func NewRateLimiters ¶
func NewRateLimiters() *RateLimiters
NewRateLimiters returns a new set of rate limiters for the OpenAI API.
type Role ¶
type Role = string
Role is the role of the user for a chat message.
const ( // RoleSystem is a special used to ground the model within the context of the conversation. // // For example, it may be used to provide a name for the assistant, or to provide other global information // or instructions that the model should know about. RoleSystem Role = "system" // RoleUser is the role of the user for a chat message. RoleUser Role = "user" // RoleAssistant is the role of the assistant for a chat message. RoleAssistant Role = "assistant" // RoleFunction is a special role used to represent a function call. RoleFunction Role = "function" )
type Run ¶
type Run struct { ID string `json:"id"` Object string `json:"object"` CreatedAt int `json:"created_at"` ThreadID string `json:"thread_id"` AssistantID string `json:"assistant_id"` Status string `json:"status"` RequiredAction string `json:"required_action,omitempty"` LastError map[string]any `json:"last_error,omitempty"` ExpiresAt int `json:"expires_at"` StartedAt int `json:"started_at,omitempty"` CancelledAt int `json:"cancelled_at,omitempty"` FailedAt int `json:"failed_at,omitempty"` CompletedAt int `json:"completed_at,omitempty"` Model string `json:"model"` Instructions string `json:"instructions"` Tools []map[string]any `json:"tools"` FileIDs []string `json:"file_ids"` Metadata map[string]any `json:"metadata"` }
type RunStatus ¶
type RunStatus = string
https://platform.openai.com/docs/api-reference/runs/object#runs/object-status
const ( RunStatusQueued RunStatus = "queued" RunStatusInProgress RunStatus = "in_progress" RunStatusRequiresAction RunStatus = "requires_action" RunStatusCancelling RunStatus = "cancelling" RunStatusCancelled RunStatus = "cancelled" RunStatusFailed RunStatus = "failed" RunStatusCompleted RunStatus = "completed" RunStatusExpired RunStatus = "expired" )
type RunStep ¶
type RunStep struct { ID string `json:"id"` Object string `json:"object"` Created int `json:"created"` AssistantID string `json:"assistant_id"` ThreadID string `json:"thread_id"` RunID string `json:"run_id"` Type string `json:"type"` Status string `json:"status"` StepDetails map[string]any `json:"step_details"` LastError map[string]any `json:"last_error,omitempty"` ExpiredAt int `json:"expired_at,omitempty"` CanceledAt int `json:"canceled_at,omitempty"` FailedAt int `json:"failed_at,omitempty"` CompletedAt int `json:"completed_at,omitempty"` Metadata map[string]any `json:"metadata,omitempty"` }
https://platform.openai.com/docs/api-reference/runs/step-object
type SubmitToolOutputsRequest ¶
type SubmitToolOutputsRequest struct { // https://platform.openai.com/docs/api-reference/runs/submitToolOutputs#runs-submittooloutputs-thread_id // // Required. ThreadID string `json:"-"` // https://platform.openai.com/docs/api-reference/runs/submitToolOutputs#runs-submittooloutputs-run_id // // Required. RunID string `json:"-"` // https://platform.openai.com/docs/api-reference/runs/submitToolOutputs#runs-submittooloutputs-tool_id // // Required. ToolOuputs []*AssistantToolOutput `json:"tool_outputs"` }
https://platform.openai.com/docs/api-reference/runs/submitToolOutputs
type SubmitToolOutputsResponse ¶
type SubmitToolOutputsResponse = Run
https://platform.openai.com/docs/api-reference/runs/submitToolOutputs
type Thread ¶
type Thread struct { ID string `json:"id"` Object string `json:"object"` Created int `json:"created"` Metadata map[string]any `json:"metadata"` }
https://platform.openai.com/docs/api-reference/threads/object
type ThreadMessage ¶
type ThreadMessage struct { ID string `json:"id"` Object string `json:"object"` CreatedAt int `json:"created_at"` ThreadID string `json:"thread_id"` Role string `json:"role"` Content []ThreadMessageContent `json:"content"` AssistantID string `json:"assistant_id,omitempty"` RunID string `json:"run_id,omitempty"` FileIDs []string `json:"file_ids,omitempty"` Metadata map[string]any `json:"metadata,omitempty"` }
https://platform.openai.com/docs/api-reference/messages/object
type ThreadMessageContent ¶
https://platform.openai.com/docs/api-reference/messages/object
func (ThreadMessageContent) Text ¶
func (t ThreadMessageContent) Text() string
Text returns the text value from the thread message content, or an empty string if the text value is not present.
type UpdateAssistantRequest ¶
type UpdateAssistantRequest struct { // https://platform.openai.com/docs/api-reference/assistants/update#assistants/update-id // // Required. ID string `json:"-"` // https://platform.openai.com/docs/api-reference/assistants/modifyAssistant#assistants-modifyassistant-model // // Optional. Model string `json:"model,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/modifyAssistant#assistants-modifyassistant-name // // Optional. Name string `json:"name,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/modifyAssistant#assistants-modifyassistant-description // // Optional. Description string `json:"description,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/modifyAssistant#assistants-modifyassistant-instructions // // Optional. Instructions string `json:"instructions,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/modifyAssistant#assistants-modifyassistant-tools // // Optional. Tools []map[string]any `json:"tools,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/modifyAssistant#assistants-modifyassistant-file_ids // // Optional. FileIDs []string `json:"file_ids,omitempty"` // https://platform.openai.com/docs/api-reference/assistants/modifyAssistant#assistants-modifyassistant-metadata // // Optional. Metadata map[string]any `json:"metadata,omitempty"` }
https://platform.openai.com/docs/api-reference/assistants/modifyAssistant
type UpdateMessageRequest ¶
type UpdateMessageRequest struct { // https://platform.openai.com/docs/api-reference/messages/getMessage#messages-getmessage-thread_id // // Required. ThreadID string `json:"thread_id"` // https://platform.openai.com/docs/api-reference/messages/getMessage#messages-getmessage-message_id // // Required. MessageID string `json:"message_id"` // https://platform.openai.com/docs/api-reference/messages/modifyMessage#messages-modifymessage-metadata // // Optional. Metadata map[string]any `json:"metadata,omitempty"` }
https://platform.openai.com/docs/api-reference/messages/modifyMessage
type UpdateMessageResponse ¶
type UpdateMessageResponse = ThreadMessage
type UpdateRunRequest ¶
type UpdateRunRequest struct { // https://platform.openai.com/docs/api-reference/runs/modifyRun#runs-modifyrun-thread_id // // Required. ThreadID string `json:"-"` // https://platform.openai.com/docs/api-reference/runs/modifyRun#runs-modifyrun-run_id // // Required. RunID string `json:"-"` // https://platform.openai.com/docs/api-reference/runs/modifyRun#runs-modifyrun-metadata // // Optional. Metadata map[string]any `json:"metadata,omitempty"` }
https://platform.openai.com/docs/api-reference/runs/modifyRun
type UpdateRunResponse ¶
type UpdateRunResponse = Run
https://platform.openai.com/docs/api-reference/runs/modifyRun
type UpdateThreadRequest ¶
type UpdateThreadRequest struct { // https://platform.openai.com/docs/api-reference/threads/modifyThread#threads-modifythread-id // // Required. ID string `json:"thread_id"` // https://platform.openai.com/docs/api-reference/threads/modifyThread#threads-modifythread-metadata // // Optional. Metadata map[string]any `json:"metadata,omitempty"` }
https://platform.openai.com/docs/api-reference/threads/modifyThread
type UpdateThreadResponse ¶
type UpdateThreadResponse = Thread
type UploadFileRequest ¶
type UploadFileRequest struct { // Name of the JSON Lines file to be uploaded. // // If the purpose is set to "fine-tune", each line is a JSON // record with "prompt" and "completion" fields representing // your training examples. // // Required. Name string `json:"name"` // Purpose of the uploaded documents. // // Use "fine-tune" for Fine-tuning. This allows us to validate // the format of the uploaded file. // // Required. Purpose string `json:"purpose"` // Body of the file to upload. // // Required. Body io.Reader `json:"file"` // TODO: how to handle this? }
Directories ¶
Path | Synopsis |
---|---|
cmd
|
|
Package embeddings provides utilities for working with embeddings.
|
Package embeddings provides utilities for working with embeddings. |