Skip to content

Commit

Permalink
feat: extract out ImageModel, AudioModel, SpeechModel (openai#3)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-app[bot] committed Aug 7, 2024
1 parent 103d454 commit 4b90869
Show file tree
Hide file tree
Showing 18 changed files with 95 additions and 226 deletions.
16 changes: 16 additions & 0 deletions api.md
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,10 @@ Methods:

# Images

Params Types:

- <a href="https://pkg.go.dev/github.com/openai/openai-go">openai</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go#ImageModel">ImageModel</a>

Response Types:

- <a href="https://pkg.go.dev/github.com/openai/openai-go">openai</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go#Image">Image</a>
Expand All @@ -100,6 +104,10 @@ Methods:

# Audio

Params Types:

- <a href="https://pkg.go.dev/github.com/openai/openai-go">openai</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go#AudioModel">AudioModel</a>

## Transcriptions

Response Types:
Expand All @@ -122,12 +130,20 @@ Methods:

## Speech

Params Types:

- <a href="https://pkg.go.dev/github.com/openai/openai-go">openai</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go#SpeechModel">SpeechModel</a>

Methods:

- <code title="post /audio/speech">client.Audio.Speech.<a href="https://pkg.go.dev/github.com/openai/openai-go#AudioSpeechService.New">New</a>(ctx <a href="https://pkg.go.dev/context">context</a>.<a href="https://pkg.go.dev/context#Context">Context</a>, body <a href="https://pkg.go.dev/github.com/openai/openai-go">openai</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go#AudioSpeechNewParams">AudioSpeechNewParams</a>) (http.Response, <a href="https://pkg.go.dev/builtin#error">error</a>)</code>

# Moderations

Params Types:

- <a href="https://pkg.go.dev/github.com/openai/openai-go">openai</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go#ModerationModel">ModerationModel</a>

Response Types:

- <a href="https://pkg.go.dev/github.com/openai/openai-go">openai</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go#Moderation">Moderation</a>
Expand Down
14 changes: 14 additions & 0 deletions audio.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,3 +30,17 @@ func NewAudioService(opts ...option.RequestOption) (r *AudioService) {
r.Speech = NewAudioSpeechService(opts...)
return
}

// AudioModel identifies which speech-to-text model a request targets.
type AudioModel string

// Known audio model identifiers.
const (
	AudioModelWhisper1 AudioModel = "whisper-1"
)

// IsKnown reports whether r matches one of the declared AudioModel
// constants. Unrecognized (including empty) values return false.
func (r AudioModel) IsKnown() bool {
	return r == AudioModelWhisper1
}
32 changes: 16 additions & 16 deletions audiospeech.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,12 +40,27 @@ func (r *AudioSpeechService) New(ctx context.Context, body AudioSpeechNewParams,
return
}

// SpeechModel identifies which text-to-speech model a request targets.
type SpeechModel string

// Known speech model identifiers.
const (
	SpeechModelTTS1   SpeechModel = "tts-1"
	SpeechModelTTS1HD SpeechModel = "tts-1-hd"
)

// IsKnown reports whether r matches one of the declared SpeechModel
// constants. Unrecognized (including empty) values return false.
func (r SpeechModel) IsKnown() bool {
	return r == SpeechModelTTS1 || r == SpeechModelTTS1HD
}

type AudioSpeechNewParams struct {
// The text to generate audio for. The maximum length is 4096 characters.
Input param.Field[string] `json:"input,required"`
// One of the available [TTS models](https://platform.openai.com/docs/models/tts):
// `tts-1` or `tts-1-hd`
Model param.Field[AudioSpeechNewParamsModel] `json:"model,required"`
Model param.Field[SpeechModel] `json:"model,required"`
// The voice to use when generating the audio. Supported voices are `alloy`,
// `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
// available in the
Expand All @@ -63,21 +78,6 @@ func (r AudioSpeechNewParams) MarshalJSON() (data []byte, err error) {
return apijson.MarshalRoot(r)
}

// AudioSpeechNewParamsModel identifies the TTS model named in a speech
// request body.
type AudioSpeechNewParamsModel string

// Known model identifiers accepted by the speech endpoint.
const (
	AudioSpeechNewParamsModelTTS1   AudioSpeechNewParamsModel = "tts-1"
	AudioSpeechNewParamsModelTTS1HD AudioSpeechNewParamsModel = "tts-1-hd"
)

// IsKnown reports whether r matches one of the declared
// AudioSpeechNewParamsModel constants.
func (r AudioSpeechNewParamsModel) IsKnown() bool {
	return r == AudioSpeechNewParamsModelTTS1 || r == AudioSpeechNewParamsModelTTS1HD
}

// The voice to use when generating the audio. Supported voices are `alloy`,
// `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
// available in the
Expand Down
2 changes: 1 addition & 1 deletion audiospeech_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ func TestAudioSpeechNewWithOptionalParams(t *testing.T) {
)
resp, err := client.Audio.Speech.New(context.TODO(), openai.AudioSpeechNewParams{
Input: openai.F("input"),
Model: openai.F(openai.AudioSpeechNewParamsModelTTS1),
Model: openai.F(openai.SpeechModelTTS1),
Voice: openai.F(openai.AudioSpeechNewParamsVoiceAlloy),
ResponseFormat: openai.F(openai.AudioSpeechNewParamsResponseFormatMP3),
Speed: openai.F(0.250000),
Expand Down
16 changes: 1 addition & 15 deletions audiotranscription.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ type AudioTranscriptionNewParams struct {
File param.Field[io.Reader] `json:"file,required" format:"binary"`
// ID of the model to use. Only `whisper-1` (which is powered by our open source
// Whisper V2 model) is currently available.
Model param.Field[AudioTranscriptionNewParamsModel] `json:"model,required"`
Model param.Field[AudioModel] `json:"model,required"`
// The language of the input audio. Supplying the input language in
// [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
// improve accuracy and latency.
Expand Down Expand Up @@ -114,20 +114,6 @@ func (r AudioTranscriptionNewParams) MarshalMultipart() (data []byte, contentTyp
return buf.Bytes(), writer.FormDataContentType(), nil
}

// AudioTranscriptionNewParamsModel identifies the model named in a
// transcription request body.
type AudioTranscriptionNewParamsModel string

// Known model identifiers accepted by the transcription endpoint.
const (
	AudioTranscriptionNewParamsModelWhisper1 AudioTranscriptionNewParamsModel = "whisper-1"
)

// IsKnown reports whether r matches one of the declared
// AudioTranscriptionNewParamsModel constants.
func (r AudioTranscriptionNewParamsModel) IsKnown() bool {
	return r == AudioTranscriptionNewParamsModelWhisper1
}

// The format of the transcript output, in one of these options: `json`, `text`,
// `srt`, `verbose_json`, or `vtt`.
type AudioTranscriptionNewParamsResponseFormat string
Expand Down
2 changes: 1 addition & 1 deletion audiotranscription_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ func TestAudioTranscriptionNewWithOptionalParams(t *testing.T) {
)
_, err := client.Audio.Transcriptions.New(context.TODO(), openai.AudioTranscriptionNewParams{
File: openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))),
Model: openai.F(openai.AudioTranscriptionNewParamsModelWhisper1),
Model: openai.F(openai.AudioModelWhisper1),
Language: openai.F("language"),
Prompt: openai.F("prompt"),
ResponseFormat: openai.F(openai.AudioTranscriptionNewParamsResponseFormatJSON),
Expand Down
16 changes: 1 addition & 15 deletions audiotranslation.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ type AudioTranslationNewParams struct {
File param.Field[io.Reader] `json:"file,required" format:"binary"`
// ID of the model to use. Only `whisper-1` (which is powered by our open source
// Whisper V2 model) is currently available.
Model param.Field[AudioTranslationNewParamsModel] `json:"model,required"`
Model param.Field[AudioModel] `json:"model,required"`
// An optional text to guide the model's style or continue a previous audio
// segment. The
// [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
Expand Down Expand Up @@ -100,17 +100,3 @@ func (r AudioTranslationNewParams) MarshalMultipart() (data []byte, contentType
}
return buf.Bytes(), writer.FormDataContentType(), nil
}

// AudioTranslationNewParamsModel identifies the model named in a
// translation request body.
type AudioTranslationNewParamsModel string

// Known model identifiers accepted by the translation endpoint.
const (
	AudioTranslationNewParamsModelWhisper1 AudioTranslationNewParamsModel = "whisper-1"
)

// IsKnown reports whether r matches one of the declared
// AudioTranslationNewParamsModel constants.
func (r AudioTranslationNewParamsModel) IsKnown() bool {
	return r == AudioTranslationNewParamsModelWhisper1
}
2 changes: 1 addition & 1 deletion audiotranslation_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ func TestAudioTranslationNewWithOptionalParams(t *testing.T) {
)
_, err := client.Audio.Translations.New(context.TODO(), openai.AudioTranslationNewParams{
File: openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))),
Model: openai.F(openai.AudioTranslationNewParamsModelWhisper1),
Model: openai.F(openai.AudioModelWhisper1),
Prompt: openai.F("prompt"),
ResponseFormat: openai.F("response_format"),
Temperature: openai.F(0.000000),
Expand Down
37 changes: 1 addition & 36 deletions betaassistant.go
Original file line number Diff line number Diff line change
Expand Up @@ -1988,7 +1988,7 @@ type BetaAssistantNewParams struct {
// see all of your available models, or see our
// [Model overview](https://platform.openai.com/docs/models/overview) for
// descriptions of them.
Model param.Field[BetaAssistantNewParamsModel] `json:"model,required"`
Model param.Field[ChatModel] `json:"model,required"`
// The description of the assistant. The maximum length is 512 characters.
Description param.Field[string] `json:"description"`
// The system instructions that the assistant uses. The maximum length is 256,000
Expand Down Expand Up @@ -2042,41 +2042,6 @@ func (r BetaAssistantNewParams) MarshalJSON() (data []byte, err error) {
return apijson.MarshalRoot(r)
}

// BetaAssistantNewParamsModel enumerates the chat models accepted when
// creating an assistant.
type BetaAssistantNewParamsModel string

// Known model identifiers accepted by the assistants endpoint. The
// constant values are the wire strings sent in the request body.
const (
	BetaAssistantNewParamsModelGPT4o               BetaAssistantNewParamsModel = "gpt-4o"
	BetaAssistantNewParamsModelGPT4o2024_05_13     BetaAssistantNewParamsModel = "gpt-4o-2024-05-13"
	BetaAssistantNewParamsModelGPT4oMini           BetaAssistantNewParamsModel = "gpt-4o-mini"
	BetaAssistantNewParamsModelGPT4oMini2024_07_18 BetaAssistantNewParamsModel = "gpt-4o-mini-2024-07-18"
	BetaAssistantNewParamsModelGPT4Turbo           BetaAssistantNewParamsModel = "gpt-4-turbo"
	BetaAssistantNewParamsModelGPT4Turbo2024_04_09 BetaAssistantNewParamsModel = "gpt-4-turbo-2024-04-09"
	BetaAssistantNewParamsModelGPT4_0125Preview    BetaAssistantNewParamsModel = "gpt-4-0125-preview"
	BetaAssistantNewParamsModelGPT4TurboPreview    BetaAssistantNewParamsModel = "gpt-4-turbo-preview"
	BetaAssistantNewParamsModelGPT4_1106Preview    BetaAssistantNewParamsModel = "gpt-4-1106-preview"
	BetaAssistantNewParamsModelGPT4VisionPreview   BetaAssistantNewParamsModel = "gpt-4-vision-preview"
	BetaAssistantNewParamsModelGPT4                BetaAssistantNewParamsModel = "gpt-4"
	BetaAssistantNewParamsModelGPT4_0314           BetaAssistantNewParamsModel = "gpt-4-0314"
	BetaAssistantNewParamsModelGPT4_0613           BetaAssistantNewParamsModel = "gpt-4-0613"
	BetaAssistantNewParamsModelGPT4_32k            BetaAssistantNewParamsModel = "gpt-4-32k"
	BetaAssistantNewParamsModelGPT4_32k0314        BetaAssistantNewParamsModel = "gpt-4-32k-0314"
	BetaAssistantNewParamsModelGPT4_32k0613        BetaAssistantNewParamsModel = "gpt-4-32k-0613"
	BetaAssistantNewParamsModelGPT3_5Turbo         BetaAssistantNewParamsModel = "gpt-3.5-turbo"
	BetaAssistantNewParamsModelGPT3_5Turbo16k      BetaAssistantNewParamsModel = "gpt-3.5-turbo-16k"
	BetaAssistantNewParamsModelGPT3_5Turbo0613     BetaAssistantNewParamsModel = "gpt-3.5-turbo-0613"
	BetaAssistantNewParamsModelGPT3_5Turbo1106     BetaAssistantNewParamsModel = "gpt-3.5-turbo-1106"
	BetaAssistantNewParamsModelGPT3_5Turbo0125     BetaAssistantNewParamsModel = "gpt-3.5-turbo-0125"
	BetaAssistantNewParamsModelGPT3_5Turbo16k0613  BetaAssistantNewParamsModel = "gpt-3.5-turbo-16k-0613"
)

// IsKnown reports whether r matches one of the declared
// BetaAssistantNewParamsModel constants. Unrecognized values return false.
func (r BetaAssistantNewParamsModel) IsKnown() bool {
	switch r {
	case BetaAssistantNewParamsModelGPT4o,
		BetaAssistantNewParamsModelGPT4o2024_05_13,
		BetaAssistantNewParamsModelGPT4oMini,
		BetaAssistantNewParamsModelGPT4oMini2024_07_18,
		BetaAssistantNewParamsModelGPT4Turbo,
		BetaAssistantNewParamsModelGPT4Turbo2024_04_09,
		BetaAssistantNewParamsModelGPT4_0125Preview,
		BetaAssistantNewParamsModelGPT4TurboPreview,
		BetaAssistantNewParamsModelGPT4_1106Preview,
		BetaAssistantNewParamsModelGPT4VisionPreview,
		BetaAssistantNewParamsModelGPT4,
		BetaAssistantNewParamsModelGPT4_0314,
		BetaAssistantNewParamsModelGPT4_0613,
		BetaAssistantNewParamsModelGPT4_32k,
		BetaAssistantNewParamsModelGPT4_32k0314,
		BetaAssistantNewParamsModelGPT4_32k0613,
		BetaAssistantNewParamsModelGPT3_5Turbo,
		BetaAssistantNewParamsModelGPT3_5Turbo16k,
		BetaAssistantNewParamsModelGPT3_5Turbo0613,
		BetaAssistantNewParamsModelGPT3_5Turbo1106,
		BetaAssistantNewParamsModelGPT3_5Turbo0125,
		BetaAssistantNewParamsModelGPT3_5Turbo16k0613:
		return true
	default:
		return false
	}
}

// A set of resources that are used by the assistant's tools. The resources are
// specific to the type of tool. For example, the `code_interpreter` tool requires
// a list of file IDs, while the `file_search` tool requires a list of vector store
Expand Down
2 changes: 1 addition & 1 deletion betaassistant_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ func TestBetaAssistantNewWithOptionalParams(t *testing.T) {
option.WithAPIKey("My API Key"),
)
_, err := client.Beta.Assistants.New(context.TODO(), openai.BetaAssistantNewParams{
Model: openai.F(openai.BetaAssistantNewParamsModelGPT4o),
Model: openai.F(openai.ChatModelGPT4o),
Description: openai.F("description"),
Instructions: openai.F("instructions"),
Metadata: openai.F[any](map[string]interface{}{}),
Expand Down
37 changes: 1 addition & 36 deletions betathread.go
Original file line number Diff line number Diff line change
Expand Up @@ -972,7 +972,7 @@ type BetaThreadNewAndRunParams struct {
// be used to execute this run. If a value is provided here, it will override the
// model associated with the assistant. If not, the model associated with the
// assistant will be used.
Model param.Field[BetaThreadNewAndRunParamsModel] `json:"model"`
Model param.Field[ChatModel] `json:"model"`
// Whether to enable
// [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
// during tool use.
Expand Down Expand Up @@ -1030,41 +1030,6 @@ func (r BetaThreadNewAndRunParams) MarshalJSON() (data []byte, err error) {
return apijson.MarshalRoot(r)
}

// BetaThreadNewAndRunParamsModel enumerates the chat models accepted when
// creating and running a thread; it overrides the assistant's model when set.
type BetaThreadNewAndRunParamsModel string

// Known model identifiers accepted by the thread-run endpoint. The
// constant values are the wire strings sent in the request body.
const (
	BetaThreadNewAndRunParamsModelGPT4o               BetaThreadNewAndRunParamsModel = "gpt-4o"
	BetaThreadNewAndRunParamsModelGPT4o2024_05_13     BetaThreadNewAndRunParamsModel = "gpt-4o-2024-05-13"
	BetaThreadNewAndRunParamsModelGPT4oMini           BetaThreadNewAndRunParamsModel = "gpt-4o-mini"
	BetaThreadNewAndRunParamsModelGPT4oMini2024_07_18 BetaThreadNewAndRunParamsModel = "gpt-4o-mini-2024-07-18"
	BetaThreadNewAndRunParamsModelGPT4Turbo           BetaThreadNewAndRunParamsModel = "gpt-4-turbo"
	BetaThreadNewAndRunParamsModelGPT4Turbo2024_04_09 BetaThreadNewAndRunParamsModel = "gpt-4-turbo-2024-04-09"
	BetaThreadNewAndRunParamsModelGPT4_0125Preview    BetaThreadNewAndRunParamsModel = "gpt-4-0125-preview"
	BetaThreadNewAndRunParamsModelGPT4TurboPreview    BetaThreadNewAndRunParamsModel = "gpt-4-turbo-preview"
	BetaThreadNewAndRunParamsModelGPT4_1106Preview    BetaThreadNewAndRunParamsModel = "gpt-4-1106-preview"
	BetaThreadNewAndRunParamsModelGPT4VisionPreview   BetaThreadNewAndRunParamsModel = "gpt-4-vision-preview"
	BetaThreadNewAndRunParamsModelGPT4                BetaThreadNewAndRunParamsModel = "gpt-4"
	BetaThreadNewAndRunParamsModelGPT4_0314           BetaThreadNewAndRunParamsModel = "gpt-4-0314"
	BetaThreadNewAndRunParamsModelGPT4_0613           BetaThreadNewAndRunParamsModel = "gpt-4-0613"
	BetaThreadNewAndRunParamsModelGPT4_32k            BetaThreadNewAndRunParamsModel = "gpt-4-32k"
	BetaThreadNewAndRunParamsModelGPT4_32k0314        BetaThreadNewAndRunParamsModel = "gpt-4-32k-0314"
	BetaThreadNewAndRunParamsModelGPT4_32k0613        BetaThreadNewAndRunParamsModel = "gpt-4-32k-0613"
	BetaThreadNewAndRunParamsModelGPT3_5Turbo         BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo"
	BetaThreadNewAndRunParamsModelGPT3_5Turbo16k      BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-16k"
	BetaThreadNewAndRunParamsModelGPT3_5Turbo0613     BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-0613"
	BetaThreadNewAndRunParamsModelGPT3_5Turbo1106     BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-1106"
	BetaThreadNewAndRunParamsModelGPT3_5Turbo0125     BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-0125"
	BetaThreadNewAndRunParamsModelGPT3_5Turbo16k0613  BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-16k-0613"
)

// IsKnown reports whether r matches one of the declared
// BetaThreadNewAndRunParamsModel constants. Unrecognized values return false.
func (r BetaThreadNewAndRunParamsModel) IsKnown() bool {
	switch r {
	case BetaThreadNewAndRunParamsModelGPT4o,
		BetaThreadNewAndRunParamsModelGPT4o2024_05_13,
		BetaThreadNewAndRunParamsModelGPT4oMini,
		BetaThreadNewAndRunParamsModelGPT4oMini2024_07_18,
		BetaThreadNewAndRunParamsModelGPT4Turbo,
		BetaThreadNewAndRunParamsModelGPT4Turbo2024_04_09,
		BetaThreadNewAndRunParamsModelGPT4_0125Preview,
		BetaThreadNewAndRunParamsModelGPT4TurboPreview,
		BetaThreadNewAndRunParamsModelGPT4_1106Preview,
		BetaThreadNewAndRunParamsModelGPT4VisionPreview,
		BetaThreadNewAndRunParamsModelGPT4,
		BetaThreadNewAndRunParamsModelGPT4_0314,
		BetaThreadNewAndRunParamsModelGPT4_0613,
		BetaThreadNewAndRunParamsModelGPT4_32k,
		BetaThreadNewAndRunParamsModelGPT4_32k0314,
		BetaThreadNewAndRunParamsModelGPT4_32k0613,
		BetaThreadNewAndRunParamsModelGPT3_5Turbo,
		BetaThreadNewAndRunParamsModelGPT3_5Turbo16k,
		BetaThreadNewAndRunParamsModelGPT3_5Turbo0613,
		BetaThreadNewAndRunParamsModelGPT3_5Turbo1106,
		BetaThreadNewAndRunParamsModelGPT3_5Turbo0125,
		BetaThreadNewAndRunParamsModelGPT3_5Turbo16k0613:
		return true
	default:
		return false
	}
}

// If no thread is provided, an empty thread will be created.
type BetaThreadNewAndRunParamsThread struct {
// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
Expand Down
2 changes: 1 addition & 1 deletion betathread_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -248,7 +248,7 @@ func TestBetaThreadNewAndRunWithOptionalParams(t *testing.T) {
MaxCompletionTokens: openai.F(int64(256)),
MaxPromptTokens: openai.F(int64(256)),
Metadata: openai.F[any](map[string]interface{}{}),
Model: openai.F(openai.BetaThreadNewAndRunParamsModelGPT4o),
Model: openai.F(openai.ChatModelGPT4o),
ParallelToolCalls: openai.F(true),
ResponseFormat: openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)),
Temperature: openai.F(1.000000),
Expand Down
Loading

0 comments on commit 4b90869

Please sign in to comment.