Skip to content

Commit

Permalink
chore: add token rate support for 11/06 models (danny-avila#1146)
Browse files — browse the repository at this point in the history
* chore: update model rates with 11/06 rates

* chore: add new models to env.example for OPENAI_MODELS

* chore: reference actual maxTokensMap in ci tests
danny-avila authored Nov 6, 2023
1 parent: 4b63eb5 · commit: 48c087c
Show file tree
Hide file tree
Showing 5 changed files with 110 additions and 11 deletions.
2 changes: 1 addition & 1 deletion .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ DEBUG_OPENAI=false # Set to true to enable debug mode for the OpenAI endpoint
# Identify the available models, separated by commas *without spaces*.
# The first will be default.
# Leave it blank to use internal settings.
# OPENAI_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,text-davinci-003,gpt-4,gpt-4-0314,gpt-4-0613
# OPENAI_MODELS=gpt-3.5-turbo-1106,gpt-4-1106-preview,gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,text-davinci-003,gpt-4,gpt-4-0314,gpt-4-0613

# Titling is enabled by default when initiating a conversation.
# Uncomment the following variable to disable this feature.
Expand Down
6 changes: 6 additions & 0 deletions api/models/tx.js
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@ const tokenValues = {
'32k': { prompt: 60, completion: 120 },
'4k': { prompt: 1.5, completion: 2 },
'16k': { prompt: 3, completion: 4 },
'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
'gpt-4-1106': { prompt: 10, completion: 30 },
};

/**
Expand All @@ -26,8 +28,12 @@ const getValueKey = (model) => {

if (modelName.includes('gpt-3.5-turbo-16k')) {
return '16k';
} else if (modelName.includes('gpt-3.5-turbo-1106')) {
return 'gpt-3.5-turbo-1106';
} else if (modelName.includes('gpt-3.5')) {
return '4k';
} else if (modelName.includes('gpt-4-1106')) {
return 'gpt-4-1106';
} else if (modelName.includes('gpt-4-32k')) {
return '32k';
} else if (modelName.includes('gpt-4')) {
Expand Down
53 changes: 49 additions & 4 deletions api/models/tx.spec.js
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
const { getValueKey, getMultiplier, defaultRate } = require('./tx');
const { getValueKey, getMultiplier, defaultRate, tokenValues } = require('./tx');

describe('getValueKey', () => {
it('should return "16k" for model name containing "gpt-3.5-turbo-16k"', () => {
Expand All @@ -20,27 +20,72 @@ describe('getValueKey', () => {
it('should return undefined for model names that do not match any known patterns', () => {
expect(getValueKey('gpt-5-some-other-info')).toBeUndefined();
});

it('should return "gpt-3.5-turbo-1106" for model name containing "gpt-3.5-turbo-1106"', () => {
expect(getValueKey('gpt-3.5-turbo-1106-some-other-info')).toBe('gpt-3.5-turbo-1106');
expect(getValueKey('openai/gpt-3.5-turbo-1106')).toBe('gpt-3.5-turbo-1106');
expect(getValueKey('gpt-3.5-turbo-1106/openai')).toBe('gpt-3.5-turbo-1106');
});

it('should return "gpt-4-1106" for model name containing "gpt-4-1106"', () => {
expect(getValueKey('gpt-4-1106-some-other-info')).toBe('gpt-4-1106');
expect(getValueKey('gpt-4-1106-vision-preview')).toBe('gpt-4-1106');
expect(getValueKey('gpt-4-1106-preview')).toBe('gpt-4-1106');
expect(getValueKey('openai/gpt-4-1106')).toBe('gpt-4-1106');
expect(getValueKey('gpt-4-1106/openai/')).toBe('gpt-4-1106');
});
});

describe('getMultiplier', () => {
it('should return the correct multiplier for a given valueKey and tokenType', () => {
expect(getMultiplier({ valueKey: '8k', tokenType: 'prompt' })).toBe(30);
expect(getMultiplier({ valueKey: '8k', tokenType: 'completion' })).toBe(60);
expect(getMultiplier({ valueKey: '8k', tokenType: 'prompt' })).toBe(tokenValues['8k'].prompt);
expect(getMultiplier({ valueKey: '8k', tokenType: 'completion' })).toBe(
tokenValues['8k'].completion,
);
});

it('should return defaultRate if tokenType is provided but not found in tokenValues', () => {
expect(getMultiplier({ valueKey: '8k', tokenType: 'unknownType' })).toBe(defaultRate);
});

it('should derive the valueKey from the model if not provided', () => {
expect(getMultiplier({ tokenType: 'prompt', model: 'gpt-4-some-other-info' })).toBe(30);
expect(getMultiplier({ tokenType: 'prompt', model: 'gpt-4-some-other-info' })).toBe(
tokenValues['8k'].prompt,
);
});

it('should return 1 if only model or tokenType is missing', () => {
expect(getMultiplier({ tokenType: 'prompt' })).toBe(1);
expect(getMultiplier({ model: 'gpt-4-some-other-info' })).toBe(1);
});

it('should return the correct multiplier for gpt-3.5-turbo-1106', () => {
expect(getMultiplier({ valueKey: 'gpt-3.5-turbo-1106', tokenType: 'prompt' })).toBe(
tokenValues['gpt-3.5-turbo-1106'].prompt,
);
expect(getMultiplier({ valueKey: 'gpt-3.5-turbo-1106', tokenType: 'completion' })).toBe(
tokenValues['gpt-3.5-turbo-1106'].completion,
);
});

it('should return the correct multiplier for gpt-4-1106', () => {
expect(getMultiplier({ valueKey: 'gpt-4-1106', tokenType: 'prompt' })).toBe(
tokenValues['gpt-4-1106'].prompt,
);
expect(getMultiplier({ valueKey: 'gpt-4-1106', tokenType: 'completion' })).toBe(
tokenValues['gpt-4-1106'].completion,
);
});

it('should derive the valueKey from the model if not provided for new models', () => {
expect(
getMultiplier({ tokenType: 'prompt', model: 'gpt-3.5-turbo-1106-some-other-info' }),
).toBe(tokenValues['gpt-3.5-turbo-1106'].prompt);
expect(getMultiplier({ tokenType: 'completion', model: 'gpt-4-1106-vision-preview' })).toBe(
tokenValues['gpt-4-1106'].completion,
);
});

it('should return defaultRate if derived valueKey does not match any known patterns', () => {
expect(getMultiplier({ tokenType: 'prompt', model: 'gpt-5-some-other-info' })).toBe(
defaultRate,
Expand Down
2 changes: 2 additions & 0 deletions api/utils/tokens.js
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,8 @@ const maxTokensMap = {
'gpt-3.5-turbo-0301': 4095,
'gpt-3.5-turbo-16k': 15999,
'gpt-3.5-turbo-16k-0613': 15999,
'gpt-3.5-turbo-1106': 16380, // -5 from max
'gpt-4-1106': 127995, // -5 from max
};

/**
Expand Down
58 changes: 52 additions & 6 deletions api/utils/tokens.spec.js
Original file line number Diff line number Diff line change
@@ -1,28 +1,30 @@
const { getModelMaxTokens, matchModelName } = require('./tokens');
const { getModelMaxTokens, matchModelName, maxTokensMap } = require('./tokens');

describe('getModelMaxTokens', () => {
test('should return correct tokens for exact match', () => {
expect(getModelMaxTokens('gpt-4-32k-0613')).toBe(32767);
expect(getModelMaxTokens('gpt-4-32k-0613')).toBe(maxTokensMap['gpt-4-32k-0613']);
});

test('should return correct tokens for partial match', () => {
expect(getModelMaxTokens('gpt-4-32k-unknown')).toBe(32767);
expect(getModelMaxTokens('gpt-4-32k-unknown')).toBe(maxTokensMap['gpt-4-32k']);
});

test('should return correct tokens for partial match (OpenRouter)', () => {
expect(getModelMaxTokens('openai/gpt-4-32k')).toBe(32767);
expect(getModelMaxTokens('openai/gpt-4-32k')).toBe(maxTokensMap['gpt-4-32k']);
});

test('should return undefined for no match', () => {
expect(getModelMaxTokens('unknown-model')).toBeUndefined();
});

test('should return correct tokens for another exact match', () => {
expect(getModelMaxTokens('gpt-3.5-turbo-16k-0613')).toBe(15999);
expect(getModelMaxTokens('gpt-3.5-turbo-16k-0613')).toBe(
maxTokensMap['gpt-3.5-turbo-16k-0613'],
);
});

test('should return correct tokens for another partial match', () => {
expect(getModelMaxTokens('gpt-3.5-turbo-unknown')).toBe(4095);
expect(getModelMaxTokens('gpt-3.5-turbo-unknown')).toBe(maxTokensMap['gpt-3.5-turbo']);
});

test('should return undefined for undefined input', () => {
Expand All @@ -36,6 +38,30 @@ describe('getModelMaxTokens', () => {
test('should return undefined for number input', () => {
expect(getModelMaxTokens(123)).toBeUndefined();
});

// 11/06 Update
test('should return correct tokens for gpt-3.5-turbo-1106 exact match', () => {
expect(getModelMaxTokens('gpt-3.5-turbo-1106')).toBe(maxTokensMap['gpt-3.5-turbo-1106']);
});

test('should return correct tokens for gpt-4-1106 exact match', () => {
expect(getModelMaxTokens('gpt-4-1106')).toBe(maxTokensMap['gpt-4-1106']);
});

test('should return correct tokens for gpt-3.5-turbo-1106 partial match', () => {
expect(getModelMaxTokens('something-/gpt-3.5-turbo-1106')).toBe(
maxTokensMap['gpt-3.5-turbo-1106'],
);
expect(getModelMaxTokens('gpt-3.5-turbo-1106/something-/')).toBe(
maxTokensMap['gpt-3.5-turbo-1106'],
);
});

test('should return correct tokens for gpt-4-1106 partial match', () => {
expect(getModelMaxTokens('gpt-4-1106/something')).toBe(maxTokensMap['gpt-4-1106']);
expect(getModelMaxTokens('gpt-4-1106-preview')).toBe(maxTokensMap['gpt-4-1106']);
expect(getModelMaxTokens('gpt-4-1106-vision-preview')).toBe(maxTokensMap['gpt-4-1106']);
});
});

describe('matchModelName', () => {
Expand All @@ -57,4 +83,24 @@ describe('matchModelName', () => {
expect(matchModelName(123)).toBeUndefined();
expect(matchModelName({})).toBeUndefined();
});

// 11/06 Update
it('should return the exact model name for gpt-3.5-turbo-1106 if it exists in maxTokensMap', () => {
expect(matchModelName('gpt-3.5-turbo-1106')).toBe('gpt-3.5-turbo-1106');
});

it('should return the exact model name for gpt-4-1106 if it exists in maxTokensMap', () => {
expect(matchModelName('gpt-4-1106')).toBe('gpt-4-1106');
});

it('should return the closest matching key for gpt-3.5-turbo-1106 partial matches', () => {
expect(matchModelName('gpt-3.5-turbo-1106/something')).toBe('gpt-3.5-turbo-1106');
expect(matchModelName('something/gpt-3.5-turbo-1106')).toBe('gpt-3.5-turbo-1106');
});

it('should return the closest matching key for gpt-4-1106 partial matches', () => {
expect(matchModelName('something/gpt-4-1106')).toBe('gpt-4-1106');
expect(matchModelName('gpt-4-1106-preview')).toBe('gpt-4-1106');
expect(matchModelName('gpt-4-1106-vision-preview')).toBe('gpt-4-1106');
});
});

0 comments on commit 48c087c

Please sign in to comment.