feat(OpenAIClient): AZURE_USE_MODEL_AS_DEPLOYMENT_NAME, AZURE_OPENAI_DEFAULT_MODEL (danny-avila#1165)

* feat(OpenAIClient): AZURE_USE_MODEL_AS_DEPLOYMENT_NAME, AZURE_OPENAI_DEFAULT_MODEL

* ci: fix initializeClient test
danny-avila authored Nov 10, 2023
1 parent 9d100ec commit d5259e1
Showing 10 changed files with 242 additions and 60 deletions.
18 changes: 15 additions & 3 deletions .env.example
@@ -169,11 +169,23 @@ DEBUG_OPENAI=false # Set to true to enable debug mode for the OpenAI endpoint
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME=
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=

# Identify the available models, separated by commas *without spaces*.
# The first will be default.
# Leave it blank to use internal settings.
# NOTE: As of 2023-11-10, the Azure API allows only one model per deployment.
# It's recommended to name your deployments after the model name, e.g. "gpt-35-turbo",
# which allows for fast deployment switching when AZURE_USE_MODEL_AS_DEPLOYMENT_NAME is enabled.
# However, you can use non-model deployment names and set AZURE_OPENAI_DEFAULT_MODEL to ensure it works as expected.

# Identify the available models, separated by commas *without spaces*.
# The first will be default. Leave it blank or as is to use internal settings.
# NOTE: as deployment names can't have periods, they will be removed when the endpoint is generated.
AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4

# (Advanced) this enables the use of the model name as the deployment name, e.g. "gpt-3.5-turbo" would be the deployment name
AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE

# (Advanced) this overrides the model setting for Azure, in case you want to use your custom deployment names
# as the values for AZURE_OPENAI_MODELS
# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo

# To use Azure with the Plugins endpoint, you need the variables above, and uncomment the following variable:
# NOTE: This may not work as expected and Azure OpenAI may not support OpenAI Functions yet
# Omit/leave it commented to use the default OpenAI API
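The snippet below is a rough sketch (not part of the commit) of what AZURE_USE_MODEL_AS_DEPLOYMENT_NAME is expected to do at the URL level, based on the genAzureChatCompletion helper changed later in this diff. The require path, instance name, and API version are placeholder assumptions.

const { genAzureChatCompletion } = require('./api/utils/azureUtils'); // path/export assumed

process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'TRUE';

const azure = {
  azureOpenAIApiInstanceName: 'my-instance', // hypothetical
  azureOpenAIApiDeploymentName: 'my-deployment', // ignored while the flag is enabled
  azureOpenAIApiVersion: '2023-07-01-preview', // hypothetical
};

// With the flag enabled, the requested model (sanitized) becomes the deployment segment:
// https://my-instance.openai.azure.com/openai/deployments/gpt-4/chat/completions?api-version=2023-07-01-preview
console.log(genAzureChatCompletion(azure, 'gpt-4'));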
11 changes: 8 additions & 3 deletions api/app/clients/OpenAIClient.js
@@ -30,9 +30,6 @@ class OpenAIClient extends BaseClient {
: 'discard';
this.shouldSummarize = this.contextStrategy === 'summarize';
this.azure = options.azure || false;
if (this.azure) {
this.azureEndpoint = genAzureChatCompletion(this.azure);
}
this.setOptions(options);
}

@@ -86,6 +83,13 @@
isEnabled(OPENAI_FORCE_PROMPT) ||
(reverseProxy && reverseProxy.includes('completions') && !reverseProxy.includes('chat'));

if (this.azure && process.env.AZURE_OPENAI_DEFAULT_MODEL) {
this.azureEndpoint = genAzureChatCompletion(this.azure, this.modelOptions.model);
this.modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
} else if (this.azure) {
this.azureEndpoint = genAzureChatCompletion(this.azure, this.modelOptions.model);
}

const { model } = this.modelOptions;

this.isChatCompletion = this.useOpenRouter || !!reverseProxy || model.includes('gpt-');
@@ -533,6 +537,7 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
this.options.debug && console.error(e.message, e);
modelOptions.model = OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo';
if (this.azure) {
modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL ?? modelOptions.model;
this.azureEndpoint = genAzureChatCompletion(this.azure, modelOptions.model);
}
const instructionsPayload = [
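As a quick illustration of the new setOptions behavior (mirrored by the tests added below), here is a hedged sketch, not part of the commit; the require path is an assumption and the option shape follows the test helpers.

const OpenAIClient = require('./api/app/clients/OpenAIClient'); // path/export assumed

process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure';

const client = new OpenAIClient('test-api-key', {
  modelOptions: { model: 'my-deployment-model' }, // hypothetical requested model
  azure: {
    azureOpenAIApiInstanceName: 'test-instance',
    azureOpenAIApiDeploymentName: 'test-deployment',
    azureOpenAIApiVersion: '2020-07-01-preview',
  },
});

// The endpoint is generated from the requested model (used as the deployment name),
// then the model actually sent to the API is replaced by AZURE_OPENAI_DEFAULT_MODEL:
// client.azureEndpoint      -> .../deployments/my-deployment-model/chat/completions?...
// client.modelOptions.model -> 'gpt-4-azure'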
65 changes: 64 additions & 1 deletion api/app/clients/llm/createLLM.js
@@ -1,6 +1,60 @@
const { ChatOpenAI } = require('langchain/chat_models/openai');
const { sanitizeModelName } = require('../../../utils');
const { isEnabled } = require('../../../server/utils');

/**
* @typedef {Object} ModelOptions
* @property {string} modelName - The name of the model.
* @property {number} [temperature] - The temperature setting for the model.
* @property {number} [presence_penalty] - The presence penalty setting.
* @property {number} [frequency_penalty] - The frequency penalty setting.
* @property {number} [max_tokens] - The maximum number of tokens to generate.
*/

/**
* @typedef {Object} ConfigOptions
* @property {string} [basePath] - The base path for the API requests.
* @property {Object} [baseOptions] - Base options for the API requests, including headers.
* @property {Object} [httpAgent] - The HTTP agent for the request.
* @property {Object} [httpsAgent] - The HTTPS agent for the request.
*/

/**
* @typedef {Object} Callbacks
* @property {Function} [handleChatModelStart] - A callback function for handleChatModelStart
* @property {Function} [handleLLMEnd] - A callback function for handleLLMEnd
* @property {Function} [handleLLMError] - A callback function for handleLLMError
*/

/**
* @typedef {Object} AzureOptions
* @property {string} [azureOpenAIApiKey] - The Azure OpenAI API key.
* @property {string} [azureOpenAIApiInstanceName] - The Azure OpenAI API instance name.
* @property {string} [azureOpenAIApiDeploymentName] - The Azure OpenAI API deployment name.
* @property {string} [azureOpenAIApiVersion] - The Azure OpenAI API version.
*/

/**
* Creates a new instance of a language model (LLM) for chat interactions.
*
* @param {Object} options - The options for creating the LLM.
* @param {ModelOptions} options.modelOptions - The options specific to the model, including modelName, temperature, presence_penalty, frequency_penalty, and other model-related settings.
* @param {ConfigOptions} options.configOptions - Configuration options for the API requests, including proxy settings and custom headers.
* @param {Callbacks} options.callbacks - Callback functions for managing the lifecycle of the LLM, including token buffers, context, and initial message count.
* @param {boolean} [options.streaming=false] - Determines if the LLM should operate in streaming mode.
* @param {string} options.openAIApiKey - The API key for OpenAI, used for authentication.
* @param {AzureOptions} [options.azure={}] - Optional Azure-specific configurations. If provided, Azure configurations take precedence over OpenAI configurations.
*
* @returns {ChatOpenAI} An instance of the ChatOpenAI class, configured with the provided options.
*
* @example
* const llm = createLLM({
* modelOptions: { modelName: 'gpt-3.5-turbo', temperature: 0.2 },
* configOptions: { basePath: 'https://example.api/path' },
* callbacks: { onMessage: handleMessage },
* openAIApiKey: 'your-api-key'
* });
*/
function createLLM({
modelOptions,
configOptions,
@@ -16,10 +70,19 @@ function createLLM({

let azureOptions = {};
if (azure) {
const useModelName = isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME);

credentials = {};
configuration = {};
azureOptions = azure;
azureOptions.azureOpenAIApiDeploymentName = sanitizeModelName(modelOptions.modelName);

azureOptions.azureOpenAIApiDeploymentName = useModelName
? sanitizeModelName(modelOptions.modelName)
: azureOptions.azureOpenAIApiDeploymentName;
}

if (azure && process.env.AZURE_OPENAI_DEFAULT_MODEL) {
modelOptions.modelName = process.env.AZURE_OPENAI_DEFAULT_MODEL;
}

// console.debug('createLLM: configOptions');
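Below is a hedged usage sketch (not part of the commit) of how createLLM is expected to pick the Azure deployment name when the flag is enabled; the require path, keys, and instance values are placeholder assumptions.

const createLLM = require('./api/app/clients/llm/createLLM'); // path/export assumed

process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';

const llm = createLLM({
  modelOptions: { modelName: 'gpt-3.5-turbo', temperature: 0.2 },
  configOptions: {},
  callbacks: {},
  openAIApiKey: 'your-api-key', // placeholder
  azure: {
    azureOpenAIApiKey: 'your-azure-key', // placeholder
    azureOpenAIApiInstanceName: 'my-instance', // hypothetical
    azureOpenAIApiDeploymentName: 'my-deployment', // expected to be replaced by the flag
    azureOpenAIApiVersion: '2023-07-01-preview', // hypothetical
  },
});

// With the flag on, the deployment name should resolve to 'gpt-35-turbo'
// (periods stripped by sanitizeModelName) rather than 'my-deployment'.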
98 changes: 98 additions & 0 deletions api/app/clients/specs/OpenAIClient.test.js
@@ -12,6 +12,14 @@ describe('OpenAIClient', () => {
{ role: 'assistant', sender: 'Assistant', text: 'Hi', messageId: '2' },
];

beforeAll(() => {
jest.spyOn(console, 'warn').mockImplementation(() => {});
});

afterAll(() => {
console.warn.mockRestore();
});

beforeEach(() => {
const options = {
// debug: true,
@@ -90,6 +98,96 @@
});
});

describe('setOptions with Simplified Azure Integration', () => {
afterEach(() => {
delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
});

const azureOpenAIApiInstanceName = 'test-instance';
const azureOpenAIApiDeploymentName = 'test-deployment';
const azureOpenAIApiVersion = '2020-07-01-preview';

const createOptions = (model) => ({
modelOptions: { model },
azure: {
azureOpenAIApiInstanceName,
azureOpenAIApiDeploymentName,
azureOpenAIApiVersion,
},
});

it('should set model from AZURE_OPENAI_DEFAULT_MODEL when Azure is enabled', () => {
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure';
const options = createOptions('test');
client.azure = options.azure;
client.setOptions(options);
expect(client.modelOptions.model).toBe('gpt-4-azure');
});

it('should not change model if Azure is not enabled', () => {
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure';
const originalModel = 'test';
client.azure = false;
client.setOptions(createOptions('test'));
expect(client.modelOptions.model).toBe(originalModel);
});

it('should not change model if AZURE_OPENAI_DEFAULT_MODEL is not set and model is passed', () => {
const originalModel = 'GROK-LLM';
const options = createOptions(originalModel);
client.azure = options.azure;
client.setOptions(options);
expect(client.modelOptions.model).toBe(originalModel);
});

it('should change model if AZURE_OPENAI_DEFAULT_MODEL is set and model is passed', () => {
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure';
const originalModel = 'GROK-LLM';
const options = createOptions(originalModel);
client.azure = options.azure;
client.setOptions(options);
expect(client.modelOptions.model).toBe(process.env.AZURE_OPENAI_DEFAULT_MODEL);
});

it('should include model in deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME is set', () => {
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';
const model = 'gpt-4-azure';

const AzureClient = new OpenAIClient('test-api-key', createOptions(model));

const expectedValue = `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${model}/chat/completions?api-version=${azureOpenAIApiVersion}`;

expect(AzureClient.modelOptions.model).toBe(model);
expect(AzureClient.azureEndpoint).toBe(expectedValue);
});

it('should include model in deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME and default model is set', () => {
const defaultModel = 'gpt-4-azure';
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';
process.env.AZURE_OPENAI_DEFAULT_MODEL = defaultModel;
const model = 'gpt-4-this-is-a-test-model-name';

const AzureClient = new OpenAIClient('test-api-key', createOptions(model));

const expectedValue = `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${model}/chat/completions?api-version=${azureOpenAIApiVersion}`;

expect(AzureClient.modelOptions.model).toBe(defaultModel);
expect(AzureClient.azureEndpoint).toBe(expectedValue);
});

it('should not include model in deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME is not set', () => {
const model = 'gpt-4-azure';

const AzureClient = new OpenAIClient('test-api-key', createOptions(model));

const expectedValue = `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${azureOpenAIApiDeploymentName}/chat/completions?api-version=${azureOpenAIApiVersion}`;

expect(AzureClient.modelOptions.model).toBe(model);
expect(AzureClient.azureEndpoint).toBe(expectedValue);
});
});

describe('selectTokenizer', () => {
it('should get the correct tokenizer based on the instance state', () => {
const tokenizer = client.selectTokenizer();
5 changes: 1 addition & 4 deletions api/server/routes/endpoints/gptPlugins/initializeClient.js
@@ -1,6 +1,6 @@
const { PluginsClient } = require('../../../../app');
const { isEnabled } = require('../../../utils');
const { getAzureCredentials, sanitizeModelName } = require('../../../../utils');
const { getAzureCredentials } = require('../../../../utils');
const { getUserKey, checkUserKeyExpiry } = require('../../../services/UserService');

const initializeClient = async ({ req, res, endpointOption }) => {
@@ -47,9 +47,6 @@ const initializeClient = async ({ req, res, endpointOption }) => {

if (useAzure || (apiKey && apiKey.includes('azure') && !clientOptions.azure)) {
clientOptions.azure = isUserProvided ? JSON.parse(userKey) : getAzureCredentials();
clientOptions.azure.azureOpenAIApiDeploymentName = sanitizeModelName(
clientOptions.modelOptions.model,
);
apiKey = clientOptions.azure.azureOpenAIApiKey;
}

32 changes: 6 additions & 26 deletions api/server/routes/endpoints/gptPlugins/initializeClient.spec.js
@@ -43,7 +43,12 @@ describe('gptPlugins/initializeClient', () => {

test('should initialize PluginsClient with Azure credentials when PLUGINS_USE_AZURE is true', async () => {
process.env.AZURE_API_KEY = 'test-azure-api-key';
process.env.PLUGINS_USE_AZURE = 'true';
process.env.AZURE_OPENAI_API_INSTANCE_NAME = 'some-value';
process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value';
process.env.AZURE_OPENAI_API_VERSION = 'some-value';
process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value';
process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value';
process.env.PLUGINS_USE_AZURE = 'true';
process.env.DEBUG_PLUGINS = 'false';
process.env.OPENAI_SUMMARIZE = 'false';

@@ -174,31 +179,6 @@
);
});

test('should sanitize model name for Azure when modelOptions is provided', async () => {
process.env.AZURE_API_KEY = 'azure-provided-api-key';
process.env.PLUGINS_USE_AZURE = 'true';

const modelName = 'test-3.5-model';
const sanitizedModelName = 'test-35-model';
const req = {
body: { key: new Date(Date.now() + 10000).toISOString() },
user: { id: '123' },
};
const res = {};
const endpointOption = { modelOptions: { model: modelName } };

getUserKey.mockResolvedValue(
JSON.stringify({
azureOpenAIApiKey: 'test-user-provided-azure-api-key',
azureOpenAIApiDeploymentName: modelName,
}),
);

const { azure } = await initializeClient({ req, res, endpointOption });

expect(azure.azureOpenAIApiDeploymentName).toBe(sanitizedModelName);
});

test('should throw an error if the user-provided Azure key is invalid JSON', async () => {
process.env.AZURE_API_KEY = 'user_provided';
process.env.PLUGINS_USE_AZURE = 'true';
5 changes: 1 addition & 4 deletions api/server/routes/endpoints/openAI/initializeClient.js
@@ -1,6 +1,6 @@
const { OpenAIClient } = require('../../../../app');
const { isEnabled } = require('../../../utils');
const { getAzureCredentials, sanitizeModelName } = require('../../../../utils');
const { getAzureCredentials } = require('../../../../utils');
const { getUserKey, checkUserKeyExpiry } = require('../../../services/UserService');

const initializeClient = async ({ req, res, endpointOption }) => {
@@ -44,9 +44,6 @@ const initializeClient = async ({ req, res, endpointOption }) => {

if (endpoint === 'azureOpenAI') {
clientOptions.azure = isUserProvided ? JSON.parse(userKey) : getAzureCredentials();
clientOptions.azure.azureOpenAIApiDeploymentName = sanitizeModelName(
clientOptions.modelOptions.model,
);
apiKey = clientOptions.azure.azureOpenAIApiKey;
}

24 changes: 6 additions & 18 deletions api/server/routes/endpoints/openAI/initializeClient.spec.js
@@ -41,7 +41,12 @@ describe('initializeClient', () => {

test('should initialize client with Azure credentials when endpoint is azureOpenAI', async () => {
process.env.AZURE_API_KEY = 'test-azure-api-key';
process.env.OPENAI_API_KEY = 'test-openai-api-key';
process.env.AZURE_OPENAI_API_INSTANCE_NAME = 'some-value';
process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value';
process.env.AZURE_OPENAI_API_VERSION = 'some-value';
process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value';
process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value';
process.env.OPENAI_API_KEY = 'test-openai-api-key';
process.env.DEBUG_OPENAI = 'false';
process.env.OPENAI_SUMMARIZE = 'false';

@@ -191,21 +196,4 @@
/Your OpenAI API key has expired/,
);
});

test('should sanitize model name for Azure when modelOptions is provided', async () => {
const modelName = 'test-3.5-model';
const sanitizedModelName = 'test-35-model';
const req = {
body: { key: new Date(Date.now() + 10000).toISOString(), endpoint: 'azureOpenAI' },
user: { id: '123' },
};
const res = {};
const endpointOption = { modelOptions: { model: modelName } };
process.env.AZURE_API_KEY = 'azure-provided-api-key';
getUserKey.mockResolvedValue('test-user-provided-openai-api-key');

const result = await initializeClient({ req, res, endpointOption });

expect(result.client.options.azure.azureOpenAIApiDeploymentName).toBe(sanitizedModelName);
});
});
4 changes: 3 additions & 1 deletion api/utils/azureUtils.js
@@ -6,6 +6,8 @@
* @property {string} azureOpenAIApiVersion - The Azure OpenAI API version.
*/

const { isEnabled } = require('../server/utils');

/**
* Sanitizes the model name to be used in the URL by removing or replacing disallowed characters.
* @param {string} modelName - The model name to be sanitized.
@@ -44,7 +46,7 @@ const genAzureChatCompletion = (
) => {
// Determine the deployment segment of the URL based on provided modelName or azureOpenAIApiDeploymentName
let deploymentSegment;
if (modelName) {
if (isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME) && modelName) {
const sanitizedModelName = sanitizeModelName(modelName);
deploymentSegment = `${sanitizedModelName}`;
} else if (azureOpenAIApiDeploymentName) {
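For reference, a small sketch (not part of the commit) of the two helpers touched in this file, matching the behavior exercised by the tests above; the require path is an assumption.

const { sanitizeModelName, genAzureChatCompletion } = require('./api/utils/azureUtils'); // path/export assumed

// Deployment names can't contain periods, so they are stripped:
console.log(sanitizeModelName('gpt-3.5-turbo')); // expected: 'gpt-35-turbo'

// Without AZURE_USE_MODEL_AS_DEPLOYMENT_NAME, the model argument is ignored and the
// configured deployment name is used in the URL:
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
const url = genAzureChatCompletion(
  {
    azureOpenAIApiInstanceName: 'test-instance',
    azureOpenAIApiDeploymentName: 'test-deployment',
    azureOpenAIApiVersion: '2020-07-01-preview',
  },
  'gpt-4',
);
console.log(url);
// expected: https://test-instance.openai.azure.com/openai/deployments/test-deployment/chat/completions?api-version=2020-07-01-preview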