Skip to content

Commit

Permalink
Update rank_gpt.py
Browse files Browse the repository at this point in the history
  • Loading branch information
sunnweiwei authored Jan 20, 2024
1 parent fae9570 commit bc2a56f
Showing 1 changed file with 14 additions and 9 deletions.
23 changes: 14 additions & 9 deletions rank_gpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,18 @@
from tqdm import tqdm
import time
import openai
from openai import OpenAI
import json
import tiktoken

try:
    # Optional dependency: litellm routes non-OpenAI models through a
    # unified completion() API (used by SafeOpenai.chat).
    import litellm
    from litellm import completion
except ImportError:
    # Catch only ImportError -- a bare `except:` would also swallow
    # KeyboardInterrupt/SystemExit and hide real bugs inside litellm.
    #
    # NOTE(review): the previous fallback bound
    # `completion = openai.ChatCompletion.create`; `openai.ChatCompletion`
    # was removed in openai>=1.0 (this file now uses `from openai import
    # OpenAI`), and merely touching that attribute raises, which crashed the
    # module import whenever litellm was missing. Bind None instead so the
    # name stays defined and callers can test for availability.
    litellm = None
    completion = None


class SafeOpenai:
def __init__(self, keys=None, start_id=None, proxy=None):
if isinstance(keys, str):
Expand All @@ -19,16 +24,15 @@ def __init__(self, keys=None, start_id=None, proxy=None):
self.key = keys
self.key_id = start_id or 0
self.key_id = self.key_id % len(self.key)
openai.proxy = proxy
openai.api_key = self.key[self.key_id % len(self.key)]
self.api_key = self.key[self.key_id % len(self.key)]
self.client = OpenAI(api_key=self.api_key)

def chat(self, *args, return_text=False, reduce_length=False, **kwargs):
    """Call the chat-completions endpoint, retrying until it succeeds.

    Any exception is printed, the key ring advances to the next API key,
    and the request is retried after a short pause. A context-length error
    aborts immediately with the sentinel string 'ERROR::reduce_length'.

    Returns the completion object, or only the first choice's message text
    when return_text=True.

    NOTE(review): the `reduce_length` flag is accepted but never consulted
    in this body -- confirm whether callers rely on it.
    """
    while True:
        try:
            model = args[0] if len(args) > 0 else kwargs["model"]
            if "gpt" in model:
                response = self.client.chat.completions.create(*args, **kwargs, timeout=30)
            elif litellm is not None and model in litellm.model_list:
                # Call through the module: the original assigned to a local
                # named `completion` and then read it on the right-hand side,
                # which raised UnboundLocalError in this branch. The extra
                # `litellm is not None` guard keeps a missing litellm install
                # from raising AttributeError (and thus retrying forever).
                response = litellm.completion(*args, **kwargs, api_key=self.api_key, force_timeout=30)
            # Assumes one of the branches above matched; an unrecognized
            # model falls through with `response` unbound (the original had
            # the same unhandled corner).
            break
        except Exception as e:
            print(e)
            if "This model's maximum context length is" in str(e):
                print('reduce_length')
                return 'ERROR::reduce_length'
            # Rotate to the next key AND refresh the cached key before
            # rebuilding the client: the original rebuilt the client with the
            # stale self.api_key, so key rotation silently had no effect.
            self.key_id = (self.key_id + 1) % len(self.key)
            self.api_key = self.key[self.key_id]
            self.client = OpenAI(api_key=self.api_key)
            time.sleep(0.1)
    if return_text:
        response = response.choices[0].message.content
    return response

def text(self, *args, return_text=False, reduce_length=False, **kwargs):
    """Call the legacy text-completions endpoint, retrying until success.

    Any exception is printed, the key ring advances to the next API key,
    and the request is retried after a short pause. A context-length error
    aborts immediately with the sentinel string 'ERROR::reduce_length'.

    Returns the completion object, or only the first choice's text when
    return_text=True.

    NOTE(review): the `reduce_length` flag is accepted but never consulted
    in this body -- confirm whether callers rely on it.
    """
    while True:
        try:
            result = self.client.completions.create(*args, **kwargs)
            break
        except Exception as e:
            print(e)
            if "This model's maximum context length is" in str(e):
                print('reduce_length')
                return 'ERROR::reduce_length'
            # Rotate to the next key AND refresh the cached key before
            # rebuilding the client: the original (diff line L79) rebuilt the
            # client with the stale self.api_key -- the pre-migration code
            # (L78) updated the active key here, so restore that behavior.
            self.key_id = (self.key_id + 1) % len(self.key)
            self.api_key = self.key[self.key_id]
            self.client = OpenAI(api_key=self.api_key)
            time.sleep(0.1)
    if return_text:
        result = result.choices[0].text
    return result


def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301"):
"""Returns the number of tokens used by a list of messages."""
if model == "gpt-3.5-turbo":
Expand Down

0 comments on commit bc2a56f

Please sign in to comment.