Skip to content

Commit

Permalink
Merge pull request #7 from krrishdholakia/patch-1
Browse files Browse the repository at this point in the history
adding support for Claude, Replicate, Cohere, Azure
  • Loading branch information
sunnweiwei authored Aug 4, 2023
2 parents c7c02f0 + 84beadc commit d92da84
Show file tree
Hide file tree
Showing 2 changed files with 22 additions and 8 deletions.
9 changes: 9 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,15 @@ Code for paper "[Is ChatGPT Good at Search? Investigating Large Language Models

This project aims to explore generative LLMs such as ChatGPT and GPT-4 for relevance ranking in Information Retrieval (IR).

## Update
🚀 This project now supports Azure, Claude, Cohere, and Llama2 via [LiteLLM](https://github.com/BerriAI/litellm).

⏳ PaLM support is [coming soon](https://github.com/BerriAI/litellm/pull/36).

To get a list of available models, run:
```python
import litellm
print(litellm.model_list)
```

## News
- **[2023.07.11]** Release a new test set NovelEval with the novel search questions and passages that have not been contaminated by the latest LLMs (e.g., GPT-4). See [NovelEval](https://github.com/sunnweiwei/RankGPT/tree/main/NovelEval) for details.
- **[2023.04.23]** Sharing 100K ChatGPT predicted permutations on MS MARCO training set [here](#download-data-and-model).
Expand Down
21 changes: 13 additions & 8 deletions rank_gpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,10 @@
import openai
import json
import tiktoken

from litellm import completion

class SafeOpenai:
def __init__(self, keys=None, start_id=None, proxy=None):
def __init__(self, model=None, keys=None, start_id=None, proxy=None):
if isinstance(keys, str):
keys = [keys]
if keys is None:
Expand All @@ -18,11 +18,16 @@ def __init__(self, keys=None, start_id=None, proxy=None):
self.key_id = self.key_id % len(self.key)
openai.proxy = proxy
openai.api_key = self.key[self.key_id % len(self.key)]
self.api_key = self.key[self.key_id % len(self.key)]

def chat(self, *args, return_text=False, reduce_length=False, **kwargs):
while True:
try:
completion = openai.ChatCompletion.create(*args, **kwargs, timeout=30)
model = args[0] if len(args) > 0 else kwargs["model"]
if "gpt" in model:
completion = openai.ChatCompletion.create(*args, **kwargs, timeout=30)
elif model in litellm.model_list:
completion = completion(*args, **kwargs, api_key=self.api_key, force_timeout=30)
break
except Exception as e:
print(str(e))
Expand Down Expand Up @@ -214,22 +219,22 @@ def receive_permutation(item, permutation, rank_start=0, rank_end=100):
return item


def permutation_pipeline(item=None, rank_start=0, rank_end=100, model_name='gpt-3.5-turbo', api_key=None,
                         openai_key=None):
    """Re-rank the passages of `item` in [rank_start, rank_end) with one LLM call.

    Args:
        item: Retrieval item holding the query and candidate passages.
        rank_start, rank_end: Half-open window of passage ranks to re-order.
        model_name: Model identifier passed through to the LLM backend.
        api_key: Provider API key (generic name, since non-OpenAI providers
            are supported via LiteLLM).
        openai_key: Deprecated alias for `api_key`, kept so callers written
            against the pre-rename signature keep working.

    Returns:
        The same item with the window's passages re-ordered per the model's
        predicted permutation.
    """
    # Backward compatibility: honor the old keyword if the new one is absent.
    if api_key is None:
        api_key = openai_key
    # Build the ranking prompt for the selected window of passages.
    messages = create_permutation_instruction(item=item, rank_start=rank_start, rank_end=rank_end,
                                              model_name=model_name)
    # run_llm still exposes the key under its `openai_key` parameter name.
    permutation = run_llm(messages, openai_key=api_key, model_name=model_name)
    # Apply the returned ordering back onto the item.
    item = receive_permutation(item, permutation, rank_start=rank_start, rank_end=rank_end)
    return item


def sliding_windows(item=None, rank_start=0, rank_end=100, window_size=20, step=10, model_name='gpt-3.5-turbo',
                    api_key=None):
    """Re-rank `item` with overlapping windows, sliding from the tail toward rank_start.

    Each window of `window_size` passages is re-ranked by one LLM call via
    `permutation_pipeline`; windows overlap by `window_size - step` so items
    can bubble up across window boundaries.

    Args:
        item: Retrieval item holding the query and candidate passages.
        rank_start, rank_end: Overall half-open range of ranks to process.
        window_size: Number of passages re-ranked per LLM call.
        step: How far the window slides between calls.
        model_name: Model identifier passed through to the LLM backend.
        api_key: Provider API key (generic name; non-OpenAI providers are
            supported via LiteLLM).

    Returns:
        A deep copy of `item` with its passages re-ranked.
    """
    item = copy.deepcopy(item)  # never mutate the caller's item
    end_pos = rank_end
    start_pos = rank_end - window_size
    while start_pos >= rank_start:
        start_pos = max(start_pos, rank_start)  # clamp the last partial window
        # BUG FIX: permutation_pipeline's parameter was renamed to `api_key`,
        # but this call still passed `openai_key=`, raising TypeError.
        item = permutation_pipeline(item, start_pos, end_pos, model_name=model_name, api_key=api_key)
        end_pos = end_pos - step
        start_pos = start_pos - step
    return item
Expand Down

0 comments on commit d92da84

Please sign in to comment.