forked from nlpxucan/WizardLM
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
4 changed files
with
151 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,14 @@ | ||
# Prompt template for "in-breadth" evolving (Evol-Instruct): asks the model to
# invent a brand-new, rarer prompt in the same domain as a given seed prompt.
base_instruction = "I want you act as a Prompt Creator.\r\n\
Your goal is to draw inspiration from the #Given Prompt# to create a brand new prompt.\r\n\
This new prompt should belong to the same domain as the #Given Prompt# but be even more rare.\r\n\
The LENGTH and complexity of the #Created Prompt# should be similar to that of the #Given Prompt#.\r\n\
The #Created Prompt# must be reasonable and must be understood and responded by humans.\r\n\
'#Given Prompt#', '#Created Prompt#', 'given prompt' and 'created prompt' are not allowed to appear in #Created Prompt#\r\n"


def createBreadthPrompt(instruction):
    """Build the full breadth-evolution prompt for a seed instruction.

    Parameters
    ----------
    instruction : str
        The seed instruction to evolve.

    Returns
    -------
    str
        ``base_instruction`` followed by the seed wrapped in
        '#Given Prompt#' / '#Created Prompt#' markers, ready to send to
        the chat model.
    """
    prompt = base_instruction
    prompt += "#Given Prompt#: \r\n {} \r\n".format(instruction)
    prompt += "#Created Prompt#:\r\n"
    return prompt
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,34 @@ | ||
# Prompt template for "in-depth" evolving (Evol-Instruct): rewrites a given
# prompt into a harder version. The '{}' placeholder is filled with the
# specific complication method by each create*Prompt helper below.
base_instruction = "I want you act as a Prompt Rewriter.\r\n \
Your objective is to rewrite a given prompt into a more complex version to make those famous AI systems (e.g., chatgpt and GPT4) a bit harder to handle.\r\n \
But the rewritten prompt must be reasonable and must be understood and responded by humans.\r\n \
Your rewriting cannot omit the non-text parts such as the table and code in #The Given Prompt#:. Also, please do not omit the input in #The Given Prompt#. \r\n \
You SHOULD complicate the given prompt using the following method: \r\n\
{} \r\n\
You should try your best not to make the #Rewritten Prompt# become verbose, #Rewritten Prompt# can only add 10 to 20 words into #The Given Prompt#. \r\n\
'#The Given Prompt#', '#Rewritten Prompt#', 'given prompt' and 'rewritten prompt' are not allowed to appear in #Rewritten Prompt#\r\n"


def _wrap(method, instruction):
    """Fill the template with *method* and append the seed *instruction*."""
    prompt = base_instruction.format(method)
    prompt += "#The Given Prompt#: \r\n {} \r\n".format(instruction)
    prompt += "#Rewritten Prompt#:\r\n"
    return prompt


def createConstraintsPrompt(instruction):
    """Depth evolution: add one extra constraint/requirement to the prompt."""
    return _wrap("Please add one more constraints/requirements into #The Given Prompt#'", instruction)


def createDeepenPrompt(instruction):
    """Depth evolution: increase the depth and breadth of any inquiry."""
    return _wrap("If #The Given Prompt# contains inquiries about certain issues, the depth and breadth of the inquiry can be increased.", instruction)


def createConcretizingPrompt(instruction):
    """Depth evolution: replace general concepts with more specific ones."""
    return _wrap("Please replace general concepts with more specific concepts.", instruction)


def createReasoningPrompt(instruction):
    """Depth evolution: request explicit multiple-step reasoning."""
    return _wrap("If #The Given Prompt# can be solved with just a few simple thinking processes, you can rewrite it to explicitly request multiple-step reasoning.", instruction)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,42 @@ | ||
import json
import random

from openai_access import call_chatgpt
from depth import createConstraintsPrompt, createDeepenPrompt, createConcretizingPrompt, createReasoningPrompt
from breadth import createBreadthPrompt


# Load the seed instruction set (cleaned Alpaca data) to be evolved.
# BUG FIX: the original opened the file with a bare open() and never closed
# it; a context manager guarantees the handle is released.
with open('alpaca_data_cleaned.json', 'r') as fr:
    all_objs = json.load(fr)

evol_objs = []

for cur_obj in all_objs:
    # Merge the instruction and its (possibly empty) input into one prompt body.
    instruction = cur_obj['instruction'].strip() + '\r\n' + cur_obj['input'].strip()

    # Build all five evolution candidates (four depth methods + one breadth),
    # then pick one uniformly at random, following the Evol-Instruct recipe.
    evol_prompts = [
        createConstraintsPrompt(instruction),
        createDeepenPrompt(instruction),
        createConcretizingPrompt(instruction),
        createReasoningPrompt(instruction),
        createBreadthPrompt(instruction),
    ]
    selected_evol_prompt = random.choice(evol_prompts)

    # First call evolves the instruction; second call answers the evolved one.
    evol_instruction = call_chatgpt(selected_evol_prompt)
    answer = call_chatgpt(evol_instruction)

    evol_objs.append({"instruction": evol_instruction, "output": answer})

with open('alpaca_data_evol.json', 'w') as f:
    json.dump(evol_objs, f, indent=4)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,61 @@ | ||
import openai
import time

# NOTE(review): hard-coded placeholder key; load from an environment
# variable in production.
openai.api_key = 'your api key'


def get_oai_completion(prompt):
    """Send *prompt* to gpt-3.5-turbo and return the reply text.

    Parameters
    ----------
    prompt : str
        The user message to send; a fixed system message is prepended.

    Returns
    -------
    str or None
        The assistant's reply, or ``None`` when the request was invalid,
        timed out, or failed with a non-timeout API error. Timeouts
        surfaced through ``APIError`` and rate limits are retried by
        recursing on this function.
    """
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
            temperature=1,
            max_tokens=2048,
            top_p=0.95,
            frequency_penalty=0,
            presence_penalty=0,
            stop=None
        )
        res = response["choices"][0]["message"]["content"]
        return res
    # BUG FIX: the original caught requests.exceptions.Timeout without ever
    # importing requests, which would raise NameError the moment a timeout
    # occurred. openai.error.Timeout is what the v0.x client actually raises.
    except openai.error.Timeout:
        print("The OpenAI API request timed out. Please try again later.")
        return None
    except openai.error.InvalidRequestError as e:
        # Malformed request (e.g. context too long) — not retryable.
        print(f"The OpenAI API request was invalid: {e}")
        return None
    except openai.error.APIError as e:
        if "The operation was timeout" in str(e):
            # Azure-style timeout reported through a generic APIError.
            print("The OpenAI API request timed out. Please try again later.")
            return get_oai_completion(prompt)
        else:
            print(f"The OpenAI API returned an error: {e}")
            return None
    except openai.error.RateLimitError:
        # Back off briefly so the retry does not immediately hit the limit
        # again (the original recursed in a tight loop).
        time.sleep(3)
        return get_oai_completion(prompt)
|
||
def call_chatgpt(ins):
    """Call ``get_oai_completion`` with up to 16 attempts, sleeping between
    failures.

    Parameters
    ----------
    ins : str
        The prompt to send.

    Returns
    -------
    str or None
        The model's reply, ``''`` if every attempt raised, or ``None`` if
        every attempt returned ``None``.
    """
    success = False
    re_try_count = 15
    ans = ''
    while not success and re_try_count >= 0:
        re_try_count -= 1
        try:
            ans = get_oai_completion(ins)
            # BUG FIX: get_oai_completion returns None on handled errors
            # instead of raising, so the original marked such failures as
            # success and never retried. Only stop once we have a reply.
            success = ans is not None
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate.
            success = False
        if not success:
            time.sleep(5)
            print('retry for sample:', ins)
    return ans