Xr llama dev (#68)
* add text generation requirement

* add huggingface api key input

* add llamanager file as manager

* add llama analysis skills

* add llama prompt builder

* add llama write prd skills

* add test llama analysis skill

* fixed the skill class names

* remove the configuration

* change the test file Productanalysis name
Xinchunran authored Oct 19, 2023
1 parent 88937ad commit f2bf371
Showing 7 changed files with 276 additions and 1 deletion.
1 change: 1 addition & 0 deletions requirements.txt
@@ -33,3 +33,4 @@ redis==4.6.0
rq==1.15.1
eventlet==0.33.3
tiktoken==0.5.1
text-generation==0.6.1
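
The new text-generation package is the Hugging Face text-generation-inference client used by llamanager.py below. A minimal sketch of a direct call, assuming a hosted Llama 2 endpoint; the URL and token here are placeholders, in this repo they come from Configuration.yaml:

from text_generation import Client

# Placeholder endpoint and token -- supply real values from
# Configuration.yaml (HF_API_LLAMA2_BASE / HF_API_KEY).
client = Client(
    "https://your-llama2-endpoint.example.com",
    headers={"Authorization": "Bearer hf_your_token"},
    timeout=120,
)
response = client.generate("[INST] Say hello. [/INST]", max_new_tokens=50)
print(response.generated_text)
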
6 changes: 5 additions & 1 deletion solidgpt/src/configuration/Configuration.yaml
@@ -6,4 +6,8 @@ openai_model: gpt-3.5-turbo-16k
notion_api_key :
notion_page_id :

azure_blob_connection_string:
azure_blob_connection_string:

# Hugging Face API
HF_API_LLAMA2_BASE :
HF_API_KEY :
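
These two keys are read at runtime by LLAManager through ConfigReader; a sketch of the lookups (the property names must match the YAML keys exactly):

from solidgpt.src.configuration.configreader import ConfigReader

# Mirrors the lookups in llamanager.py below.
base_url = ConfigReader().get_property("HF_API_LLAMA2_BASE")
api_key = ConfigReader().get_property("HF_API_KEY")
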
67 changes: 67 additions & 0 deletions solidgpt/src/manager/llamanager.py
@@ -0,0 +1,67 @@
from solidgpt.src.configuration.configreader import ConfigReader
from text_generation import Client
from solidgpt.src.manager.promptresource import llama_v2_prompt


class LLAManager:
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super(LLAManager, cls).__new__(cls)
        return cls._instance

    def __init__(self, if_show_reply=False):
        self.llama2_base_url = ConfigReader().get_property("HF_API_LLAMA2_BASE")
        self.llama2_api_key = ConfigReader().get_property("HF_API_KEY")
        self.llama_models_container = {}
        self.if_show_reply = if_show_reply

    def create_model(self, prompt, llama_model_label, temperature=1, model=None):
        if model is None:
            model = self.llama2_base_url  # Use the LLAMA2 base URL as the model endpoint
        llama_model = LLamaModel(prompt, self.llama2_api_key, model, self.if_show_reply, temperature)
        self.llama_models_container[llama_model_label] = llama_model
        return llama_model

    def create_and_chat_with_model(self, prompt, llama_model_label, input_message, temperature=0.1, model=None):
        llama_model = self.create_model(prompt, llama_model_label, temperature, model)
        return llama_model.chat_with_model(input_message)

    def get_llama_model(self, llama_model_label):
        return self.llama_models_container.get(llama_model_label)

    def remove_llama_model(self, llama_model_label):
        self.llama_models_container.pop(llama_model_label, None)


class LLamaModel:
    def __init__(self, prompt, api, model, if_show_reply=True, temperature=0.1):
        self.prompt = prompt
        self.api = api
        self.model = model
        self.messages = [{"role": "system", "content": self.prompt}]
        self.last_reply = None
        self.if_show_reply = if_show_reply
        self.temperature = temperature

    def chat_with_model(self, input_message):
        self.messages.append({"role": "user", "content": input_message})
        self._run_model()
        return self.last_reply

    def _run_model(self):
        client = Client(self.model, headers={"Authorization": f"Bearer {self.api}"}, timeout=120)
        chat = client.generate(
            llama_v2_prompt(self.messages),  # Convert the chat history to the LLAMA2 prompt format
            temperature=self.temperature,
            max_new_tokens=1000
        )
        reply = chat.generated_text
        if self.if_show_reply:
            print(f"LLAMA2: {reply}")
        self.messages.append({"role": "assistant", "content": reply})
        self.last_reply = reply

    def add_background(self, background_message):
        self.messages.append({"role": "assistant", "content": background_message})
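
A minimal usage sketch of the manager above, assuming Configuration.yaml has HF_API_LLAMA2_BASE and HF_API_KEY filled in; the prompt, label, and message are illustrative:

from solidgpt.src.manager.llamanager import LLAManager

manager = LLAManager(if_show_reply=True)
reply = manager.create_and_chat_with_model(
    prompt="You are a concise product analyst.",  # illustrative system prompt
    llama_model_label="demo",                     # illustrative cache label
    input_message="Summarize this repo in one sentence.",
)
print(reply)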

33 changes: 33 additions & 0 deletions solidgpt/src/manager/promptresource.py
@@ -1,3 +1,4 @@

PRODUCT_MANAGER_PRD_OUTPUT_TEMPLATE = f'''Based on the information, help me generate a Markdown PRD that follows this format.
Here is the output template, with an explanation of what each section means. Always output with this template in Markdown format.
@@ -1183,3 +1184,35 @@ def build_custom_skill_gpt_prompt(role_assumption: str, instruction: str, princi
Here are principles you need to always follow when giving the response: {principles}\n\n
Here are the prompt and completion examples: {few_shots}
If there is no suitable content, then respond based on your professional knowledge. '''

def llama_v2_prompt(messages):
    """
    Convert messages in list-of-dictionary format to the Llama 2 compliant prompt format.
    """
    B_INST, E_INST = "[INST]", "[/INST]"
    B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
    BOS, EOS = "<s>", "</s>"
    DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""

    if messages[0]["role"] != "system":
        messages = [
            {
                "role": "system",
                "content": DEFAULT_SYSTEM_PROMPT,
            }
        ] + messages
    # Fold the system prompt into the first user message.
    messages = [
        {
            "role": messages[1]["role"],
            "content": B_SYS + messages[0]["content"] + E_SYS + messages[1]["content"],
        }
    ] + messages[2:]

    # Pair each user prompt with the assistant answer that follows it.
    messages_list = [
        f"{BOS}{B_INST} {(prompt['content']).strip()} {E_INST} {(answer['content']).strip()} {EOS}"
        for prompt, answer in zip(messages[::2], messages[1::2])
    ]
    # The final user message is still awaiting a reply.
    messages_list.append(
        f"{BOS}{B_INST} {(messages[-1]['content']).strip()} {E_INST}")

    return "".join(messages_list)
66 changes: 66 additions & 0 deletions solidgpt/src/workskill/skills/llama_analysis.py
@@ -0,0 +1,66 @@
from solidgpt.src.manager.llamanager import LLAManager
from solidgpt.src.manager.promptresource import PRODUCT_MANAGER_5H2W_OUTPUT_TEMPLATE, PRODUCT_MANAGER_ANALYSIS_ROLE_ASSUMPTION, build_gpt_prompt
from solidgpt.src.util.util import *
from solidgpt.src.workskill.workskill import *


class ProductAnalysisLlama(WorkSkill):
    def __init__(self):
        super().__init__()
        self.llm_manager = LLAManager._instance
        self.name = SKILL_NAME_ANALYSIS_PRODUCT
        self.repo_summary = SkillInput(
            "Product Analysis Repo Summary",
            SkillIOParamCategory.PlainText,
        )
        self.additional_info = SkillInput(
            "Product Analysis Additional Info",
            SkillIOParamCategory.PlainText,
        )
        self.requirements = SkillInput(
            "Product Analysis Requirements",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.repo_summary)
        self.add_input(self.additional_info)
        self.add_input(self.requirements)
        self.output_md = SkillOutput(
            "Requirements Analysis Markdown",
            SkillIOParamCategory.PlainText,
        )
        self.add_output(self.output_md)
        self.additional_info_content = None
        self.repo_summary_content = None
        self.requirements_content = None

    def _read_input(self):
        if self.additional_info_content is None:
            self.additional_info_content = self.additional_info.content
        if self.repo_summary_content is None:
            self.repo_summary_content = self.__get_input_content(self.repo_summary)
        if self.requirements_content is None:
            self.requirements_content = self.requirements.content

    def __get_input_content(self, skill_input: SkillInput):
        return load_from_text(self.get_input_path(skill_input), extension=".txt")

    def execution_impl(self):
        print("Generate product analysis here...")
        product_analysis = self._run_product_analysis_model()
        save_to_md2(self.output_md.param_path, product_analysis)
        self._save_to_result_cache(self.output_md, product_analysis)
        return

    def _run_product_analysis_model(self):
        logging.info("Running product analysis model...")
        prompt = build_gpt_prompt(PRODUCT_MANAGER_ANALYSIS_ROLE_ASSUMPTION, PRODUCT_MANAGER_5H2W_OUTPUT_TEMPLATE)
        model = self.llm_manager.create_model(
            prompt=prompt,
            llama_model_label="product_analysis",  # label used to cache this model in LLAManager
            temperature=0.01,
        )
        analysis = model.chat_with_model(self.__get_model_input())
        logging.info("Product analysis report: %s", analysis)
        return analysis

    def __get_model_input(self):
        return f'''Requirements: {self.requirements_content} \n Product Instruction: {self.repo_summary_content} \n Product additional background information: {self.additional_info_content}'''
59 changes: 59 additions & 0 deletions solidgpt/src/workskill/skills/llama_write_prd.py
@@ -0,0 +1,59 @@
from solidgpt.src.manager.llamanager import LLAManager
from solidgpt.src.manager.promptresource import PRODUCT_MANAGER_BRAINSTORM_OUTPUT_TEMPLATE, PRODUCT_MANAGER_BRAINSTORM_ROLE_ASSUMPTION, PRODUCT_MANAGER_PRD_OUTPUT_TEMPLATE, PRODUCT_MANAGER_PRD_ROLE_ASSUMPTION, build_gpt_prompt
from solidgpt.src.util.util import *
from solidgpt.src.workskill.workskill import *


class WritePRDLlama(WorkSkill):
    def __init__(self):
        super().__init__()
        self.llm_manager = LLAManager._instance
        self.name = SKILL_NAME_WRITE_PRODUCT_REQUIREMENTS_DOCUMENTATION
        self.input_product_key_info = SkillInput(
            "Design Doc",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.input_product_key_info)
        self.output_md = SkillOutput(
            "Write prd Model PRD Result",
            SkillIOParamCategory.ProductRequirementsDocument,
        )
        self.add_output(self.output_md)
        self.input_content = None

    def _read_input(self):
        input_path = self.get_input_path(self.input_product_key_info)
        # If the input is not a readable path, fall back to treating it as string content.
        try:
            self.input_content = load_from_text(input_path, extension=".md")
        except Exception:
            self.input_content = self.input_product_key_info.content

    def execution_impl(self):
        print("Printing PRD result here...")
        brain_storm_product_info = self._run_product_brainstorm_model()
        prd = self.__run_write_prd_model(brain_storm_product_info)
        self._save_to_result_cache(self.output_md, prd)
        save_to_md2(self.output_md.param_path, prd)
        return

    def __run_write_prd_model(self, brain_storm_product_info):
        logging.info("Running write prd model...")
        prompt = build_gpt_prompt(PRODUCT_MANAGER_PRD_ROLE_ASSUMPTION, PRODUCT_MANAGER_PRD_OUTPUT_TEMPLATE)
        return self.llm_manager.create_and_chat_with_model(
            prompt=prompt,
            llama_model_label="write_prd",
            input_message=brain_storm_product_info,
        )

    def _run_product_brainstorm_model(self):
        logging.info("Running product brainstorm model...")
        prompt = build_gpt_prompt(PRODUCT_MANAGER_BRAINSTORM_ROLE_ASSUMPTION, PRODUCT_MANAGER_BRAINSTORM_OUTPUT_TEMPLATE)
        model = self.llm_manager.create_model(
            prompt=prompt,
            llama_model_label="product_brainstorm",
            temperature=0.01,
        )
        brainstorm = model.chat_with_model(self.input_content)
        logging.info("Brainstorm result: %s", brainstorm)
        return brainstorm
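
This commit adds a test only for the analysis skill; a hypothetical driver for WritePRDLlama, mirroring test_llama_analysis.py below (the design-doc path is illustrative):

from solidgpt.src.manager.initializer import Initializer
from solidgpt.src.workgraph.workgraph import *
from solidgpt.src.workskill.skills.llama_write_prd import WritePRDLlama

Initializer()
app = WorkGraph()
skill: WorkSkill = WritePRDLlama()
skill.init_config(
    [
        {
            "param_path": "path/to/DesignDoc.md",  # illustrative input path
            "loading_method": "SkillInputLoadingMethod.LOAD_FROM_STRING",
            "load_from_output_id": -1
        },
    ],
    [
        {
            "id": 1
        }
    ])
app.add_node(WorkNode("1", skill))
app.init_node_dependencies()
app.execute()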
45 changes: 45 additions & 0 deletions solidgpt/test/workskill/skills/test_llama_analysis.py
@@ -0,0 +1,45 @@
from solidgpt.src.manager.initializer import Initializer
from solidgpt.src.workgraph.workgraph import *
from solidgpt.src.workskill.skills.llama_analysis import ProductAnalysisLlama


TEST_SKILL_WORKSPACE = os.path.join(TEST_DIR, "workskill", "skills", "workspace")


def run_test():
    Initializer()
    app = WorkGraph()
    skill: WorkSkill = ProductAnalysisLlama()
    requirement_input_path = os.path.join(TEST_SKILL_WORKSPACE, "in", "ProductRequirements.txt")
    schema_input_path = os.path.join(TEST_SKILL_WORKSPACE, "in", "RepoSchema.txt")
    introduction_input_path = os.path.join(TEST_SKILL_WORKSPACE, "in", "ProductIntroduction.txt")
    skill.init_config(
        [
            {
                "param_path": introduction_input_path,
                "loading_method": "SkillInputLoadingMethod.LOAD_FROM_STRING",
                "load_from_output_id": -1
            },
            {
                "param_path": schema_input_path,
                "loading_method": "SkillInputLoadingMethod.LOAD_FROM_STRING",
                "load_from_output_id": -1
            },
            {
                "param_path": requirement_input_path,
                "loading_method": "SkillInputLoadingMethod.LOAD_FROM_STRING",
                "load_from_output_id": -1
            },
        ],
        [
            {
                "id": 1
            }
        ])
    node = WorkNode("1", skill)
    app.add_node(node)
    app.init_node_dependencies()
    # app.save_data(os.path.join(TEST_SKILL_WORKSPACE, "config", "config_data.json"))
    app.execute()


if __name__ == "__main__":
    run_test()
