-
Notifications
You must be signed in to change notification settings - Fork 135
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
* add text generation requirement * add huggingface api key input * add llamanager file as manager * add llama analysis skills * add llama prompt builder * add llama write prd skills * add test llama analysis skill * fixed the skill class names * remove the configuration * change the test file Productanalysis name
- Loading branch information
1 parent
88937ad
commit f2bf371
Showing
7 changed files
with
276 additions
and
1 deletion.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -33,3 +33,4 @@ redis==4.6.0 | |
rq==1.15.1 | ||
eventlet==0.33.3 | ||
tiktoken==0.5.1 | ||
text-generation==0.6.1 |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,67 @@ | ||
import os | ||
from solidgpt.src.configuration.configreader import ConfigReader | ||
from text_generation import Client | ||
from solidgpt.src.manager.promptresource import llama_v2_prompt | ||
|
||
class LLAManager:
    """Singleton registry of LLamaModel instances keyed by label.

    Reads the Hugging Face LLAMA2 endpoint URL and API key from the project
    config on construction.

    NOTE(review): __init__ re-runs on every ``LLAManager()`` call even though
    ``__new__`` returns the cached instance, so the model container is reset
    each time — confirm this is intended before relying on the registry
    surviving repeated instantiation.
    """

    _instance = None  # cached singleton instance

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super(LLAManager, cls).__new__(cls)
        return cls._instance

    def __init__(self, if_show_reply=False):
        # Endpoint and credentials come from the shared config reader.
        self.llama2_base_url = ConfigReader().get_property("HF_API_LLAMA2_BASE")
        self.llama2_api_key = ConfigReader().get_property("HF_API_KEY")
        self.llama_models_container = {}  # label -> LLamaModel
        self.if_show_reply = if_show_reply

    def create_model(self, prompt, llama_model_label, temperature=1, model=None,
                     llama_api=None):
        """Create an LLamaModel, register it under *llama_model_label*, and return it.

        BUG FIX: the original signature was
        ``(prompt, llama_api, llama_model_label, temperature, model)``; the
        *llama_api* argument was never used by the body, yet its position made
        every keyword call in this package —
        ``create_model(prompt=..., llama_model_label=..., temperature=...)`` —
        raise TypeError for the missing positional. It is kept as a trailing
        optional parameter for compatibility but remains unused.
        """
        if model is None:
            model = self.llama2_base_url  # default to the configured LLAMA2 endpoint
        llama_model = LLamaModel(prompt, self.llama2_api_key, self.llama2_base_url,
                                 self.if_show_reply, temperature)
        self.llama_models_container[llama_model_label] = llama_model
        return llama_model

    def create_and_chat_with_model(self, prompt, llama_model_label, input_message,
                                   temperature=0.1, model=None):
        """Convenience wrapper: create a model and immediately send one message.

        BUG FIX: arguments are now forwarded by keyword. The original passed
        ``(prompt, llama_model_label, temperature, model)`` positionally into
        the old create_model signature, so the label was consumed as the unused
        llama_api argument and the model was registered under the *temperature*
        value.
        """
        llama_model = self.create_model(prompt, llama_model_label,
                                        temperature=temperature, model=model)
        return llama_model.chat_with_model(input_message)

    def get_llama_model(self, llama_model_label):
        """Return the model registered under *llama_model_label*, or None."""
        return self.llama_models_container.get(llama_model_label)

    def remove_llama_model(self, llama_model_label):
        """Remove the model registered under *llama_model_label*, if present."""
        self.llama_models_container.pop(llama_model_label, None)
|
||
class LLamaModel:
    """Chat wrapper around a hosted LLAMA2 text-generation endpoint.

    Keeps the running conversation as OpenAI-style message dicts and sends
    the whole history (converted via ``llama_v2_prompt``) on every request.
    """

    def __init__(self, prompt, api, model, if_show_reply=True, temperature=0.1):
        self.prompt = prompt
        self.api = api
        self.model = model
        # Conversation history; the system prompt is always the first entry.
        self.messages = [{"role": "system", "content": self.prompt}]
        self.last_reply = None
        self.if_show_reply = if_show_reply
        self.temperature = temperature

    def chat_with_model(self, input_message):
        """Record *input_message* as a user turn, query the model, return the reply."""
        self.messages.append({"role": "user", "content": input_message})
        self._run_model()
        return self.last_reply

    def _run_model(self):
        """Send the full history to the endpoint and append the assistant reply."""
        client = Client(
            self.model,
            headers={"Authorization": f"Bearer {self.api}"},
            timeout=120,
        )
        response = client.generate(
            llama_v2_prompt(self.messages),  # Convert messages to LLAMA2 prompt
            temperature=self.temperature,
            max_new_tokens=1000,
        )
        answer = response.generated_text
        if self.if_show_reply:
            print(f"LLAMA2: {answer}")
        self.messages.append({"role": "assistant", "content": answer})
        self.last_reply = answer

    def add_background(self, background_message):
        """Inject *background_message* as an assistant turn without calling the model."""
        self.messages.append({"role": "assistant", "content": background_message})
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,66 @@ | ||
from solidgpt.src.manager.llamanager import LLAManager | ||
from solidgpt.src.manager.promptresource import PRODUCT_MANAGER_5H2W_OUTPUT_TEMPLATE, PRODUCT_MANAGER_ANALYSIS_ROLE_ASSUMPTION, PRODUCT_MANAGER_BRAINSTORM_OUTPUT_TEMPLATE, PRODUCT_MANAGER_BRAINSTORM_ROLE_ASSUMPTION, PRODUCT_MANAGER_PRD_OUTPUT_TEMPLATE, PRODUCT_MANAGER_PRD_ROLE_ASSUMPTION, build_gpt_prompt | ||
from solidgpt.src.util.util import * | ||
from solidgpt.src.workskill.workskill import * | ||
|
||
class ProductAnalysisLlama(WorkSkill):
    """Skill that produces a 5W2H product-analysis markdown via the LLAMA2 manager.

    Inputs: repo summary (loaded from a .txt file), additional product info and
    requirements (both taken from the SkillInput content strings).
    Output: the analysis report as markdown.
    """

    def __init__(self):
        super().__init__()
        # NOTE(review): LLAManager._instance is None until LLAManager() has been
        # constructed elsewhere (presumably by Initializer) — verify ordering.
        self.llm_manager = LLAManager._instance
        self.name = SKILL_NAME_ANALYSIS_PRODUCT
        self.repo_summary = SkillInput(
            "Product Analysis Repo Summary",
            SkillIOParamCategory.PlainText,
        )
        self.additional_info = SkillInput(
            "Product Analysis Additional Info",
            SkillIOParamCategory.PlainText,
        )
        self.requirements = SkillInput(
            "Product Analysis Requirements",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.repo_summary)
        self.add_input(self.additional_info)
        self.add_input(self.requirements)
        # NOTE(review): "Requirments" typo kept byte-identical — this name may
        # be matched elsewhere by string; fix across the codebase in one pass.
        self.output_md = SkillOutput(
            "Requirments Analysis Markdown",
            SkillIOParamCategory.PlainText,
        )
        self.add_output(self.output_md)
        # Lazily-populated caches of the three input strings.
        self.additional_info_content = None
        self.repo_summary_content = None
        self.requirements_content = None

    def _read_input(self):
        """Populate the cached input strings (each is read at most once)."""
        # BUG FIX: the original tested `self.additional_info is None` — a
        # SkillInput instance that is never None — so the additional-info
        # content was never loaded and stayed None. Test the cached content
        # instead, matching the other two branches.
        if self.additional_info_content is None:
            self.additional_info_content = self.additional_info.content
        if self.repo_summary_content is None:
            self.repo_summary_content = self.__get_input_content(self.repo_summary)
        if self.requirements_content is None:
            self.requirements_content = self.requirements.content

    def __get_input_content(self, skill_input : SkillInput):
        # Resolve the input's path and read it as plain text.
        return load_from_text(self.get_input_path(skill_input), extension=".txt")

    def execution_impl(self):
        """Run the analysis model and persist the markdown result."""
        print("Generate product analysis here...")
        product_analysis = self._run_product_analysis_model()
        save_to_md2(self.output_md.param_path, product_analysis)
        self._save_to_result_cache(self.output_md, product_analysis)
        return

    def _run_product_analysis_model(self):
        """Build the 5W2H prompt, query LLAMA2, and return the analysis text."""
        logging.info("Running product analysis model...")
        prompt = build_gpt_prompt(PRODUCT_MANAGER_ANALYSIS_ROLE_ASSUMPTION, PRODUCT_MANAGER_5H2W_OUTPUT_TEMPLATE)
        model = self.llm_manager.create_model(
            prompt=prompt,
            llama_model_label="product_brainstorm",
            temperature=0.01,
        )
        analysis = model.chat_with_model(self.__get_model_input())
        logging.info("Product analysis report: %s", analysis)
        return analysis

    def __get_model_input(self):
        # Single flattened prompt string combining all three inputs.
        return f'''Requirements: {self.requirements_content} \n Product Instruction: {self.repo_summary_content} \n Product additional background information: {self.additional_info_content}'''
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,59 @@ | ||
from solidgpt.src.manager.llamanager import LLAManager | ||
from solidgpt.src.manager.promptresource import PRODUCT_MANAGER_BRAINSTORM_OUTPUT_TEMPLATE, PRODUCT_MANAGER_BRAINSTORM_ROLE_ASSUMPTION, PRODUCT_MANAGER_PRD_OUTPUT_TEMPLATE, PRODUCT_MANAGER_PRD_ROLE_ASSUMPTION, build_gpt_prompt | ||
from solidgpt.src.util.util import * | ||
from solidgpt.src.workskill.workskill import * | ||
|
||
class WritePRDLlama(WorkSkill):
    """Skill that turns a design doc into a PRD via two LLAMA2 passes:
    a brainstorm pass first, then a PRD-writing pass over its output.
    """

    def __init__(self):
        super().__init__()
        self.llm_manager = LLAManager._instance
        self.name = SKILL_NAME_WRITE_PRODUCT_REQUIREMENTS_DOCUMENTATION
        self.input_product_key_info = SkillInput(
            "Design Doc",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.input_product_key_info)
        self.output_md = SkillOutput(
            "Write prd Model PRD Result",
            SkillIOParamCategory.ProductRequirementsDocument,
        )
        self.add_output(self.output_md)
        self.input_content = None  # design-doc text, filled by _read_input

    def _read_input(self):
        """Load the design doc from its path, falling back to raw string content."""
        source_path = self.get_input_path(self.input_product_key_info)
        # if input is not a path, infer it as a string content
        try:
            self.input_content = load_from_text(source_path, extension=".md")
        except Exception:
            self.input_content = self.input_product_key_info.content

    def execution_impl(self):
        """Brainstorm, write the PRD from the brainstorm, and persist it."""
        print("Printing PRD result here...")
        key_product_info = self._run_product_brainstorm_model()
        prd_text = self.__run_write_prd_model(key_product_info)
        self._save_to_result_cache(self.output_md, prd_text)
        save_to_md2(self.output_md.param_path, prd_text)

    def __run_write_prd_model(self, brain_storm_product_info):
        """Second pass: expand the brainstorm output into a full PRD."""
        logging.info("Running write prd model...")
        prd_prompt = build_gpt_prompt(PRODUCT_MANAGER_PRD_ROLE_ASSUMPTION,
                                      PRODUCT_MANAGER_PRD_OUTPUT_TEMPLATE)
        return self.llm_manager.create_and_chat_with_model(
            prompt=prd_prompt,
            llama_model_label="write_prd",
            input_message=brain_storm_product_info,
        )

    def _run_product_brainstorm_model(self):
        """First pass: extract key product info from the design doc."""
        logging.info("Running product brainstorm model...")
        brainstorm_prompt = build_gpt_prompt(PRODUCT_MANAGER_BRAINSTORM_ROLE_ASSUMPTION,
                                             PRODUCT_MANAGER_BRAINSTORM_OUTPUT_TEMPLATE)
        brainstorm_model = self.llm_manager.create_model(
            prompt=brainstorm_prompt,
            llama_model_label="product_brainstorm",
            temperature=0.01,
        )
        result = brainstorm_model.chat_with_model(self.input_content)
        logging.info("Brainstorm result: %s", result)
        return result
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,45 @@ | ||
from solidgpt.src.manager.initializer import Initializer | ||
from solidgpt.src.workgraph.workgraph import * | ||
from solidgpt.src.workskill.skills.llama_analysis import ProductAnalysisLlama | ||
|
||
|
||
TEST_SKILL_WORKSPACE = os.path.join(TEST_DIR, "workskill", "skills", "workspace")


def run_test():
    """Wire a ProductAnalysisLlama skill into a one-node graph and execute it."""
    Initializer()
    graph = WorkGraph()
    analysis_skill: WorkSkill = ProductAnalysisLlama()
    workspace_in = os.path.join(TEST_SKILL_WORKSPACE, "in")
    requirement_input_path = os.path.join(workspace_in, "ProductRequirements.txt")
    schema_input_path = os.path.join(workspace_in, "RepoSchema.txt")
    introduction_input_path = os.path.join(workspace_in, "ProductIntroduction.txt")
    # The three inputs share the same loading config; order matches the
    # skill's declared inputs: introduction, schema, requirements.
    input_configs = [
        {
            "param_path": path,
            "loading_method": "SkillInputLoadingMethod.LOAD_FROM_STRING",
            "load_from_output_id": -1,
        }
        for path in (introduction_input_path, schema_input_path, requirement_input_path)
    ]
    analysis_skill.init_config(input_configs, [{"id": 1}])
    graph.add_node(WorkNode("1", analysis_skill))
    graph.init_node_dependencies()
    # graph.save_data(os.path.join(TEST_SKILL_WORKSPACE, "config", "config_data.json"))
    graph.execute()


if __name__ == "__main__":
    run_test()