From 53c816192cd99f3103b6d645612505cfbe740771 Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Thu, 25 May 2023 19:59:06 +0530 Subject: [PATCH 01/13] Added name of file --- superagi/tools/file/write_file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/superagi/tools/file/write_file.py b/superagi/tools/file/write_file.py index 2870bd04d..bbb1ddd93 100644 --- a/superagi/tools/file/write_file.py +++ b/superagi/tools/file/write_file.py @@ -32,6 +32,6 @@ def _execute(self, file_name: str, content: str): try: with open(final_path, 'w', encoding="utf-8") as file: file.write(content) - return "File written to successfully." + return f"File written to successfully - {file_name}" except Exception as err: return f"Error: {err}" From 0291c4d5e3e51962cba3c5e7fa94b0d679ddcb3c Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Thu, 25 May 2023 20:24:48 +0530 Subject: [PATCH 02/13] Added update dir address for resources --- config_template.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/config_template.yaml b/config_template.yaml index 7da479235..b326acd23 100644 --- a/config_template.yaml +++ b/config_template.yaml @@ -11,4 +11,5 @@ SEARCH_ENGINE_ID: YOUR_SEARCH_ENIGNE_ID # IF YOU DONT HAVE GOOGLE SERACH KEY, USE THIS SERP_API_KEY: YOUR_SERP_API_KEY -RESOURCES_ROOT_DIR: /tmp/ \ No newline at end of file +RESOURCES_OUTPUT_ROOT_DIR: workspace/output +RESOURCES_INPUT_ROOT_DIR: workspace/input \ No newline at end of file From 4da2e4e4a2dc1682c026416212c0eddc16caf2c1 Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Fri, 26 May 2023 18:49:13 +0530 Subject: [PATCH 03/13] Added GPT 3.5 capability --- superagi/agent/agent_prompt_builder.py | 23 +++++++++++++---------- workspace/input/example.txt | 0 workspace/output/whale_facts.txt | 4 ++++ 3 files changed, 17 insertions(+), 10 deletions(-) create mode 100644 workspace/input/example.txt create mode 100644 workspace/output/whale_facts.txt diff --git a/superagi/agent/agent_prompt_builder.py b/superagi/agent/agent_prompt_builder.py index 61b006bfe..b8e995a87 100644 --- a/superagi/agent/agent_prompt_builder.py +++ b/superagi/agent/agent_prompt_builder.py @@ -91,6 +91,7 @@ def get_autogpt_prompt(cls, ai_name:str, ai_role: str, goals: List[str], tools: prompt_builder.set_ai_name(ai_name) prompt_builder.set_ai_role(ai_role) base_prompt = ( + "Don't write any greet message instead directly jump to the respose format as your first response and write the goal as the first text thought" "Your decisions must always be made independently " "without seeking user assistance.\n" "Play to your strengths as an LLM and pursue simple " @@ -114,6 +115,7 @@ def get_autogpt_prompt(cls, ai_name:str, ai_role: str, goals: List[str], tools: prompt_builder.add_constraint("No user assistance") prompt_builder.add_constraint( 'Exclusively use the commands listed in double quotes e.g. "command name"' + "If you can't find a tool, use your own knowledge" ) # Add tools to the PromptGenerator object @@ -138,23 +140,24 @@ def get_autogpt_prompt(cls, ai_name:str, ai_role: str, goals: List[str], tools: "Reflect on past decisions and strategies to refine your approach.", "Every command has a cost, so be smart and efficient. 
" "Aim to complete tasks in the least number of steps.", + "As soon as you write the result in file, finish the task" ] for evaluation in evaluations: prompt_builder.add_evaluation(evaluation) response_format = { - "thoughts": { - "text": "thought", - "reasoning": "reasoning", - "plan": "- short bulleted\n- list that conveys\n- long-term plan", - "criticism": "constructive self-criticism", - "speak": "thoughts summary to say to user", - }, - "command": {"name": "command name", "args": {"arg name": "value"}}, - } + "thoughts": { + "text": "thought", + "reasoning": "reasoning", + "plan": "- short bulleted\n- list that conveys\n- long-term plan", + "criticism": "constructive self-criticism", + "speak": "thoughts summary to say to user", + }, + "command": {"name": "command name", "args": {"arg name": "value"}}, + } formatted_response_format = json.dumps(response_format, indent=4) prompt_builder.set_response_format(formatted_response_format) # Generate the prompt string prompt_string = prompt_builder.generate_prompt_string() - return prompt_string + return prompt_string \ No newline at end of file diff --git a/workspace/input/example.txt b/workspace/input/example.txt new file mode 100644 index 000000000..e69de29bb diff --git a/workspace/output/whale_facts.txt b/workspace/output/whale_facts.txt new file mode 100644 index 000000000..cf696a1cb --- /dev/null +++ b/workspace/output/whale_facts.txt @@ -0,0 +1,4 @@ +1. Whales are divided into two main groups: baleen whales and toothed whales. +2. Male humpback whales sing complex songs in winter breeding areas that can last up to 20 minutes. +3. The blue whale is the largest animal that ever lived, growing up to 90 feet and weighing as much as 24 elephants. +4. Bowhead whales can live for more than 200 years, and killer whales can live for more than 100 years. \ No newline at end of file From 225f7e53749d5d7dc32b584f5e6db6643e647064 Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Fri, 26 May 2023 19:01:20 +0530 Subject: [PATCH 04/13] Added files --- workspace/input/{example.txt => .temp} | 0 workspace/input/dog.txt | 1 - workspace/output/.temp | 0 workspace/output/whale_facts.txt | 4 ---- 4 files changed, 5 deletions(-) rename workspace/input/{example.txt => .temp} (100%) delete mode 100644 workspace/input/dog.txt create mode 100644 workspace/output/.temp delete mode 100644 workspace/output/whale_facts.txt diff --git a/workspace/input/example.txt b/workspace/input/.temp similarity index 100% rename from workspace/input/example.txt rename to workspace/input/.temp diff --git a/workspace/input/dog.txt b/workspace/input/dog.txt deleted file mode 100644 index 6fd8ca020..000000000 --- a/workspace/input/dog.txt +++ /dev/null @@ -1 +0,0 @@ -Dogs have been bred for specific jobs and traits, resulting in over 300 recognized breeds with unique characteristics, temperaments, and abilities. Herding breeds possess high energy and agility to assist farmers in managing livestock, hunting breeds are divided into scent and sight hounds for finding game, guardian breeds are protective watchdogs, and companion breeds offer comfort and companionship. Dogs have proven to be exceptional in adapting to fulfill various roles in human life and enrich our lives through their invaluable services and unwavering loyalty and affection. 
diff --git a/workspace/output/.temp b/workspace/output/.temp new file mode 100644 index 000000000..e69de29bb diff --git a/workspace/output/whale_facts.txt b/workspace/output/whale_facts.txt deleted file mode 100644 index cf696a1cb..000000000 --- a/workspace/output/whale_facts.txt +++ /dev/null @@ -1,4 +0,0 @@ -1. Whales are divided into two main groups: baleen whales and toothed whales. -2. Male humpback whales sing complex songs in winter breeding areas that can last up to 20 minutes. -3. The blue whale is the largest animal that ever lived, growing up to 90 feet and weighing as much as 24 elephants. -4. Bowhead whales can live for more than 200 years, and killer whales can live for more than 100 years. \ No newline at end of file From 39cbf7bd79dff553b397861601de0be164498805 Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Fri, 26 May 2023 19:14:19 +0530 Subject: [PATCH 05/13] Modified max token limit --- superagi/llms/openai.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/superagi/llms/openai.py b/superagi/llms/openai.py index 8da414fc7..bacca3795 100644 --- a/superagi/llms/openai.py +++ b/superagi/llms/openai.py @@ -30,7 +30,7 @@ def chat_completion(self, messages, max_tokens=4032): model=self.model, messages=messages, temperature=self.temperature, - max_tokens=self.max_tokens, + max_tokens=max_tokens, top_p=self.top_p, frequency_penalty=self.frequency_penalty, presence_penalty=self.presence_penalty From e0f94ad6e926db9dfcad674c4f061de4748cb5de Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Sat, 27 May 2023 13:05:42 +0530 Subject: [PATCH 06/13] Added tool for LLM thinking --- superagi/tools/thinking/__init__.py | 0 superagi/tools/thinking/tools.py | 42 +++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 superagi/tools/thinking/__init__.py create mode 100644 superagi/tools/thinking/tools.py diff --git a/superagi/tools/thinking/__init__.py b/superagi/tools/thinking/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/superagi/tools/thinking/tools.py b/superagi/tools/thinking/tools.py new file mode 100644 index 000000000..6eb2c3e53 --- /dev/null +++ b/superagi/tools/thinking/tools.py @@ -0,0 +1,42 @@ +import os +import openai +from typing import Type + +from pydantic import BaseModel, Field +from superagi.tools.base_tool import BaseTool +from superagi.config.config import get_config + +# Schema for LLMThinking tool + +class LlmTaskSchema(BaseModel): + task_description: str = Field( + ..., + description="Text describing the task for which the GPT model should generate a response.", + ) + +class LlmThinkingTool(BaseTool): + name = "LlmThinking" + description = ( + "A tool that interacts with OpenAI's GPT models " + "to generate text given a certain task description." 
+ ) + args_schema: Type[LlmTaskSchema] = LlmTaskSchema + + def _execute(self, task_description: str, model_name: str = "text-davinci-002"): + api_key = get_config("OPENAI_API_KEY") + openai.api_key = api_key + + try: + response = openai.Completion.create( + engine=model_name, + prompt=task_description, + max_tokens=150, + n=1, + stop=None, + temperature=0.7, + ) + generated_text = response.choices[0].text.strip() + return generated_text + except openai.OpenAIError as e: + print(e) + return f"Error generating text: {e}" \ No newline at end of file From fdbd1a7d57ab38f7e372ba7d3bc3879c65129010 Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Sat, 27 May 2023 13:22:30 +0530 Subject: [PATCH 07/13] Added Changes for thinking tool --- superagi/agent/agent_prompt_builder.py | 2 +- test.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/superagi/agent/agent_prompt_builder.py b/superagi/agent/agent_prompt_builder.py index b8e995a87..f010604fb 100644 --- a/superagi/agent/agent_prompt_builder.py +++ b/superagi/agent/agent_prompt_builder.py @@ -91,7 +91,6 @@ def get_autogpt_prompt(cls, ai_name:str, ai_role: str, goals: List[str], tools: prompt_builder.set_ai_name(ai_name) prompt_builder.set_ai_role(ai_role) base_prompt = ( - "Don't write any greet message instead directly jump to the respose format as your first response and write the goal as the first text thought" "Your decisions must always be made independently " "without seeking user assistance.\n" "Play to your strengths as an LLM and pursue simple " @@ -115,6 +114,7 @@ def get_autogpt_prompt(cls, ai_name:str, ai_role: str, goals: List[str], tools: prompt_builder.add_constraint("No user assistance") prompt_builder.add_constraint( 'Exclusively use the commands listed in double quotes e.g. 
"command name"' + "You should always try to use LLmthinkingtool first instead of directly jumping to google search" "If you can't find a tool, use your own knowledge" ) diff --git a/test.py b/test.py index 03f12f76f..de5e3925b 100644 --- a/test.py +++ b/test.py @@ -6,6 +6,7 @@ from superagi.tools.google_search.tools import GoogleSearchSchema, GoogleSearchTool from superagi.tools.google_serp_search.tools import GoogleSerpTool from superagi.tools.twitter.send_tweet import SendTweetTool +from superagi.tools.thinking.tools import LlmThinkingTool from superagi.tools.email.read_email import ReadEmailTool from superagi.tools.email.send_email import SendEmailTool from superagi.tools.email.send_email_attachment import SendEmailAttachmentTool @@ -26,6 +27,7 @@ def create_campaign(campaign_name: str): tools = [ + LlmThinkingTool(), GoogleSearchTool(), WriteFileTool(), ReadFileTool(), @@ -43,7 +45,7 @@ def create_campaign(campaign_name: str): -superagi = SuperAgi.from_llm_and_tools("Super AGI", "To solve any complex problems for you", memory, tools, OpenAi(model="gpt-4")) +superagi = SuperAgi.from_llm_and_tools("Super AGI", "To solve any complex problems for you", memory, tools, OpenAi(model="gpt-3.5-turbo")) user_goal=[] user_goal=str(input("Enter your Goals seperated by ',':\n")).split(",") superagi.execute(user_goal) From 5ed02ad4c97733b9c2d03e4853a07c9e5c52c1f9 Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Sat, 27 May 2023 14:07:59 +0530 Subject: [PATCH 08/13] Updated schema class name and formatting --- superagi/agent/agent_prompt_builder.py | 2 +- superagi/tools/thinking/tools.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/superagi/agent/agent_prompt_builder.py b/superagi/agent/agent_prompt_builder.py index f010604fb..c539bd547 100644 --- a/superagi/agent/agent_prompt_builder.py +++ b/superagi/agent/agent_prompt_builder.py @@ -114,7 +114,7 @@ def get_autogpt_prompt(cls, ai_name:str, ai_role: str, goals: List[str], tools: prompt_builder.add_constraint("No user assistance") prompt_builder.add_constraint( 'Exclusively use the commands listed in double quotes e.g. "command name"' - "You should always try to use LLmthinkingtool first instead of directly jumping to google search" + "You should always try to use LlmThinkingTool first instead of directly jumping to google search" "If you can't find a tool, use your own knowledge" ) diff --git a/superagi/tools/thinking/tools.py b/superagi/tools/thinking/tools.py index 6eb2c3e53..8692ee494 100644 --- a/superagi/tools/thinking/tools.py +++ b/superagi/tools/thinking/tools.py @@ -8,7 +8,7 @@ # Schema for LLMThinking tool -class LlmTaskSchema(BaseModel): +class LlmThinkingSchema(BaseModel): task_description: str = Field( ..., description="Text describing the task for which the GPT model should generate a response.", @@ -20,7 +20,7 @@ class LlmThinkingTool(BaseTool): "A tool that interacts with OpenAI's GPT models " "to generate text given a certain task description." 
) - args_schema: Type[LlmTaskSchema] = LlmTaskSchema + args_schema: Type[LlmThinkingSchema] = LlmThinkingSchema def _execute(self, task_description: str, model_name: str = "text-davinci-002"): api_key = get_config("OPENAI_API_KEY") From 3d7ada2d4a5cb3e892a3c69b76645f47e62abd81 Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Sat, 27 May 2023 17:40:00 +0530 Subject: [PATCH 09/13] Added thinking tool logic --- superagi/tools/thinking/tools.py | 36 ++++++++++++++------------------ 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/superagi/tools/thinking/tools.py b/superagi/tools/thinking/tools.py index 8692ee494..f29589613 100644 --- a/superagi/tools/thinking/tools.py +++ b/superagi/tools/thinking/tools.py @@ -1,42 +1,38 @@ import os import openai -from typing import Type +from typing import Type, Optional from pydantic import BaseModel, Field from superagi.tools.base_tool import BaseTool from superagi.config.config import get_config +from superagi.llms.base_llm import BaseLlm +from pydantic import BaseModel, Field, PrivateAttr -# Schema for LLMThinking tool -class LlmThinkingSchema(BaseModel): +class LlmTaskSchema(BaseModel): task_description: str = Field( ..., - description="Text describing the task for which the GPT model should generate a response.", + description="Text describing the task for which the LLM should generate a response.", ) class LlmThinkingTool(BaseTool): + llm: Optional[BaseLlm] = None name = "LlmThinking" description = ( - "A tool that interacts with OpenAI's GPT models " + "A tool that interacts with any given LLM " "to generate text given a certain task description." ) - args_schema: Type[LlmThinkingSchema] = LlmThinkingSchema + args_schema: Type[LlmTaskSchema] = LlmTaskSchema - def _execute(self, task_description: str, model_name: str = "text-davinci-002"): - api_key = get_config("OPENAI_API_KEY") - openai.api_key = api_key + class Config: + arbitrary_types_allowed = True + + def _execute(self, task_description: str): try: - response = openai.Completion.create( - engine=model_name, - prompt=task_description, - max_tokens=150, - n=1, - stop=None, - temperature=0.7, - ) - generated_text = response.choices[0].text.strip() - return generated_text - except openai.OpenAIError as e: + messages = [{"role": "system", "content": task_description}] + result = self.llm.chat_completion(messages) + return result["content"] + except Exception as e: print(e) return f"Error generating text: {e}" \ No newline at end of file From 2ec79a34137153c155e60816605f822674d17c62 Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Sat, 27 May 2023 17:55:46 +0530 Subject: [PATCH 10/13] Added LLm Thinking tool --- superagi/tools/thinking/tools.py | 2 +- test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/superagi/tools/thinking/tools.py b/superagi/tools/thinking/tools.py index f29589613..75c28fe99 100644 --- a/superagi/tools/thinking/tools.py +++ b/superagi/tools/thinking/tools.py @@ -17,7 +17,7 @@ class LlmTaskSchema(BaseModel): class LlmThinkingTool(BaseTool): llm: Optional[BaseLlm] = None - name = "LlmThinking" + name = "LlmThinkingTool" description = ( "A tool that interacts with any given LLM " "to generate text given a certain task description." 
diff --git a/test.py b/test.py index de5e3925b..30bd6840b 100644 --- a/test.py +++ b/test.py @@ -27,7 +27,7 @@ def create_campaign(campaign_name: str): tools = [ - LlmThinkingTool(), + LlmThinkingTool(llm=OpenAi(model="gpt-3.5-turbo")), GoogleSearchTool(), WriteFileTool(), ReadFileTool(), From 7e82414d49fe2d80dbdd56551dc613aee63d4306 Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Sat, 27 May 2023 18:01:16 +0530 Subject: [PATCH 11/13] Modified Thoughts of LLM --- superagi/agent/output_parser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/superagi/agent/output_parser.py b/superagi/agent/output_parser.py index 08095db32..bc1ff15ca 100644 --- a/superagi/agent/output_parser.py +++ b/superagi/agent/output_parser.py @@ -40,7 +40,7 @@ def parse(self, text: str) -> AgentGPTAction: format_prefix_green = "\033[92m\033[1m" format_suffix_green = "\033[0m\033[0m" print(format_prefix_green + "Intelligence : " + format_suffix_green) - print(format_prefix_yellow + "Thoughts: " + format_suffix_yellow + parsed["thoughts"]["reasoning"] + "\n") + print(format_prefix_yellow + "Thoughts: " + format_suffix_yellow + parsed["thoughts"]["text"] + "\n") print(format_prefix_yellow + "Reasoning: " + format_suffix_yellow + parsed["thoughts"]["reasoning"] + "\n") print(format_prefix_yellow + "Plan: " + format_suffix_yellow + parsed["thoughts"]["plan"] + "\n") print(format_prefix_yellow + "Criticism: " + format_suffix_yellow + parsed["thoughts"]["criticism"] + "\n") From 67f174b3be2203f83a2166d20cc80d7a6085eed3 Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Sat, 27 May 2023 18:08:21 +0530 Subject: [PATCH 12/13] modified prompt --- superagi/agent/agent_prompt_builder.py | 1 - 1 file changed, 1 deletion(-) diff --git a/superagi/agent/agent_prompt_builder.py b/superagi/agent/agent_prompt_builder.py index 1570e7315..29fccd8e4 100644 --- a/superagi/agent/agent_prompt_builder.py +++ b/superagi/agent/agent_prompt_builder.py @@ -116,7 +116,6 @@ def get_autogpt_prompt(cls, ai_name:str, ai_role: str, goals: List[str], tools: prompt_builder.add_constraint( 'Exclusively use the commands listed in double quotes e.g. "command name"' "You should always try to use LlmThinkingTool first instead of directly jumping to google search" - "If you can't find a tool, use your own knowledge" ) # Add tools to the PromptGenerator object From e180811d0fced6d053e2255501bd7e46000e0594 Mon Sep 17 00:00:00 2001 From: COLONAYUSH Date: Sat, 27 May 2023 18:16:47 +0530 Subject: [PATCH 13/13] modifed base prompt --- superagi/agent/agent_prompt_builder.py | 1 - 1 file changed, 1 deletion(-) diff --git a/superagi/agent/agent_prompt_builder.py b/superagi/agent/agent_prompt_builder.py index 29fccd8e4..0f595dc33 100644 --- a/superagi/agent/agent_prompt_builder.py +++ b/superagi/agent/agent_prompt_builder.py @@ -115,7 +115,6 @@ def get_autogpt_prompt(cls, ai_name:str, ai_role: str, goals: List[str], tools: prompt_builder.add_constraint("Ensure the command and args are as per current plan and reasoning.") prompt_builder.add_constraint( 'Exclusively use the commands listed in double quotes e.g. "command name"' - "You should always try to use LlmThinkingTool first instead of directly jumping to google search" ) # Add tools to the PromptGenerator object