Skip to content

Commit

Permalink
Merge branch 'gpt3.5' of github.com:TransformerOptimus/SuperAGI into …
Browse files Browse the repository at this point in the history
…gpt3.5
  • Loading branch information
COLONAYUSH committed May 27, 2023
2 parents 5ed02ad + 3455a39 commit 38dba37
Show file tree
Hide file tree
Showing 22 changed files with 465 additions and 72 deletions.
4 changes: 3 additions & 1 deletion config_template.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ SEARCH_ENGINE_ID: YOUR_SEARCH_ENGINE_ID
SERP_API_KEY: YOUR_SERP_API_KEY

RESOURCES_OUTPUT_ROOT_DIR: workspace/output
RESOURCES_INPUT_ROOT_DIR: workspace/input
RESOURCES_INPUT_ROOT_DIR: workspace/output

#ENTER YOUR EMAIL CREDENTIALS TO ACCESS EMAIL TOOL
EMAIL_ADDRESS: YOUR_EMAIL_ADDRESS
Expand All @@ -23,3 +23,5 @@ EMAIL_IMAP_SERVER: imap.gmail.com #Change the IMAP Host if not using Gmail
EMAIL_SIGNATURE: Email sent by SuperAGI
EMAIL_DRAFT_MODE_WITH_FOLDER: YOUR_DRAFTS_FOLDER
EMAIL_ATTACHMENT_BASE_PATH: YOUR_DIRECTORY_FOR_EMAIL_ATTACHMENTS

MAX_TOOL_TOKEN_LIMIT: 600
34 changes: 17 additions & 17 deletions superagi/agent/agent_prompt_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,7 @@ def get_autogpt_prompt(cls, ai_name:str, ai_role: str, goals: List[str], tools:
"thinking about similar events will help you remember."
)
prompt_builder.add_constraint("No user assistance")
prompt_builder.add_constraint("Ensure the command and args are as per current plan and reasoning.")
prompt_builder.add_constraint(
'Exclusively use the commands listed in double quotes e.g. "command name"'
"You should always try to use LlmThinkingTool first instead of directly jumping to google search"
Expand All @@ -125,12 +126,12 @@ def get_autogpt_prompt(cls, ai_name:str, ai_role: str, goals: List[str], tools:
for goal in goals:
prompt_builder.add_goal(goal)

resources = ["Internet access for searches and information gathering.",
"Long Term memory management.",
"GPT-3.5 powered Agents for delegation of simple tasks.",
"File output."]
for resource in resources:
prompt_builder.add_resource(resource)
# resources = ["Internet access for searches and information gathering.",
# "Long Term memory management.",
# "GPT-3.5 powered Agents for delegation of simple tasks.",
# "File output."]
# for resource in resources:
# prompt_builder.add_resource(resource)

# Add performance evaluations to the PromptGenerator object
evaluations = [
Expand All @@ -139,22 +140,21 @@ def get_autogpt_prompt(cls, ai_name:str, ai_role: str, goals: List[str], tools:
"Constructively self-criticize your big-picture behavior constantly.",
"Reflect on past decisions and strategies to refine your approach.",
"Every command has a cost, so be smart and efficient. "
"Aim to complete tasks in the least number of steps.",
"As soon as you write the result in file, finish the task"
"Aim to complete tasks in the least number of steps."
]
for evaluation in evaluations:
prompt_builder.add_evaluation(evaluation)

response_format = {
"thoughts": {
"text": "thought",
"reasoning": "reasoning",
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
"criticism": "constructive self-criticism",
"speak": "thoughts summary to say to user",
},
"command": {"name": "command name", "args": {"arg name": "value"}},
}
"thoughts": {
"text": "thought",
"reasoning": "reasoning",
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
"criticism": "constructive self-criticism",
"speak": "thoughts summary to say to user",
},
"command": {"name": "command name/task name", "description": "command or task description", "args": {"arg name": "value"}},
}
formatted_response_format = json.dumps(response_format, indent=4)
prompt_builder.set_response_format(formatted_response_format)
# Generate the prompt string
Expand Down
119 changes: 71 additions & 48 deletions superagi/agent/output_parser.py
Original file line number Diff line number Diff line change
@@ -1,60 +1,83 @@
import json
from abc import ABC, abstractmethod
from typing import Dict, NamedTuple
from typing import Dict, NamedTuple, List
import re

from superagi.helper.json_cleaner import JsonCleaner


class AgentGPTAction(NamedTuple):
name: str
args: Dict
name: str
args: Dict


class BaseOutputParser(ABC):
@abstractmethod
def parse(self, text: str) -> AgentGPTAction:
"""Return AgentGPTAction"""
class AgentTasks(NamedTuple):
    # Result of parsing a task-list LLM response: either a list of task
    # dicts, or a non-empty `error` describing why parsing failed.
    # NOTE(review): the mutable default [] is shared across instances —
    # confirm callers never mutate `tasks` in place.
    tasks: List[Dict] = []
    error: str = ""


def preprocess_json_input(input_str: str) -> str:
    """Escape stray backslashes so the string can parse as JSON.

    Doubles every backslash that does not already begin a valid JSON
    escape sequence (\\", \\\\, \\/, \\b, \\f, \\n, \\r, \\t or \\uXXXX),
    leaving proper escapes untouched.
    """
    stray_backslash = r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})'
    return re.sub(stray_backslash, r"\\\\", input_str)
class BaseOutputParser(ABC):
    # Abstract base for parsers that turn a raw LLM reply into an action.
    @abstractmethod
    def parse(self, text: str) -> AgentGPTAction:
        """Parse raw LLM response text and return an AgentGPTAction."""



class AgentOutputParser(BaseOutputParser):
def parse(self, text: str) -> AgentGPTAction:
try:
parsed = json.loads(text, strict=False)
except json.JSONDecodeError:
preprocessed_text = preprocess_json_input(text)
try:
parsed = json.loads(preprocessed_text, strict=False)
except Exception:
return AgentGPTAction(
name="ERROR",
args={"error": f"Could not parse invalid json: {text}"},
)
try:
format_prefix_yellow = "\033[93m\033[1m"
format_suffix_yellow = "\033[0m\033[0m"
format_prefix_green = "\033[92m\033[1m"
format_suffix_green = "\033[0m\033[0m"
print(format_prefix_green + "Intelligence : " + format_suffix_green)
print(format_prefix_yellow + "Thoughts: " + format_suffix_yellow + parsed["thoughts"]["reasoning"]+"\n")
print(format_prefix_yellow + "Reasoning: " + format_suffix_yellow + parsed["thoughts"]["reasoning"] + "\n")
print(format_prefix_yellow + "Plan: " + format_suffix_yellow + parsed["thoughts"]["plan"] + "\n")
print(format_prefix_yellow + "Criticism: " + format_suffix_yellow + parsed["thoughts"]["criticism"] + "\n")
print(format_prefix_green + "Action : "+ format_suffix_green)
print(format_prefix_yellow + "Tool: "+ format_suffix_yellow + parsed["command"]["name"] + "\n")
# print(format_prefix_yellow + "Args: "+ format_suffix_yellow + parsed["command"]["args"] + "\n")
return AgentGPTAction(
name=parsed["command"]["name"],
args=parsed["command"]["args"],
)
except (KeyError, TypeError):
# If the command is null or incomplete, return an erroneous tool
return AgentGPTAction(
name="ERROR", args={"error": f"Incomplete command args: {parsed}"}
)
def parse(self, text: str) -> AgentGPTAction:
try:
print(text)
text = JsonCleaner.check_and_clean_json(text)
parsed = json.loads(text, strict=False)
except json.JSONDecodeError:
return AgentGPTAction(
name="ERROR",
args={"error": f"Could not parse invalid json: {text}"},
)
try:
format_prefix_yellow = "\033[93m\033[1m"
format_suffix_yellow = "\033[0m\033[0m"
format_prefix_green = "\033[92m\033[1m"
format_suffix_green = "\033[0m\033[0m"
print(format_prefix_green + "Intelligence : " + format_suffix_green)
print(format_prefix_yellow + "Thoughts: " + format_suffix_yellow + parsed["thoughts"]["reasoning"] + "\n")
print(format_prefix_yellow + "Reasoning: " + format_suffix_yellow + parsed["thoughts"]["reasoning"] + "\n")
print(format_prefix_yellow + "Plan: " + format_suffix_yellow + parsed["thoughts"]["plan"] + "\n")
print(format_prefix_yellow + "Criticism: " + format_suffix_yellow + parsed["thoughts"]["criticism"] + "\n")
print(format_prefix_green + "Action : " + format_suffix_green)
print(format_prefix_yellow + "Tool: " + format_suffix_yellow + parsed["command"]["name"] + "\n")
# print(format_prefix_yellow + "Args: "+ format_suffix_yellow + parsed["command"]["args"] + "\n")

return AgentGPTAction(
name=parsed["command"]["name"],
args=parsed["command"]["args"],
)
except (KeyError, TypeError):
# If the command is null or incomplete, return an erroneous tool
return AgentGPTAction(
name="ERROR", args={"error": f"Incomplete command args: {parsed}"}
)

def parse_tasks(self, text: str) -> AgentTasks:
try:
parsed = json.loads(text, strict=False)
except json.JSONDecodeError:
preprocessed_text = JsonCleaner.preprocess_json_input(text)
try:
parsed = json.loads(preprocessed_text, strict=False)
except Exception:
return AgentTasks(
error=f"Could not parse invalid json: {text}",
)
try:
print("Tasks: ", parsed["tasks"])
return AgentTasks(
tasks=parsed["tasks"]
)
except (KeyError, TypeError):
# If the command is null or incomplete, return an erroneous tool
return AgentTasks(
error=f"Incomplete tool args: {parsed}",
)


30 changes: 25 additions & 5 deletions superagi/agent/super_agi.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@
# agent can run the task queue as well with long term memory
from __future__ import annotations

from typing import Tuple

from pydantic import ValidationError
from pydantic.types import List
import time
Expand Down Expand Up @@ -71,6 +73,7 @@ def execute(self, goals: List[str]):
)
iteration = 10
i = 0
token_limit = TokenCounter.token_limit(self.llm.get_model())
while True:
format_prefix_yellow = "\033[93m\033[1m"
format_suffix_yellow = "\033[0m\033[0m"
Expand All @@ -82,20 +85,25 @@ def execute(self, goals: List[str]):
return
# print(self.tools)
autogpt_prompt = AgentPromptBuilder.get_autogpt_prompt(self.ai_name, self.ai_role, goals, self.tools)
autogpt_prompt_to_print = AgentPromptToPrintBuilder.get_autogpt_prompt(self.ai_name, self.ai_role, goals, self.tools)
# autogpt_prompt_to_print = AgentPromptToPrintBuilder.get_autogpt_prompt(self.ai_name, self.ai_role, goals, self.tools)
# generated_prompt = self.get_analytics_insight_prompt(analytics_string)
messages = [{"role": "system", "content": autogpt_prompt},
{"role": "system", "content": f"The current time and date is {time.strftime('%c')}"}]

for history in self.full_message_history[-10:]:
# print(history.type + " : ", history.content)
base_token_limit = TokenCounter.count_message_tokens(messages, self.llm.get_model())
past_messages, current_messages = self.split_history(self.full_message_history,
token_limit - base_token_limit - 500)
for history in current_messages:
messages.append({"role": history.type, "content": history.content})
messages.append({"role": "user", "content": user_input})

# print(autogpt_prompt)
print(autogpt_prompt_to_print)
# print(autogpt_prompt_to_print)
# Discontinue if continuous limit is reached
# print("----------------------------------")
# print(messages)
# print("----------------------------------")
current_tokens = TokenCounter.count_message_tokens(messages, self.llm.get_model())
token_limit = TokenCounter.token_limit(self.llm.get_model())

# spinner = Spinners.dots12
# spinner.start()
Expand Down Expand Up @@ -131,6 +139,7 @@ def execute(self, goals: List[str]):
# print(assistant_reply)
action = self.output_parser.parse(assistant_reply)
tools = {t.name: t for t in self.tools}
# print("Action: ", action)

if action.name == FINISH:
print(format_prefix_green + "\nTask Finished :) \n" + format_suffix_green)
Expand Down Expand Up @@ -165,6 +174,17 @@ def execute(self, goals: List[str]):
print(format_prefix_green + "Interation completed moving to next iteration!" + format_suffix_green)
pass

    def split_history(self, history: List[BaseMessage], pending_token_limit: int) -> Tuple[List[BaseMessage], List[BaseMessage]]:
        """Split message history into (older, recent) halves by token budget.

        Walks the history from newest to oldest, accumulating per-message
        token counts for the current model, and cuts at the first message
        whose inclusion would push the running total past
        pending_token_limit.
        """
        hist_token_count = 0
        i = len(history)
        for message in reversed(history):
            token_count = TokenCounter.count_message_tokens([{"role": message.type, "content": message.content}], self.llm.get_model())
            hist_token_count += token_count
            if hist_token_count > pending_token_limit:
                # history[:i] still contains the message that overflowed the
                # budget; history[i:] (the recent half) is guaranteed to fit.
                return history[:i], history[i:]
            i -= 1
        # Everything fits within the budget: keep the whole history recent.
        return [], history

def call_llm(self):
pass

Expand Down
80 changes: 80 additions & 0 deletions superagi/helper/json_cleaner.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
import json
import re


class JsonCleaner:
    """Best-effort repair of malformed JSON strings produced by an LLM."""

    @classmethod
    def check_and_clean_json(cls, json_string: str):
        """Return a parseable version of *json_string*, repairing if needed.

        First tries the input after stripping literal "\\t"/"\\n" escape
        sequences and decoding escapes; if that fails, applies heavier
        fixes (escaping stray backslashes, quoting bare property names,
        balancing braces). As a last resort, returns the outermost {...}
        slice, which may still be invalid JSON.
        """
        try:
            json_string = json_string.replace("\\t", "")
            json_string = json_string.replace("\\n", "")
            json_string = cls.remove_escape_sequences(json_string)
            json.loads(json_string)
            return json_string
        except json.JSONDecodeError:
            # The json is invalid: try to clean it up step by step.
            json_string = cls.preprocess_json_input(json_string)
            json_string = cls.add_quotes_to_property_names(json_string)
            json_string = cls.remove_escape_sequences(json_string)
            json_string = cls.balance_braces(json_string)
            try:
                json.loads(json_string)
                return json_string
            except json.JSONDecodeError:
                print(json_string)
                # Still invalid: extract the outermost json section.
                # (Fix: removed an unreachable trailing `return` — both
                # branches of the inner try/except already return.)
                return cls.extract_json_section(json_string)

    @classmethod
    def preprocess_json_input(cls, input_str: str) -> str:
        """Double stray backslashes while leaving valid JSON escapes
        (\\", \\\\, \\/, \\b, \\f, \\n, \\r, \\t, \\uXXXX) intact."""
        corrected_str = re.sub(
            r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})', r"\\\\", input_str
        )
        return corrected_str

    @classmethod
    def extract_json_section(cls, input_str: str = ""):
        """Return the substring from the first '{' to the last '}'.

        Falls back to the input unchanged when either brace is missing.
        """
        try:
            first_brace_index = input_str.index("{")
            final_json = input_str[first_brace_index:]
            last_brace_index = final_json.rindex("}")
            final_json = final_json[: last_brace_index + 1]
            return final_json
        except ValueError:
            pass
        return input_str

    @classmethod
    def remove_escape_sequences(cls, string):
        """Decode backslash escape sequences (e.g. "\\n" -> newline).

        NOTE(review): unicode_escape decoding of UTF-8 bytes mangles
        non-ASCII characters — confirm inputs are effectively ASCII.
        """
        return string.encode('utf-8').decode('unicode_escape')

    @classmethod
    def add_quotes_to_property_names(cls, json_string: str) -> str:
        """Wrap bare property names (`key:`) in double quotes."""
        def replace(match: re.Match) -> str:
            return f'"{match.group(1)}":'

        # Matches a word immediately followed by ':' anywhere in the text.
        json_string = re.sub(r'(\b\w+\b):', replace, json_string)

        return json_string

    @classmethod
    def balance_braces(cls, json_string: str) -> str:
        """Equalize '{' and '}' counts by trimming trailing '}' or
        appending missing '}' at the end."""
        open_braces_count = json_string.count('{')
        closed_braces_count = json_string.count('}')

        # rstrip removes all trailing '}' at once; the loop counter only
        # guards against excess closers, final counts are recomputed below.
        while closed_braces_count > open_braces_count:
            json_string = json_string.rstrip("}")
            closed_braces_count -= 1

        open_braces_count = json_string.count('{')
        closed_braces_count = json_string.count('}')

        if open_braces_count > closed_braces_count:
            json_string += '}' * (open_braces_count - closed_braces_count)

        return json_string
6 changes: 6 additions & 0 deletions superagi/helper/token_counter.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,3 +38,9 @@ def count_message_tokens(messages: List[BaseMessage], model: str = "gpt-3.5-turb

num_tokens += 3
return num_tokens

@staticmethod
def count_text_tokens(message: str) -> int:
encoding = tiktoken.get_encoding("cl100k_base")
num_tokens = len(encoding.encode(message)) + 4
return num_tokens
6 changes: 6 additions & 0 deletions superagi/tools/base_tool.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@
from pydantic import BaseModel, Field, create_model, validate_arguments, Extra
from inspect import signature

from superagi.config.config import get_config


class SchemaSettings:
"""Configuration for the pydantic model."""
Expand Down Expand Up @@ -72,6 +74,10 @@ def args(self):
def _execute(self, *args: Any, **kwargs: Any):
pass

@property
def max_token_limit(self):
return get_config("MAX_TOOL_TOKEN_LIMIT", 600)

def _parse_input(
self,
tool_input: Union[str, Dict],
Expand Down
Loading

0 comments on commit 38dba37

Please sign in to comment.