# parser.py (forked from shroominic/codeinterpreter-api)
from __future__ import annotations

import re
from typing import Union

from langchain.agents import AgentOutputParser
from langchain.chat_models.base import BaseChatModel
from langchain.output_parsers.json import parse_json_markdown
from langchain.schema import AgentAction, AgentFinish, OutputParserException

from codeinterpreterapi.chains import extract_python_code

class CodeAgentOutputParser(AgentOutputParser):
    """Parses plain-text agent output in the conversational "Action / Action Input" format."""

    ai_prefix: str = "AI"

    def get_format_instructions(self) -> str:
        from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS

        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        # If the model answered directly (e.g. "AI: ..."), treat everything
        # after the prefix as the final answer.
        if f"{self.ai_prefix}:" in text:
            return AgentFinish(
                {"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
            )
        # Otherwise expect an "Action: ... / Action Input: ..." pair.
        regex = r"Action: (.*?)[\n]*Action Input: (.*)"
        match = re.search(regex, text)
        if not match:
            raise OutputParserException(f"Could not parse LLM output: `{text}`")
        action = match.group(1)
        action_input = match.group(2)
        return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)

    @property
    def _type(self) -> str:
        return "conversational"

class CodeChatAgentOutputParser(AgentOutputParser):
    """Parses JSON-markdown agent output, with a fallback that extracts Python code blocks."""

    def __init__(self, llm: BaseChatModel, **kwargs):
        super().__init__(**kwargs)
        self.llm = llm

    def get_format_instructions(self) -> str:
        from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS

        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        raise NotImplementedError

    async def aparse(self, text: str) -> Union[AgentAction, AgentFinish]:
        try:
            response = parse_json_markdown(text)
            action, action_input = response["action"], response["action_input"]
            if action == "Final Answer":
                return AgentFinish({"output": action_input}, text)
            else:
                return AgentAction(action, action_input, text)
        except Exception:
            # Fallback: the JSON was malformed, but the model may still have
            # intended to run Python code.
            if '"action": "python"' in text:
                print("TODO: Not implemented")
                # extract python code from text with prompt
                text = extract_python_code(text, llm=self.llm) or ""
                match = re.search(r"```python\n(.*?)```", text)
                if match:
                    code = match.group(1).replace("\\n", "; ")
                    return AgentAction("python", code, text)
            raise OutputParserException(f"Could not parse LLM output: `{text}`")

    @property
    def _type(self) -> str:
        return "conversational_chat"