Skip to content

Commit

Permalink
Merge pull request #1 from krrishdholakia/main
Browse files Browse the repository at this point in the history
Adding testing + debugging tool
  • Loading branch information
ishaan-jaff authored Aug 21, 2023
2 parents b129fca + eb908b2 commit 308e58a
Show file tree
Hide file tree
Showing 8 changed files with 76 additions and 24 deletions.
15 changes: 15 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,21 @@ To start the web application created with streamlit:
streamlit run frontend/app.py
```

## Debugging
If you're testing codeinterpreter and need to see whether calls were made successfully, along with the exact call logs, you can use the [LiteLLM Debugger tool](https://docs.litellm.ai/docs/debugging/hosted_debugging).

```
CodeInterpreterSession(debugger=True, email="test@berri.ai")
```
Your logs will be viewable in real time at `admin.litellm.ai/<your_email>`.

See our live dashboard 👉 [admin.litellm.ai](https://admin.litellm.ai/)

<img src="./img/dashboard.png" width="900"/>




## License

[MIT](https://choosealicense.com/licenses/mit/)
Expand Down
2 changes: 1 addition & 1 deletion codeinterpreterapi/agents/functions_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
BasePromptTemplate,
OutputParserException,
)
from langchain.schema.language_model import BaseLanguageModel
from langchain.base_language import BaseLanguageModel
from langchain.schema.messages import (
AIMessage,
BaseMessage,
Expand Down
7 changes: 6 additions & 1 deletion codeinterpreterapi/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,10 @@ def _type(self) -> str:


class CodeChatAgentOutputParser(AgentOutputParser):
llm: BaseChatModel = None
def __init__(self, llm=None):
super().__init__() # don't forget to call the parent class __init__ method if needed
self.llm = llm
def get_format_instructions(self) -> str:
from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS

Expand All @@ -47,8 +51,9 @@ def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
raise NotImplementedError

async def aparse(
self, text: str, llm: BaseChatModel
self, text: str, llm: BaseChatModel = None
) -> Union[AgentAction, AgentFinish]:
llm = llm or self.llm
try:
response = parse_json_markdown(text)
action, action_input = response["action"], response["action_input"]
Expand Down
12 changes: 8 additions & 4 deletions codeinterpreterapi/session.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
from os import getenv
from typing import Optional
from uuid import UUID, uuid4

from codeboxapi import CodeBox # type: ignore
from codeboxapi.schema import CodeBoxOutput # type: ignore
from langchain.agents import (
Expand All @@ -14,6 +13,7 @@
ConversationalAgent,
ConversationalChatAgent,
)
from langchain.schema import BaseChatMessageHistory
from langchain.chat_models import AzureChatOpenAI, ChatAnthropic, ChatOpenAI, ChatLiteLLM
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ConversationBufferMemory
Expand All @@ -23,7 +23,7 @@
RedisChatMessageHistory,
)
from langchain.prompts.chat import MessagesPlaceholder
from langchain.schema import BaseChatMessageHistory, BaseLanguageModel
from langchain.base_language import BaseLanguageModel
from langchain.tools import BaseTool, StructuredTool

from codeinterpreterapi.agents import OpenAIFunctionsAgent
Expand Down Expand Up @@ -98,8 +98,12 @@ def _tools(self, additional_tools: list[BaseTool]) -> list[BaseTool]:
]

def _choose_llm(
self, model: str = "gpt-4", openai_api_key: Optional[str] = None, **kwargs
self, model: str = "gpt-4", openai_api_key: Optional[str] = None, debugger = False, email: str = None, **kwargs
) -> BaseChatModel:
if ChatLiteLLM:
import litellm # should already be installed through langchain
litellm.debugger = debugger
litellm.email = email
return ChatLiteLLM(
temperature=0.03,
model_name=model,
Expand All @@ -122,7 +126,7 @@ def _choose_agent(self) -> BaseSingleActionAgent:
llm=self.llm,
tools=self.tools,
system_message=code_interpreter_system_message.content,
output_parser=CodeChatAgentOutputParser(),
output_parser=CodeChatAgentOutputParser(llm=self.llm),
)
if isinstance(self.llm, BaseChatModel)
else ConversationalAgent.from_llm_and_tools(
Expand Down
Binary file added img/dashboard.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ repository = "https://github.com/shroominic/codeinterpreter-api"
python = ">=3.9,<3.9.7 || >3.9.7,<4.0"
python-dotenv = "^1"
openai = "^0.27"
langchain = "^0.0.242"
langchain = "^0.0.270"
codeboxapi = ">=0.0.18"
streamlit = { version = "^1", optional = true }
jupyter-kernel-gateway = { version = "^2", optional = true }
Expand Down
31 changes: 14 additions & 17 deletions tests/general_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,23 +2,6 @@

from codeinterpreterapi import CodeInterpreterSession, File


def test_codebox():
session = CodeInterpreterSession()
assert run_sync(session), "Failed to run sync CodeInterpreterSession remotely"
assert asyncio.run(
run_async(session)
), "Failed to run async CodeInterpreterSession remotely"


def test_localbox():
session = CodeInterpreterSession(local=True)
assert run_sync(session), "Failed to run sync CodeInterpreterSession locally"
assert asyncio.run(
run_async(session)
), "Failed to run async CodeInterpreterSession locally"


def run_sync(session: CodeInterpreterSession) -> bool:
try:
assert session.start() == "started"
Expand Down Expand Up @@ -46,6 +29,20 @@ def run_sync(session: CodeInterpreterSession) -> bool:
return True


def test_codebox():
    """Round-trip a remote CodeInterpreterSession through the sync and async paths."""
    remote_session = CodeInterpreterSession()
    sync_ok = run_sync(remote_session)
    assert sync_ok, "Failed to run sync CodeInterpreterSession remotely"
    async_ok = asyncio.run(run_async(remote_session))
    assert async_ok, "Failed to run async CodeInterpreterSession remotely"

def test_localbox():
    """Round-trip a local (in-process) CodeInterpreterSession through the sync and async paths."""
    local_session = CodeInterpreterSession(local=True)
    sync_ok = run_sync(local_session)
    assert sync_ok, "Failed to run sync CodeInterpreterSession locally"
    async_ok = asyncio.run(run_async(local_session))
    assert async_ok, "Failed to run async CodeInterpreterSession locally"

async def run_async(session: CodeInterpreterSession) -> bool:
try:
assert (await session.astart()) == "started"
Expand Down
31 changes: 31 additions & 0 deletions tests/run_examples.py
Original file line number Diff line number Diff line change
@@ -1 +1,32 @@
# TODO: implement test that runs all examples and checks that they don't crash
import sys, os
sys.path.insert(
0, os.path.abspath("../")
) # Adds the parent directory to the system path
import codeinterpreterapi

from codeinterpreterapi import CodeInterpreterSession

async def main():
    """Run one end-to-end example turn against a live session.

    Starts a debug-enabled session backed by a Replicate-hosted Llama-2
    model, asks for a chart, prints the text reply, displays any image
    files returned, and shuts the session down.
    """
    # NOTE(review): debugger/email route call logs to the hosted LiteLLM
    # dashboard — presumably for the debugging feature added in this change.
    session = CodeInterpreterSession(
        verbose=True,
        model="replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
        debugger=True,
        email="test@berri.ai",
    )
    await session.astart()

    # One user turn; the response carries text plus any generated files.
    response = await session.generate_response(
        "Plot the bitcoin chart of 2023 YTD"
    )

    print("AI: ", response.content)
    for generated_file in response.files:
        generated_file.show_image()

    # Always stop the session so remote resources are released.
    await session.astop()


if __name__ == "__main__":
import asyncio
# run the async function
asyncio.run(main())

0 comments on commit 308e58a

Please sign in to comment.