Commit
new updates
peytontolbert committed Sep 12, 2024
1 parent 3122275 commit 5590df0
Showing 20 changed files with 1,297 additions and 414 deletions.
166 changes: 101 additions & 65 deletions app/agent/agent_knowledge_interface.py
@@ -10,34 +10,63 @@
from app.chat_with_ollama import ChatGPT
from typing import Dict, Any, List, Tuple
import re
+from app.knowledge.spatial_knowledge import SpatialKnowledgeSystem
+from app.knowledge.temporal_knowledge import TemporalKnowledgeSystem
+from app.agent.agent_thoughts import AgentThoughts


class AgentKnowledgeInterface:
    def __init__(self, uri, user, password, base_path):
        self.knowledge_graph = KnowledgeGraph(uri, user, password)
        self.embedding_manager = EmbeddingManager()
        self.llm = ChatGPT()
-        self.procedural_memory = ProceduralKnowledgeSystem(self.knowledge_graph, self.embedding_manager)
-        self.episodic_memory = EpisodicKnowledgeSystem(self.knowledge_graph, self.embedding_manager)
+        self.procedural_memory = ProceduralKnowledgeSystem(
+            self.knowledge_graph, self.embedding_manager
+        )
+        self.episodic_memory = EpisodicKnowledgeSystem(
+            self.knowledge_graph, self.embedding_manager
+        )
        self.conceptual_knowledge = ConceptualKnowledgeSystem(self.knowledge_graph)
-        self.community_manager = CommunityManager(self.knowledge_graph, self.embedding_manager)
-        self.contextual_knowledge = ContextualKnowledgeSystem(self.knowledge_graph, self.community_manager)
+        self.community_manager = CommunityManager(
+            self.knowledge_graph, self.embedding_manager
+        )
+        self.contextual_knowledge = ContextualKnowledgeSystem(
+            self.knowledge_graph, self.community_manager
+        )
        self.meta_cognitive = MetaCognitiveKnowledgeSystem(self.knowledge_graph)
-        self.semantic_knowledge = SemanticKnowledgeSystem(self.knowledge_graph, self.embedding_manager)
+        self.semantic_knowledge = SemanticKnowledgeSystem(
+            self.knowledge_graph, self.embedding_manager
+        )
+        self.spatial_knowledge = SpatialKnowledgeSystem(self.knowledge_graph)
+        self.temporal_knowledge = TemporalKnowledgeSystem(self.knowledge_graph)
+        self.agent_thoughts = AgentThoughts()

-    async def gather_knowledge(self, task: str, context: str) -> dict:
-        related_episodes = await self.episodic_memory.remember_related_episodes(task, context)
+    async def gather_knowledge(self, task: str) -> dict:
+        context = await self.contextual_knowledge.get_context(task)
+        related_episodes = await self.episodic_memory.remember_related_episodes(task)
        recent_episodes = await self.episodic_memory.remember_recent_episodes(5)
        interpreted_task = await self.semantic_knowledge.retrieve_language_meaning(task)
        if not interpreted_task:
-            interpreted_task = await self.semantic_knowledge.enhance_language_understanding(task)
+            interpreted_task = (
+                await self.semantic_knowledge.enhance_language_understanding(task)
+            )

        # Update procedural knowledge retrieval
-        procedural_info = await self.procedural_memory.retrieve_relevant_tool_usage(task)
+        procedural_info = await self.procedural_memory.retrieve_relevant_tool_usage(
+            task
+        )
        tool_insights = await self.procedural_memory.get_tool_insights(task)

        related_concepts = await self.conceptual_knowledge.get_related_concepts(task)
        performance_data = await self.meta_cognitive.get_relevant_knowledge(task)
-        generalized_knowledge = await self.meta_cognitive.get_generalized_knowledge(related_concepts)
+        generalized_knowledge = await self.meta_cognitive.get_generalized_knowledge(
+            related_concepts
+        )

+        spatial_info = await self.spatial_knowledge.get_spatial_data(task)
+        temporal_info = await self.temporal_knowledge.get_temporal_data(task)

        return {
            "context_info": context,
@@ -49,39 +78,58 @@ async def gather_knowledge(self, task: str, context: str) -> dict:
            "recent_episodes": recent_episodes,
            "procedural_info": procedural_info,
            "tool_insights": tool_insights,
+            "spatial_info": spatial_info,
+            "temporal_info": temporal_info,
        }

-    async def update_knowledge_step(self, task: str, result: str, action: str, context: str, thoughts: str, action_thoughts: str):
-        await self.episodic_memory.log_task(task, result, context, thoughts, action_thoughts)
-        await self.meta_cognitive.log_performance(task, {"result": result, "action": action, "action_thoughts": action_thoughts})
+    async def update_knowledge_step(
+        self,
+        task: str,
+        result: str,
+        action: str,
+        thoughts: str,
+        action_thoughts: str,
+    ):
+        await self.episodic_memory.log_task(task, result, thoughts, action_thoughts)
+        await self.meta_cognitive.log_performance(
+            task,
+            {"result": result, "action": action, "action_thoughts": action_thoughts},
+        )

        if action == "code_execute":
-            insights, tool_usage = await self.procedural_memory.enhance_procedural_knowledge(task, result, context)
+            insights, tool_usage = (
+                await self.procedural_memory.enhance_procedural_knowledge(task, result)
+            )

        episode = Episode(
-            thoughts={"task": task, "context": context, "thoughts": thoughts},
+            thoughts={"task": task, "thoughts": thoughts},
            action={"type": action, "details": action_thoughts},
            result=result,
-            summary=await self._generate_episode_summary(task, thoughts, action, result)
+            summary=await self._generate_episode_summary(
+                task, thoughts, action, result
+            ),
        )
        await self.episodic_memory.memorize_episode(episode)

        concepts = await self.meta_cognitive.extract_concepts(task)
        return concepts

-    async def update_knowledge_complete(self, task: str, result: str, action: str, thoughts: str):
+    async def update_knowledge_complete(self, task: str):
        concepts = await self.meta_cognitive.extract_concepts(task)
-        performance_data = await self.meta_cognitive.log_performance(task, result, thoughts)
-        generalized_knowledge = await self.meta_cognitive.generalize_knowledge(performance_data)
+        performance_data = await self.meta_cognitive.log_performance(task)
+        generalized_knowledge = await self.meta_cognitive.generalize_knowledge(
+            performance_data
+        )
        await self.episodic_memory.organize_episodes()
        await self.episodic_memory.generate_hierarchical_summary()
        await self.conceptual_knowledge.update_concept_relations(concepts)
-        await self.contextual_knowledge.update_context(task, result, thoughts)
-        await self.semantic_knowledge.update_language_understanding(task, result)
-        if action == "code_execute":
-            await self.procedural_memory.enhance_tool_usage(task)
+        await self.contextual_knowledge.update_context(task)
+        await self.semantic_knowledge.update_language_understanding(task)
+        await self.procedural_memory.enhance_tool_usage(task)

-    async def _generate_episode_summary(self, task: str, thoughts: str, action: str, result: str) -> str:
+    async def _generate_episode_summary(
+        self, task: str, thoughts: str, action: str, result: str
+    ) -> str:
        prompt = f"""
        Summarize the following episode:
        Task: {task}
Expand All @@ -91,10 +139,14 @@ async def _generate_episode_summary(self, task: str, thoughts: str, action: str,
Provide a concise summary that captures the key points of this episode.
"""
summary = await self.llm.chat_with_ollama("You are an episode summarizer.", prompt)
summary = await self.llm.chat_with_ollama(
"You are an episode summarizer.", prompt
)
return summary.strip()

async def decide_action(self, task: str, knowledge, thoughts: str) -> Tuple[str, str]:
async def decide_action(
self, task: str, knowledge, thoughts: str
) -> Tuple[str, str]:

prompt = f"""
Analyze the following task, thoughts, and knowledge to decide whether to use the 'respond' or 'code_execute' action:
@@ -111,7 +163,9 @@ async def decide_action(self, task: str, knowledge, thoughts: str) -> Tuple[str, str]:
        Decision: <respond or code_execute>
        Action Thoughts: <Your reasoning for this decision, including how past episodes and community knowledge influenced your choice>
        """
-        response = await self.llm.chat_with_ollama("You are a task analysis and decision-making expert.", prompt)
+        response = await self.llm.chat_with_ollama(
+            "You are a task analysis and decision-making expert.", prompt
+        )
        decision, action_thoughts = self._extract_decision_and_thoughts(response)
        return decision.strip().lower(), action_thoughts.strip()

@@ -128,21 +182,31 @@ def _format_episodic_context(self, episodes: List[Dict[str, Any]]) -> str:
        return "\n\n".join(formatted_episodes)

    def _extract_decision_and_thoughts(self, response: str) -> Tuple[str, str]:
-        decision_match = re.search(r"Decision:\s*(respond|code_execute)", response, re.IGNORECASE)
+        decision_match = re.search(
+            r"Decision:\s*(respond|code_execute)", response, re.IGNORECASE
+        )
        thoughts_match = re.search(r"Action Thoughts:(.*)", response, re.DOTALL)

        decision = decision_match.group(1) if decision_match else ""
        thoughts = thoughts_match.group(1).strip() if thoughts_match else ""

        return decision, thoughts

-    async def update_episode_relevance(self, episode_id: str, task: str, was_helpful: bool):
-        await self.episodic_memory.update_episode_relevance(episode_id, task, was_helpful)
+    async def update_episode_relevance(
+        self, episode_id: str, task: str, was_helpful: bool
+    ):
+        await self.episodic_memory.update_episode_relevance(
+            episode_id, task, was_helpful
+        )

-    async def generate_response(self, task: str, thoughts: str, action_thoughts: str) -> str:
+    async def generate_response(
+        self, task: str, thoughts: str, action_thoughts: str
+    ) -> str:
        knowledge = await self.gather_knowledge(task)
-        episodic_context = self._format_episodic_context(knowledge.get('related_episodes', []))
-        community_context = knowledge.get('community_knowledge', '')
+        episodic_context = self._format_episodic_context(
+            knowledge.get("related_episodes", [])
+        )
+        community_context = knowledge.get("community_knowledge", "")

        prompt = f"""
        Task: {task}
@@ -162,35 +226,7 @@ async def generate_response(self, task: str, thoughts: str, action_thoughts: str) -> str:
        Provide a response or question for clarification, taking into account the episodic and community knowledge.
        """
        response = await self.llm.chat_with_ollama(
-            "You are a knowledgeable assistant with access to past experiences and community knowledge.", prompt
+            "You are a knowledgeable assistant with access to past experiences and community knowledge.",
+            prompt,
        )
        return response.strip()

-    async def generate_thoughts_from_context_and_abstract(self, task: str, context_info: str, generalized_knowledge: str) -> str:
-        prompt = f"""
-        Generate thoughts for the following complex task using the provided context and generalized knowledge:
-        Task: {task}
-        Context: {context_info}
-        Generalized Knowledge: {generalized_knowledge}
-        Consider how the context and generalized knowledge can be applied to approach this complex task.
-        Provide a structured thought process that breaks down the task and considers potential challenges and solutions.
-        """
-        thoughts = await self.llm.chat_with_ollama("You are an expert in complex problem-solving and abstract thinking.", prompt)
-        return thoughts.strip()
-
-    async def generate_thoughts_from_procedural_and_episodic(self, task: str, recent_episodes: List[Dict[str, Any]]) -> str:
-        formatted_episodes = self._format_episodic_context(recent_episodes)
-        prompt = f"""
-        Generate thoughts for the following simple task using recent episodic memories:
-        Task: {task}
-        Recent Episodes:
-        {formatted_episodes}
-        Consider how these recent experiences can inform your approach to this task.
-        Provide a straightforward thought process that applies relevant past experiences to the current task.
-        """
-        thoughts = await self.llm.chat_with_ollama("You are an expert in applying past experiences to current tasks.", prompt)
-        return thoughts.strip()
122 changes: 122 additions & 0 deletions app/agent/agent_thoughts.py
@@ -0,0 +1,122 @@
from typing import List, Dict, Any
from app.chat_with_ollama import ChatGPT


class AgentThoughts:
    def __init__(self):
        self.llm = ChatGPT()

    async def generate_thoughts_from_context_and_abstract(
        self, task: str, context_info: str, generalized_knowledge: str
    ) -> str:
        prompt = f"""
        Generate thoughts for the following complex task using the provided context and generalized knowledge:
        Task: {task}
        Context: {context_info}
        Generalized Knowledge: {generalized_knowledge}
        Consider how the context and generalized knowledge can be applied to approach this complex task.
        Provide a structured thought process as an autonomous agent that breaks down the task and considers potential challenges and solutions.
        Format your response as:
        Thoughts: <Your thoughts>
        """
        thoughts = await self.llm.chat_with_ollama(
            "You are an expert in complex problem-solving and abstract thinking.",
            prompt,
        )
        return thoughts.strip()

    async def generate_thoughts_from_procedural_and_episodic(
        self, task: str, recent_episodes: List[Dict[str, Any]]
    ) -> str:
        formatted_episodes = self._format_episodic_context(recent_episodes)
        prompt = f"""
        Generate thoughts for the following simple task using recent episodic memories:
        Task: {task}
        Recent Episodes:
        {formatted_episodes}
        Consider how these recent experiences can inform your approach to this task.
        Provide a straightforward thought process as an autonomous agent that applies relevant past experiences to the current task.
        Format your response as:
        Thoughts: <Your thoughts>
        """
        thoughts = await self.llm.chat_with_ollama(
            "You are an expert in applying past experiences to current tasks.", prompt
        )
        return thoughts.strip()

    async def generate_thoughts_from_spatial(self, task: str, spatial_info: str) -> str:
        prompt = f"""
        Generate thoughts for the following task using the provided spatial information:
        Task: {task}
        Spatial Information: {spatial_info}
        Format your response as:
        Thoughts: <Your spatial thoughts>
        """
        thoughts = await self.llm.chat_with_ollama(
            "You are an expert in spatial reasoning and problem-solving.",
            prompt,
        )
        return thoughts.strip()

    async def generate_thoughts_from_temporal(
        self, task: str, temporal_info: str
    ) -> str:
        prompt = f"""
        Generate thoughts for the following task using the provided temporal information:
        Task: {task}
        Temporal Information: {temporal_info}
        Format your response as:
        Thoughts: <Your temporal thoughts>
        """
        thoughts = await self.llm.chat_with_ollama(
            "You are an expert in temporal reasoning and problem-solving.",
            prompt,
        )
        return thoughts.strip()

    def _format_episodic_context(self, episodes: List[Dict[str, Any]]) -> str:
        formatted_episodes = []
        for episode in episodes:
            formatted_episode = f"""
            Summary: {episode['summary']}
            Similarity: {episode['similarity']}
            Action: {episode['action']['type']}
            Result: {episode['result']}
            """
            formatted_episodes.append(formatted_episode.strip())
        return "\n\n".join(formatted_episodes)

    async def generate_thoughts_from_action_result(
        self, task: str, action_thoughts: str, result: str
    ) -> str:
        prompt = f"""
        Generate thoughts based on the task, action thoughts, and result:
        Task: {task}
        Action Thoughts: {action_thoughts}
        Result: {result}
        Analyze the outcome and provide insights on:
        1. Was the action successful in addressing the task?
        2. What can be learned from this result?
        3. Are there any adjustments needed for future similar tasks?
        4. What are the next steps or considerations based on this outcome?
        Format your response as:
        Thoughts: <Your thoughts>
        """
        thoughts = await self.llm.chat_with_ollama(
            "You are an expert in analyzing actions and their outcomes.",
            prompt,
        )
        return thoughts.strip()
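A small, illustrative sketch of the new AgentThoughts helper on its own. The episode dict below only carries the keys that _format_episodic_context reads (summary, similarity, action.type, result); the values are made up, and a reachable Ollama backend behind app.chat_with_ollama.ChatGPT is assumed.

import asyncio

from app.agent.agent_thoughts import AgentThoughts


async def main() -> None:
    thoughts_engine = AgentThoughts()

    # Made-up episode records shaped the way _format_episodic_context expects.
    recent_episodes = [
        {
            "summary": "Listed the files in the project root.",
            "similarity": 0.82,
            "action": {"type": "code_execute"},
            "result": "12 files found",
        }
    ]

    thoughts = await thoughts_engine.generate_thoughts_from_procedural_and_episodic(
        "List the Python files in app/agent", recent_episodes
    )
    print(thoughts)


if __name__ == "__main__":
    asyncio.run(main())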