Commit
peytontolbert committed Sep 9, 2024
1 parent 3a4fca7 commit 7a49d71
Showing 11 changed files with 850 additions and 436 deletions.
22 changes: 9 additions & 13 deletions README.md
@@ -45,7 +45,7 @@ Dynamic Agent is an intelligent, adaptive system designed to process tasks using
 
 ### Code Execution
 - Supports Python, JavaScript, and Bash execution.
-- Uses a virtual environment for safe execution.
+- Uses a 'virtual_env' directory as the working directory.
 - Monitors execution progress and provides real-time status updates.
 
 ### Knowledge Graph
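
The bullets above describe multi-language execution rooted in a working directory. A minimal sketch of how such dispatch could work; the interpreter table and the run_snippet helper are illustrative assumptions, not the project's actual CodeExecutionManager API:

import subprocess
from pathlib import Path

# Hypothetical mapping; the real CodeExecutionManager may resolve interpreters differently.
INTERPRETERS = {"python": "python", "javascript": "node", "bash": "bash"}
EXTENSIONS = {"python": ".py", "javascript": ".js", "bash": ".sh"}

def run_snippet(code: str, language: str, workspace: str = "virtual_env") -> str:
    """Write the snippet into the working directory and execute it there."""
    workdir = Path(workspace)
    workdir.mkdir(parents=True, exist_ok=True)
    script = workdir / f"snippet{EXTENSIONS[language]}"
    script.write_text(code)
    proc = subprocess.run(
        [INTERPRETERS[language], script.name],
        cwd=workdir, capture_output=True, text=True, timeout=60,
    )
    return proc.stdout if proc.returncode == 0 else proc.stderr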
@@ -86,12 +86,13 @@ Dynamic Agent is an intelligent, adaptive system designed to process tasks using
 Once the Dynamic Agent is running, you can interact with it by entering tasks. The agent will automatically determine whether to provide a response or execute code based on the task.
 
 Example tasks:
-- "What is the capital of France?" (Response)
-- "Calculate the factorial of 5" (Code Execution)
-- "Create a list of prime numbers up to 100" (Code Execution)
-- "Explain the concept of recursion" (Response)
+- "What is the capital of France?"
+- "Calculate the factorial of 5"
+- "Create a list of prime numbers up to 100"
+- "Copy D:/testproject and document the codebase"
+- "Who is the current President of the United States? Use Google search to find the answer."
 
-The agent will guide you through the process, asking for additional information if needed and confirming when the task is complete.
+The agent will work through the task, asking for additional information when needed and requesting confirmation once the task is complete.
 
 To exit the program, simply type 'exit'.
 
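Console interaction is the primary flow; a sketch of launching the agent from a script instead, assuming the DynamicAgent constructor shown later in this commit (the Neo4j credentials and workspace path are placeholders):

import asyncio
from app.agent.dynamic_agent import DynamicAgent

async def main():
    # Placeholder Neo4j connection details and workspace path.
    agent = DynamicAgent("bolt://localhost:7687", "neo4j", "password", "./workspace")
    await agent.run()  # interactive loop; type 'exit' to quit

if __name__ == "__main__":
    asyncio.run(main())
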
@@ -119,12 +120,7 @@ Contributions to the Dynamic Agent project are welcome. Please ensure to follow
 4. Push to the branch (`git push origin feature/AmazingFeature`)
 5. Open a Pull Request
 
-## License
+## Acknowledgements
 
-This project is licensed under the MIT License - see the `LICENSE` file for details.
-
-## Acknowledgments
-
-- OpenAI for the GPT model used in natural language processing
+- Ollama for the GPT model used in natural language processing
 - Neo4j for the graph database used in knowledge management
 - All contributors who have helped to improve and expand this project
Empty file added TODO.md
Empty file.
96 changes: 86 additions & 10 deletions app/agent/context_manager.py
@@ -1,28 +1,104 @@
 from typing import Dict, Any, List
 import json
+from app.knowledge.knowledge_graph import KnowledgeGraph
+from app.entropy.entropy_manager import EntropyManager  # Import EntropyManager
+import time
 
 
 class ContextManager:
-    def __init__(self):
+    def __init__(self, knowledge_graph: KnowledgeGraph, entropy_manager: EntropyManager):
         self.task_history: List[Dict[str, Any]] = []
         self.working_memory: Dict[str, Any] = {}
+        self.knowledge_graph = knowledge_graph
+        self.entropy_manager = entropy_manager  # Initialize EntropyManager
 
-    def add_task(self, task: str, action: str, result: str):
-        self.task_history.append({
+    async def add_task(self, task: str, action: str, result: str, score: float):
+        task_entry = {
             "task": task,
             "action": action,
-            "result": result
-        })
+            "result": result,
+            "score": score
+        }
+        self.task_history.append(task_entry)
+        await self.knowledge_graph.add_task_result(task, result, score)
+
+        # Update working memory with the latest task
+        await self.update_working_memory("latest_task", task_entry)
 
-    def update_working_memory(self, key: str, value: Any):
+    async def update_working_memory(self, key: str, value: Any):
         self.working_memory[key] = value
+        await self.knowledge_graph.store_compressed_knowledge(json.dumps(self.working_memory))
 
-    def get_recent_context(self, num_tasks: int = 5) -> str:
+    async def get_recent_context(self, num_tasks: int = 5) -> str:
         recent_tasks = self.task_history[-num_tasks:]
         context = "Recent tasks:\n"
         for task in recent_tasks:
-            context += f"Task: {task['task']}\nAction: {task['action']}\nResult: {task['result']}\n\n"
+            context += f"Task: {task['task']}\nAction: {task['action']}\nResult: {task['result']}\nScore: {task['score']}\n\n"
+
+        relevant_knowledge = await self.knowledge_graph.get_relevant_knowledge(context)
+        context += f"Relevant knowledge: {json.dumps(relevant_knowledge, indent=2)}\n\n"
         context += f"Working memory: {json.dumps(self.working_memory, indent=2)}"
         return context
 
-    def get_task_history(self, limit: int = 10) -> List[Dict[str, Any]]:
-        return self.task_history[-limit:]
+    async def get_task_history(self, limit: int = 10) -> List[Dict[str, Any]]:
+        return self.task_history[-limit:]
+
+    async def get_performance_metrics(self) -> Dict[str, float]:
+        return await self.knowledge_graph.get_system_performance()
+
+    async def add_tool_usage(self, tool_name: str, subtask: Dict[str, Any], result: Dict[str, Any]):
+        await self.knowledge_graph.store_tool_usage(tool_name, subtask, result)
+        # Update working memory with the latest tool usage
+        await self.update_working_memory("latest_tool_usage", {
+            "tool_name": tool_name,
+            "subtask": subtask,
+            "result": result
+        })
+
+    async def get_recent_tool_usage(self, limit: int = 5) -> List[Dict[str, Any]]:
+        return await self.knowledge_graph.get_tool_usage_history(limit=limit)
+
+    async def get_working_memory(self, key: str) -> Any:
+        return self.working_memory.get(key)
+
+    async def store_spatial_memory(self, location: str, context: Dict[str, Any]):
+        await self.knowledge_graph.add_or_update_node("SpatialMemory", {
+            "location": location,
+            "context": json.dumps(context),
+            "timestamp": time.time()
+        })
+
+    async def retrieve_spatial_memory(self, location: str) -> Dict[str, Any]:
+        result = await self.knowledge_graph.get_node("SpatialMemory", {"location": location})
+        return json.loads(result['context']) if result else {}
+
+    async def compress_long_term_memory(self):
+        old_memories = await self.knowledge_graph.get_old_memories(threshold_days=30)
+        compressed_memory = await self.entropy_manager.compress_memories(old_memories)  # Use EntropyManager for memory compression
+        await self.knowledge_graph.store_compressed_memory(compressed_memory)
+
+    async def update_spatial_memory(self, location: str, context: Dict[str, Any]):
+        existing_context = await self.retrieve_spatial_memory(location)
+        updated_context = {**existing_context, **context}
+        await self.store_spatial_memory(location, updated_context)
+
+    async def get_memory_summary(self) -> Dict[str, Any]:
+        return {
+            "working_memory": self.working_memory,
+            "recent_tasks": self.task_history[-5:],
+            "recent_tool_usage": await self.get_recent_tool_usage(),
+        }
+
+    async def create_task_context(self, task: str) -> Dict[str, Any]:
+        return {"original_task": task, "steps": []}
+
+    async def get_contextual_knowledge(self, task: str, task_context: Dict[str, Any]) -> Dict[str, Any]:
+        recent_context = await self.get_recent_context()
+        relevant_knowledge = await self.knowledge_graph.get_relevant_knowledge(task)
+        relevant_episodes = await self.knowledge_graph.recall_relevant_episodes(task_context)
+
+        return {
+            "recent_context": recent_context,
+            "relevant_knowledge": relevant_knowledge,
+            "relevant_episodes": relevant_episodes
+        }
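
Taken together, add_task now mirrors every scored task into the knowledge graph and refreshes working memory. A usage sketch, assuming already-constructed KnowledgeGraph and EntropyManager instances (their constructors are not shown in this commit):

from app.agent.context_manager import ContextManager
from app.entropy.entropy_manager import EntropyManager
from app.knowledge.knowledge_graph import KnowledgeGraph

async def demo(kg: KnowledgeGraph, em: EntropyManager) -> None:
    """Exercise the reworked async ContextManager."""
    ctx = ContextManager(kg, em)
    # add_task persists the scored result to the graph and refreshes working memory.
    await ctx.add_task("Calculate the factorial of 5", "code_execute", "120", score=0.9)
    print(await ctx.get_recent_context(num_tasks=1))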
128 changes: 116 additions & 12 deletions app/agent/dynamic_agent.py
@@ -6,7 +6,6 @@
 from app.execution.code_execution_manager import CodeExecutionManager
 from app.knowledge.knowledge_graph import KnowledgeGraph
 from app.virtual_env.virtual_environment import VirtualEnvironment
-from app.learning.continuous_learner import ContinuousLearner
 from app.logging.logging_manager import LoggingManager
 from app.utils.code_utils import format_code
 from app.agent.context_manager import ContextManager
@@ -16,15 +15,13 @@ class DynamicAgent:
     def __init__(self, uri, user, password, base_path):
         self.llm = ChatGPT()
         self.code_execution_manager = CodeExecutionManager(self.llm)
-        self.knowledge_graph = KnowledgeGraph(uri, user, password)
+        self.logging_manager = LoggingManager()  # Initialize logging manager first
+        self.knowledge_graph = KnowledgeGraph(uri, user, password, self.llm)  # Pass LLM to KnowledgeGraph
         self.virtual_env = VirtualEnvironment(base_path)
         self.env_id = None
         self.has_memory = False
         self.task_history = []
-        self.continuous_learner = ContinuousLearner(self.knowledge_graph, self.llm)
-        self.logging_manager = LoggingManager()
-        self.context_manager = ContextManager()
-        self.reward_model = RewardModel(self.llm)
+        self.context_manager = ContextManager(self.knowledge_graph)
+        self.reward_model = RewardModel(self.llm, self.knowledge_graph)
 
     async def setup(self):
         if not os.path.exists(self.virtual_env.base_path):
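
Note that ContextManager's new signature (see context_manager.py above) also expects an EntropyManager, while this constructor passes only the knowledge graph. A wiring sketch consistent with both files; EntropyManager's constructor argument is an assumption, since only its import path appears in this commit:

from app.agent.context_manager import ContextManager
from app.entropy.entropy_manager import EntropyManager
from app.knowledge.knowledge_graph import KnowledgeGraph

def build_context_manager(knowledge_graph: KnowledgeGraph, llm) -> ContextManager:
    # EntropyManager(llm) is assumed, not confirmed by this diff.
    return ContextManager(knowledge_graph, EntropyManager(llm))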
@@ -51,7 +48,7 @@ async def show_welcome_screen(self):
         self.logging_manager.log_info(welcome_message)
 
     async def process_task(self, task: str):
-        task_context = {"original_task": task, "steps": []}
+        task_context = await self.context_manager.create_task_context(task)  # Use ContextManager to create task context
         while True:
             decision = await self.decide_action(task, task_context)
             action = decision["action"]
@@ -70,7 +67,7 @@ async def process_task(self, task: str):
                 })
                 if is_complete:
                     break
-                task = input("User: ")
+                task += "\n" + input("User: ")
             elif action == "code_execute":
                 result = await self.code_execute(task, task_context)
                 task_context["steps"].append({
@@ -85,15 +82,62 @@ async def process_task(self, task: str):
                 result = f"Error: Unknown action '{action}'. The agent can only 'respond' or 'code_execute'."
                 print(result)
 
+        # Update working memory with task context
+        await self.context_manager.update_working_memory(f"task_context_{task}", task_context)
+        await self.context_manager.update_working_memory("latest_result", result)
+
         # Evaluate the task using the reward model
         score = await self.reward_model.evaluate_task(task, task_context, result)
         self.logging_manager.log_info(f"Task evaluation score: {score}")
 
-        # Update the knowledge graph with the task result and score
-        await self.knowledge_graph.add_task_result(task, result, score)
+        # Update the context manager with the task result and score
+        await self.context_manager.add_task(task, action, result, score)
+
+        # Store the episode in episodic memory
+        await self.knowledge_graph.store_episode(task_context)
+
+        # Reflect on the task
+        await self.reflect_on_task(task, task_context, result)
+
+        # Trigger memory consolidation
+        await self.consolidate_memory()
+
+        # Get learning insights
+        learning_insights = await self.reward_model.get_learning_insights()
+        self.logging_manager.log_info(f"Learning insights: {json.dumps(learning_insights, indent=2)}")
+
+        # Periodic memory consolidation
+        await self.periodic_memory_consolidation()
+
+        # Generate and store meta-learning insights
+        await self.generate_meta_learning_insights()
+
+        # Update knowledge with learned patterns
+        await self.reward_model.update_knowledge_with_patterns()
 
         return "Task completed."
 
+    async def decide_action(self, task: str, task_context: Dict[str, Any]) -> Dict[str, Any]:
+        contextual_knowledge = await self.context_manager.get_contextual_knowledge(task, task_context)
+
+        prompt = f"""
+        Task: {task}
+        Recent Context: {contextual_knowledge['recent_context']}
+        Relevant Knowledge: {json.dumps(contextual_knowledge['relevant_knowledge'], indent=2)}
+        Relevant Past Episodes: {json.dumps(contextual_knowledge['relevant_episodes'], indent=2)}
+        Working Memory: {json.dumps(self.context_manager.working_memory, indent=2)}
+        Decide the best action to take: 'respond' or 'code_execute'.
+        Provide your decision as a JSON object with the following structure:
+        {{
+            "action": string,
+            "confidence": float,
+            "reasoning": string
+        }}
+        """
+        decision = await self.llm.chat_with_ollama("You are an expert in task analysis and decision making.", prompt)
+        return json.loads(decision)
+
     async def respond(self, task: str, task_context: Dict[str, Any]) -> str:
         relevant_knowledge = await self.knowledge_graph.get_relevant_knowledge(task)
 
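decide_action feeds the model a context-rich prompt and parses the reply with json.loads, which raises on any stray prose around the JSON. A more defensive parse might look like this (parse_decision is an illustrative helper, not part of the commit):

import json
import re
from typing import Any, Dict

def parse_decision(raw: str) -> Dict[str, Any]:
    """Pull the first JSON object out of an LLM reply, defaulting to 'respond'."""
    match = re.search(r"\{.*\}", raw, re.DOTALL)
    if not match:
        return {"action": "respond", "confidence": 0.0, "reasoning": "no JSON found"}
    try:
        decision = json.loads(match.group(0))
    except json.JSONDecodeError:
        return {"action": "respond", "confidence": 0.0, "reasoning": "invalid JSON"}
    if decision.get("action") not in ("respond", "code_execute"):
        decision["action"] = "respond"
    return decision
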
@@ -142,20 +186,58 @@ async def code_execute(self, task: str, task_context: Dict[str, Any]) -> str:
         try:
             result = await self.code_execution_manager.execute_and_monitor(formatted_code, self.execution_callback, language, cwd=workspace_dir)
             if result['status'] == 'success':
+                # Record tool usage
+                await self.context_manager.add_tool_usage("code_execution", {
+                    "task": task,
+                    "language": language,
+                    "thoughts": thoughts
+                }, {
+                    "result": result['result'],
+                    "status": result['status']
+                })
+
+                # Evaluate the task using the reward model
+                score = await self.reward_model.evaluate_task(task, task_context, result['result'])
+                self.logging_manager.log_info(f"Task evaluation score: {score}")
+
+                # Evaluate tool usage
+                tool_score = await self.reward_model.evaluate_tool_usage("code_execution", {
+                    "task": task,
+                    "language": language,
+                    "thoughts": thoughts
+                }, {
+                    "result": result['result'],
+                    "status": result['status']
+                })
+                self.logging_manager.log_info(f"Tool usage evaluation score: {tool_score}")
+
                 # Update the knowledge graph with the task result and score
                 task_result = await self.knowledge_graph.add_task_result(task, result['result'], score)
                 await self.knowledge_graph.add_relationships_to_concepts(task_result['id'], task)
-                await self.continuous_learner.learn({"content": task}, {"result": result['result']})
                 return f"Thoughts: {thoughts}\n\nResult: {result['result']}\n\nTask completed successfully."
             else:
                 error_analysis = await self.handle_error(result['error'], formatted_code)
+                # Record failed tool usage
+                await self.context_manager.add_tool_usage("code_execution", {
+                    "task": task,
+                    "language": language,
+                    "thoughts": thoughts
+                }, {
+                    "error": result['error'],
+                    "status": result['status']
+                })
                 return f"Thoughts: {thoughts}\n\nError: {result['error']}\n\nSuggested Fix: {error_analysis}"
         except Exception as e:
             error_analysis = await self.handle_error(str(e), formatted_code)
+            # Record exception in tool usage
+            await self.context_manager.add_tool_usage("code_execution", {
+                "task": task,
+                "language": language,
+                "thoughts": thoughts
+            }, {
+                "exception": str(e),
+                "status": "exception"
+            })
             return f"Unexpected error: {str(e)}\n\nSuggested Fix: {error_analysis}"
 
     async def generate_thoughts(self, task: str) -> str:
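
The success, failure, and exception paths above each record the same subtask payload with a different outcome. A sketch of factoring that bookkeeping into one helper (the names are illustrative):

from typing import Any, Dict

async def record_code_execution(context_manager, task: str, language: str,
                                thoughts: str, outcome: Dict[str, Any]) -> None:
    """Record one code-execution attempt regardless of how it ended."""
    subtask = {"task": task, "language": language, "thoughts": thoughts}
    # outcome carries result/error/exception plus a status, matching the diff above.
    await context_manager.add_tool_usage("code_execution", subtask, outcome)

Each branch would then call this helper with its branch-specific outcome dict instead of repeating the payload three times.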
@@ -186,6 +268,23 @@ async def handle_error(self, error: str, code: str = None):
         self.context_manager.update_working_memory("last_error", {"error": error, "analysis": analysis})
         return analysis
 
+    async def reflect_on_task(self, task: str, task_context: Dict[str, Any], result: str):
+        insights = await self.reward_model._extract_insights(task, task_context, result)
+        await self.knowledge_graph.integrate_insights(insights)
+
+    async def consolidate_memory(self):
+        recent_tasks = self.context_manager.get_recent_tasks()
+        await self.knowledge_graph.consolidate_memory(recent_tasks)
+
+    async def periodic_memory_consolidation(self):
+        await self.context_manager.compress_long_term_memory()
+        await self.knowledge_graph.consolidate_knowledge()
+
+    async def generate_meta_learning_insights(self):
+        recent_tasks = await self.context_manager.get_recent_tasks(limit=10)
+        insights = await self.reward_model.generate_meta_insights(recent_tasks)
+        await self.knowledge_graph.store_meta_learning_insights(insights)
+
     async def run(self):
         await self.setup()
         while True:
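
consolidate_memory and generate_meta_learning_insights call self.context_manager.get_recent_tasks, but the ContextManager in this commit defines get_task_history instead. A reconciliation sketch, assuming the recent-task list is the intent:

from typing import Any, Dict, List

async def recent_tasks(context_manager, limit: int = 10) -> List[Dict[str, Any]]:
    # ContextManager exposes get_task_history (async); get_recent_tasks is not defined there.
    return await context_manager.get_task_history(limit=limit)
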
@@ -195,6 +294,11 @@ async def run(self):
             result = await self.process_task(task)
             self.logging_manager.log_info(f"Task result: {result}")
             print(result)  # Display the result to the user
+
+            # Get latest meta-learning insights from the knowledge graph
+            latest_insights = await self.knowledge_graph.get_latest_meta_learning_insights()
+            self.logging_manager.log_info(f"Latest meta-learning insights: {json.dumps(latest_insights, indent=2)}")
+
         await self.cleanup()
 
     async def cleanup(self):