diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py
index c795a3cabd82..8f7f7d50fb38 100644
--- a/autogpt/llm/chat.py
+++ b/autogpt/llm/chat.py
@@ -8,15 +8,8 @@
 from autogpt.llm.base import Message
 from autogpt.llm.llm_utils import create_chat_completion
 from autogpt.llm.token_counter import count_message_tokens
-from autogpt.log_cycle.log_cycle import PROMPT_NEXT_ACTION_FILE_NAME
+from autogpt.log_cycle.log_cycle import CURRENT_CONTEXT_FILE_NAME
 from autogpt.logs import logger
-from autogpt.memory_management.store_memory import (
-    save_memory_trimmed_from_context_window,
-)
-from autogpt.memory_management.summary_memory import (
-    get_newly_trimmed_messages,
-    update_running_summary,
-)
 
 cfg = Config()
 
@@ -153,6 +146,10 @@ def chat_with_ai(
                 # Move to the next most recent message in the full message history
                 next_message_to_add_index -= 1
 
+            from autogpt.memory_management.summary_memory import (
+                get_newly_trimmed_messages,
+                update_running_summary,
+            )
 
             # Insert Memories
             if len(full_message_history) > 0:
@@ -164,7 +161,9 @@ def chat_with_ai(
                     current_context=current_context,
                     last_memory_index=agent.last_memory_index,
                 )
+
                 agent.summary_memory = update_running_summary(
+                    agent,
                     current_memory=agent.summary_memory,
                     new_events=newly_trimmed_messages,
                 )
@@ -237,7 +236,7 @@ def chat_with_ai(
                 agent.created_at,
                 agent.cycle_count,
                 current_context,
-                PROMPT_NEXT_ACTION_FILE_NAME,
+                CURRENT_CONTEXT_FILE_NAME,
             )
 
             # TODO: use a model defined elsewhere, so that model can contain
diff --git a/autogpt/log_cycle/log_cycle.py b/autogpt/log_cycle/log_cycle.py
index 720ca2736dd1..5f2732a8eb4e 100644
--- a/autogpt/log_cycle/log_cycle.py
+++ b/autogpt/log_cycle/log_cycle.py
@@ -6,8 +6,10 @@
 
 DEFAULT_PREFIX = "agent"
 FULL_MESSAGE_HISTORY_FILE_NAME = "full_message_history.json"
-PROMPT_NEXT_ACTION_FILE_NAME = "prompt_next_action.json"
+CURRENT_CONTEXT_FILE_NAME = "current_context.json"
 NEXT_ACTION_FILE_NAME = "next_action.json"
+PROMPT_SUMMARY_FILE_NAME = "prompt_summary.json"
+SUMMARY_FILE_NAME = "summary.txt"
 
 
 class LogCycleHandler:
diff --git a/autogpt/memory_management/summary_memory.py b/autogpt/memory_management/summary_memory.py
index 754c09ba7c05..55ff385341e4 100644
--- a/autogpt/memory_management/summary_memory.py
+++ b/autogpt/memory_management/summary_memory.py
@@ -2,8 +2,10 @@
 import json
 from typing import Dict, List, Tuple
 
+from autogpt.agent import Agent
 from autogpt.config import Config
 from autogpt.llm.llm_utils import create_chat_completion
+from autogpt.log_cycle.log_cycle import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME
 
 cfg = Config()
 
@@ -46,7 +48,7 @@ def get_newly_trimmed_messages(
 
 
 def update_running_summary(
-    current_memory: str, new_events: List[Dict[str, str]]
+    agent: Agent, current_memory: str, new_events: List[Dict[str, str]]
 ) -> str:
     """
     This function takes a list of dictionaries representing new events and combines them with the current summary,
@@ -110,9 +112,24 @@ def update_running_summary(
             "content": prompt,
         }
     ]
+    agent.log_cycle_handler.log_cycle(
+        agent.config.ai_name,
+        agent.created_at,
+        agent.cycle_count,
+        messages,
+        PROMPT_SUMMARY_FILE_NAME,
+    )
 
     current_memory = create_chat_completion(messages, cfg.fast_llm_model)
 
+    agent.log_cycle_handler.log_cycle(
+        agent.config.ai_name,
+        agent.created_at,
+        agent.cycle_count,
+        current_memory,
+        SUMMARY_FILE_NAME,
+    )
+
     message_to_return = {
         "role": "system",
         "content": f"This reminds you of these events from your past: \n{current_memory}",
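
Note for reviewers: taken together, these changes (a) rename the prompt_next_action.json cycle artifact to current_context.json, (b) write the memory-summarization prompt and the LLM's resulting summary to the per-cycle debug log as prompt_summary.json and summary.txt, and (c) pass the Agent into update_running_summary() so it can reach agent.log_cycle_handler. The summary_memory import in chat.py is deferred into the function body, presumably because summary_memory now imports Agent and a module-level import would be circular. The sketch below illustrates the per-cycle artifact-logging pattern the two new log_cycle() calls rely on; the class name, directory layout, and example values are assumptions for illustration, not the actual LogCycleHandler implementation.

# Hypothetical sketch of the per-cycle logging pattern. Class name and
# directory layout are assumptions, not the real autogpt/log_cycle/log_cycle.py.
import json
import os

PROMPT_SUMMARY_FILE_NAME = "prompt_summary.json"
SUMMARY_FILE_NAME = "summary.txt"


class LogCycleHandlerSketch:
    """Writes one artifact per call under <log_dir>/<ai_name>/<created_at>/<cycle>/."""

    def __init__(self, log_dir: str = "logs") -> None:
        self.log_dir = log_dir

    def log_cycle(
        self, ai_name: str, created_at: str, cycle_count: int, data, file_name: str
    ) -> None:
        cycle_dir = os.path.join(self.log_dir, ai_name, created_at, str(cycle_count))
        os.makedirs(cycle_dir, exist_ok=True)
        with open(os.path.join(cycle_dir, file_name), "w", encoding="utf-8") as f:
            if file_name.endswith(".json"):
                # Structured artifacts: the prompt messages, the current context
                json.dump(data, f, ensure_ascii=False, indent=2)
            else:
                # Plain-text artifacts: the running summary itself
                f.write(str(data))


# Usage mirroring the two new calls added in update_running_summary():
handler = LogCycleHandlerSketch()
messages = [{"role": "user", "content": "Create a concise running summary..."}]
handler.log_cycle("ExampleGPT", "20230101_000000", 1, messages, PROMPT_SUMMARY_FILE_NAME)
handler.log_cycle("ExampleGPT", "20230101_000000", 1, "The agent read a file.", SUMMARY_FILE_NAME)

Logging both the summarization prompt and its result in the same cycle directory as the existing artifacts makes it possible to reconstruct, per cycle, exactly what the summarizer saw and produced.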