Commit: merge main

lpinheiroms committed Oct 27, 2024
2 parents 4391961 + 3fe0f9e commit b3f0672
Showing 30 changed files with 1,999 additions and 1,597 deletions.
7 changes: 7 additions & 0 deletions .github/workflows/checks.yml
@@ -18,6 +18,7 @@ jobs:
      - uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true
          version: "0.4.26"
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
@@ -36,6 +37,7 @@ jobs:
      - uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true
          version: "0.4.26"
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
@@ -64,6 +66,7 @@ jobs:
      - uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true
          version: "0.4.26"
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
@@ -92,6 +95,7 @@ jobs:
      - uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true
          version: "0.4.26"
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
@@ -118,6 +122,7 @@ jobs:
      - uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true
          version: "0.4.26"
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
@@ -142,6 +147,7 @@ jobs:
      - uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true
          version: "0.4.26"
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
@@ -160,6 +166,7 @@ jobs:
      - uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true
          version: "0.4.26"
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
1 change: 1 addition & 0 deletions .github/workflows/docs.yml
@@ -45,6 +45,7 @@ jobs:
      - uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true
          version: "0.4.26"
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
1 change: 1 addition & 0 deletions .github/workflows/single-python-package.yml
@@ -35,6 +35,7 @@ jobs:
      - uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true
          version: "0.4.26"
      - run: uv build --package ${{ github.event.inputs.package }} --out-dir dist/
        working-directory: python
      - name: Publish package to PyPI
@@ -1,11 +1,12 @@
from ._base_chat_agent import BaseChatAgent, BaseToolUseChatAgent
from ._assistant_agent import AssistantAgent
from ._base_chat_agent import BaseChatAgent
from ._code_executor_agent import CodeExecutorAgent
from ._coding_assistant_agent import CodingAssistantAgent
from ._tool_use_assistant_agent import ToolUseAssistantAgent

__all__ = [
"BaseChatAgent",
"BaseToolUseChatAgent",
"AssistantAgent",
"CodeExecutorAgent",
"CodingAssistantAgent",
"ToolUseAssistantAgent",
Expand Down
@@ -0,0 +1,140 @@
import asyncio
import json
import logging
from typing import Any, Awaitable, Callable, List, Sequence

from autogen_core.base import CancellationToken
from autogen_core.components import FunctionCall
from autogen_core.components.models import (
    AssistantMessage,
    ChatCompletionClient,
    FunctionExecutionResult,
    FunctionExecutionResultMessage,
    LLMMessage,
    SystemMessage,
    UserMessage,
)
from autogen_core.components.tools import FunctionTool, Tool
from pydantic import BaseModel, ConfigDict

from .. import EVENT_LOGGER_NAME
from ..messages import (
    ChatMessage,
    StopMessage,
    TextMessage,
)
from ._base_chat_agent import BaseChatAgent

event_logger = logging.getLogger(EVENT_LOGGER_NAME)


class ToolCallEvent(BaseModel):
    """A tool call event."""

    tool_calls: List[FunctionCall]
    """The tool calls requested by the model."""

    model_config = ConfigDict(arbitrary_types_allowed=True)


class ToolCallResultEvent(BaseModel):
    """A tool call result event."""

    tool_call_results: List[FunctionExecutionResult]
    """The results of the tool calls."""

    model_config = ConfigDict(arbitrary_types_allowed=True)
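
# A sketch (not part of this module) of how a consumer might observe the
# ToolCallEvent and ToolCallResultEvent records emitted via event_logger.debug(...)
# below; the handler choice here is an assumption:
#
#     import logging
#     from autogen_agentchat import EVENT_LOGGER_NAME
#
#     logger = logging.getLogger(EVENT_LOGGER_NAME)
#     logger.addHandler(logging.StreamHandler())
#     logger.setLevel(logging.DEBUG)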


class AssistantAgent(BaseChatAgent):
    """An agent that provides assistance with tool use.

    It responds with a StopMessage when 'terminate' is detected in the response.

    Args:
        name (str): The name of the agent.
        model_client (ChatCompletionClient): The model client to use for inference.
        tools (List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None, optional): The tools to register with the agent.
        description (str, optional): The description of the agent.
        system_message (str, optional): The system message for the model.
    """

    def __init__(
        self,
        name: str,
        model_client: ChatCompletionClient,
        *,
        tools: List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None,
        description: str = "An agent that provides assistance with the ability to use tools.",
        system_message: str = "You are a helpful AI assistant. Solve tasks using your tools. Reply with 'TERMINATE' when the task has been completed.",
    ):
        super().__init__(name=name, description=description)
        self._model_client = model_client
        self._system_messages = [SystemMessage(content=system_message)]
        self._tools: List[Tool] = []
        if tools is not None:
            for tool in tools:
                if isinstance(tool, Tool):
                    self._tools.append(tool)
                elif callable(tool):
                    # Use the callable's docstring as the tool description when available;
                    # a local name avoids shadowing the agent's own description argument.
                    tool_description = tool.__doc__ if tool.__doc__ is not None else ""
                    self._tools.append(FunctionTool(tool, description=tool_description))
                else:
                    raise ValueError(f"Unsupported tool type: {type(tool)}")
        self._model_context: List[LLMMessage] = []

    async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> ChatMessage:
        # Add messages to the model context.
        for msg in messages:
            # TODO: add special handling for handoff messages
            self._model_context.append(UserMessage(content=msg.content, source=msg.source))

        # Generate an inference result based on the current model context.
        llm_messages = self._system_messages + self._model_context
        result = await self._model_client.create(llm_messages, tools=self._tools, cancellation_token=cancellation_token)

        # Add the response to the model context.
        self._model_context.append(AssistantMessage(content=result.content, source=self.name))

        # Run tool calls until the model produces a string response.
        while isinstance(result.content, list) and all(isinstance(item, FunctionCall) for item in result.content):
            event_logger.debug(ToolCallEvent(tool_calls=result.content))
            # Execute the tool calls concurrently.
            results = await asyncio.gather(
                *[self._execute_tool_call(call, cancellation_token) for call in result.content]
            )
            event_logger.debug(ToolCallResultEvent(tool_call_results=results))
            self._model_context.append(FunctionExecutionResultMessage(content=results))
            # Generate an inference result based on the current model context,
            # keeping the system messages at the front as in the first call.
            result = await self._model_client.create(
                self._system_messages + self._model_context, tools=self._tools, cancellation_token=cancellation_token
            )
            self._model_context.append(AssistantMessage(content=result.content, source=self.name))

        assert isinstance(result.content, str)
        # Detect stop request.
        request_stop = "terminate" in result.content.strip().lower()
        if request_stop:
            return StopMessage(content=result.content, source=self.name)

        return TextMessage(content=result.content, source=self.name)

    async def _execute_tool_call(
        self, tool_call: FunctionCall, cancellation_token: CancellationToken
    ) -> FunctionExecutionResult:
        """Execute a tool call and return the result."""
        try:
            if not self._tools:
                raise ValueError("No tools are available.")
            tool = next((t for t in self._tools if t.name == tool_call.name), None)
            if tool is None:
                raise ValueError(f"The tool '{tool_call.name}' is not available.")
            arguments = json.loads(tool_call.arguments)
            result = await tool.run_json(arguments, cancellation_token)
            result_as_str = tool.return_value_as_string(result)
            return FunctionExecutionResult(content=result_as_str, call_id=tool_call.id)
        except Exception as e:
            return FunctionExecutionResult(content=f"Error: {e}", call_id=tool_call.id)
@@ -1,10 +1,9 @@
from abc import ABC, abstractmethod
from typing import List, Sequence
from typing import Sequence

from autogen_core.base import CancellationToken
from autogen_core.components.tools import Tool

from ..base import ChatAgent, TaskResult, TerminationCondition, ToolUseChatAgent
from ..base import ChatAgent, TaskResult, TerminationCondition
from ..messages import ChatMessage
from ..teams import RoundRobinGroupChat

@@ -51,21 +50,3 @@ async def run(
            termination_condition=termination_condition,
        )
        return result


class BaseToolUseChatAgent(BaseChatAgent, ToolUseChatAgent):
    """Base class for a chat agent that can use tools.

    Subclass this base class to create an agent class that uses tools by returning
    ToolCallMessage message from the :meth:`on_messages` method and receiving
    ToolCallResultMessage message from the input to the :meth:`on_messages` method.
    """

    def __init__(self, name: str, description: str, registered_tools: List[Tool]) -> None:
        super().__init__(name, description)
        self._registered_tools = registered_tools

    @property
    def registered_tools(self) -> List[Tool]:
        """The list of tools that the agent can use."""
        return self._registered_tools
@@ -1,20 +1,14 @@
from typing import List, Sequence
import warnings

from autogen_core.base import CancellationToken
from autogen_core.components.models import (
    AssistantMessage,
    ChatCompletionClient,
    LLMMessage,
    SystemMessage,
    UserMessage,
)

from ..messages import ChatMessage, MultiModalMessage, StopMessage, TextMessage
from ._base_chat_agent import BaseChatAgent
from ._assistant_agent import AssistantAgent


class CodingAssistantAgent(BaseChatAgent):
"""An agent that provides coding assistance using an LLM model client.
class CodingAssistantAgent(AssistantAgent):
"""[DEPRECATED] An agent that provides coding assistance using an LLM model client.
It responds with a StopMessage when 'terminate' is detected in the response.
"""
@@ -37,29 +31,10 @@ def __init__(
When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
Reply "TERMINATE" in the end when code has been executed and task is complete.""",
    ):
        super().__init__(name=name, description=description)
        self._model_client = model_client
        self._system_messages = [SystemMessage(content=system_message)]
        self._model_context: List[LLMMessage] = []

    async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> ChatMessage:
        # Add messages to the model context and detect stopping.
        for msg in messages:
            if not isinstance(msg, TextMessage | MultiModalMessage | StopMessage):
                raise ValueError(f"Unsupported message type: {type(msg)}")
            self._model_context.append(UserMessage(content=msg.content, source=msg.source))

        # Generate an inference result based on the current model context.
        llm_messages = self._system_messages + self._model_context
        result = await self._model_client.create(llm_messages, cancellation_token=cancellation_token)
        assert isinstance(result.content, str)

        # Add the response to the model context.
        self._model_context.append(AssistantMessage(content=result.content, source=self.name))

        # Detect stop request.
        request_stop = "terminate" in result.content.strip().lower()
        if request_stop:
            return StopMessage(content=result.content, source=self.name)

        return TextMessage(content=result.content, source=self.name)
        # Deprecation warning.
        warnings.warn(
            "CodingAssistantAgent is deprecated. Use AssistantAgent instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(name, model_client, description=description, system_message=system_message)
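
Migration from the deprecated class is a drop-in rename; a minimal sketch,
reusing a model_client constructed as in the earlier example:

    from autogen_agentchat.agents import AssistantAgent, CodingAssistantAgent

    # Before: still works, but now emits a DeprecationWarning at construction.
    legacy_agent = CodingAssistantAgent("coder", model_client=model_client)

    # After: same constructor shape, plus optional tool support.
    agent = AssistantAgent("coder", model_client=model_client)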
