From 66c63a77102d827c4fa8f752b402a357f83c129b Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 18 Mar 2025 19:46:53 -0700 Subject: [PATCH 01/26] Introduce StructuredMessage type into ChatMessage --- .../agents/_assistant_agent.py | 16 ++- .../agents/_code_executor_agent.py | 4 +- .../agents/_society_of_mind_agent.py | 15 +-- .../src/autogen_agentchat/messages.py | 66 +++++++++- .../src/autogen_agentchat/state/_states.py | 47 ++++++- .../teams/_group_chat/_selector_group_chat.py | 4 + .../src/autogen_agentchat/ui/_console.py | 4 + .../src/autogen_agentchat/utils/_utils.py | 8 +- .../tests/test_assistant_agent.py | 19 +++ .../tests/test_group_chat.py | 115 ++++++++++++++++-- .../autogen-agentchat/tests/test_messages.py | 60 +++++++++ .../agents/file_surfer/_file_surfer.py | 19 ++- .../agents/openai/_openai_assistant_agent.py | 34 ++++-- .../web_surfer/_multimodal_web_surfer.py | 20 ++- .../tests/test_filesurfer_agent.py | 3 + 15 files changed, 396 insertions(+), 38 deletions(-) create mode 100644 python/packages/autogen-agentchat/tests/test_messages.py diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index ee8afbce1582..c3a8d7eda39d 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -46,6 +46,7 @@ HandoffMessage, MemoryQueryEvent, ModelClientStreamingChunkEvent, + StructuredMessage, TextMessage, ThoughtEvent, ToolCallExecutionEvent, @@ -810,7 +811,20 @@ async def _add_messages_to_context( # Add handoff context to the model context. for context_msg in msg.context: await model_context.add_message(context_msg) - await model_context.add_message(UserMessage(content=msg.content, source=msg.source)) + # Add the handoff message itself to the model context. + await model_context.add_message(UserMessage(content=msg.content, source=msg.source)) + elif isinstance(msg, StructuredMessage): + # Add structured message to the model context by serializing it. + serialized_content = msg.content.model_dump_json() + await model_context.add_message( + UserMessage( + content=serialized_content, + source=msg.source, + ) + ) + else: + # Add the content as user message content. + await model_context.add_message(UserMessage(content=msg.content, source=msg.source)) @staticmethod async def _update_model_context_with_memory( diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py index 089daa2a15f0..e94ce4550efb 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py @@ -21,7 +21,9 @@ class CodeExecutorAgentConfig(BaseModel): class CodeExecutorAgent(BaseChatAgent, Component[CodeExecutorAgentConfig]): - """An agent that extracts and executes code snippets found in received messages and returns the output. + """An agent that extracts and executes code snippets found in received + :class:`~autogen_agentchat.messages.TextMessage` messages and returns the output + of the code execution. It is typically used within a team with another agent that generates code snippets to be executed. 
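
The StructuredMessage type introduced below carries a typed Pydantic payload through the
normal chat-message plumbing. A minimal sketch of the round trip this enables, mirroring
the new tests in this patch (the Order model is illustrative and not part of the change set):

    from pydantic import BaseModel
    from autogen_agentchat.messages import StructuredMessage

    class Order(BaseModel):  # illustrative payload model, not part of this patch
        item: str
        quantity: int

    msg = StructuredMessage[Order](content=Order(item="widget", quantity=3), source="user")
    data = msg.dump()  # plain dict; content is serialized and content_class_path is recorded
    restored = StructuredMessage[BaseModel].load(data)  # re-imports Order via content_class_path
    assert isinstance(restored.content, Order)

Note that load() re-imports the payload class from content_class_path, so that class must be
importable wherever deserialization happens (load raises ImportError otherwise).
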
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py index 2eba918714b7..f991e04ec482 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py @@ -14,6 +14,7 @@ BaseChatMessage, ChatMessage, ModelClientStreamingChunkEvent, + StructuredMessage, TextMessage, ) from ._base_chat_agent import BaseChatAgent @@ -167,13 +168,13 @@ async def on_messages_stream( else: # Generate a response using the model client. llm_messages: List[LLMMessage] = [SystemMessage(content=self._instruction)] - llm_messages.extend( - [ - UserMessage(content=message.content, source=message.source) - for message in inner_messages - if isinstance(message, BaseChatMessage) - ] - ) + for message in messages: + if isinstance(message, BaseChatMessage): + if isinstance(message, StructuredMessage): + serialized_message = message.content.model_dump_json() + llm_messages.append(UserMessage(content=serialized_message, source=message.source)) + else: + llm_messages.append(UserMessage(content=message.content, source=message.source)) llm_messages.append(SystemMessage(content=self._response_prompt)) completion = await self._model_client.create(messages=llm_messages, cancellation_token=cancellation_token) assert isinstance(completion.content, str) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index 89500a50e344..1905dc476a90 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -4,8 +4,9 @@ class and includes specific fields relevant to the type of message being sent. """ +import importlib from abc import ABC -from typing import Dict, List, Literal +from typing import Any, Dict, Generic, List, Literal, Self, TypeVar from autogen_core import FunctionCall, Image from autogen_core.memory import MemoryContent @@ -41,6 +42,60 @@ class BaseAgentEvent(BaseMessage, ABC): pass +ContentType = TypeVar("ContentType", bound=BaseModel, covariant=True) + + +class StructuredMessage(BaseChatMessage, Generic[ContentType]): + """A structured message with a specific content type.""" + + content: ContentType + """The content of the message. Must be a subclass of + `Pydantic BaseModel `_.""" + + content_class_path: str | None = None + """The path to the content class. This is set automatically when the message is created.""" + + type: Literal["StructuredMessage"] = "StructuredMessage" + + def __init__(self, **data: Any) -> None: + super().__init__(**data) + module_name = self.content.__class__.__module__ + class_name = self.content.__class__.__qualname__ + self.content_class_path = f"{module_name}.{class_name}" + + def dump(self) -> Dict[str, Any]: + """Dump the message to a dictionary. This is used for serialization + and ensures that the content is serialized correctly.""" + data = super().model_dump() + data["content"] = self.content.model_dump() + return data + + @classmethod + def load(cls, obj: Dict[str, Any]) -> Self: + """Load the message from a dictionary. 
This is used for deserialization + and ensures that the content is deserialized correctly.""" + content_class_path = obj["content_class_path"] + module_name, class_name = content_class_path.rsplit(".", 1) + try: + module = importlib.import_module(module_name) + except ImportError as e: + raise ImportError( + f"Could not import module {module_name} when loading content class {class_name} for StructuredMessage. Ensure it is installed." + ) from e + if not hasattr(module, class_name): + raise ValueError( + f"Could not find class {class_name} in module {module_name} when loading content class for StructuredMessage." + ) + content_class = getattr(module, class_name) + if not issubclass(content_class, BaseModel): + raise ValueError(f"Invalid content class: {content_class}, must be a subclass of BaseModel") + content = content_class.model_validate(obj["content"]) + instance = super().model_validate(obj) + instance.content = content # type: ignore + instance.content_class_path = content_class_path + return instance + + class TextMessage(BaseChatMessage): """A text message.""" @@ -152,7 +207,13 @@ class ThoughtEvent(BaseAgentEvent): ChatMessage = Annotated[ - TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage, Field(discriminator="type") + StructuredMessage[BaseModel] + | TextMessage + | MultiModalMessage + | StopMessage + | ToolCallSummaryMessage + | HandoffMessage, + Field(discriminator="type"), ] """Messages for agent-to-agent communication only.""" @@ -173,6 +234,7 @@ class ThoughtEvent(BaseAgentEvent): "AgentEvent", "BaseMessage", "ChatMessage", + "StructuredMessage", "HandoffMessage", "MultiModalMessage", "StopMessage", diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py b/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py index 16ddbc7472d6..3af435be8a10 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py @@ -1,10 +1,11 @@ -from typing import Annotated, Any, List, Mapping, Optional +from typing import Annotated, Any, Dict, List, Mapping, Optional, Self from pydantic import BaseModel, Field from ..messages import ( AgentEvent, ChatMessage, + StructuredMessage, ) # Ensures pydantic can distinguish between types of events & messages. @@ -39,6 +40,28 @@ class BaseGroupChatManagerState(BaseState): current_turn: int = Field(default=0) type: str = Field(default="BaseGroupChatManagerState") + def model_dump(self, *args: Any, **kwargs: Any) -> Dict[str, Any]: + """Override model_dump to ensure StructuredMessage is handled properly.""" + state = super().model_dump(*args, **kwargs) + for i, message in enumerate(self.message_thread): + if isinstance(message, StructuredMessage): + state["message_thread"][i] = message.dump() + return state + + @classmethod + def model_validate(cls, obj: Any, **kwargs: Any) -> Self: + """Override model_validate to ensure message_thread is handled properly for StructuredMessage.""" + messages = obj["message_thread"] + instance = super().model_validate(obj, **kwargs) + for i, message in enumerate(messages): + # For each message, check if it's a structured message and validate it + # If it is, replace it with the validated instance + # Otherwise, leave it as is. 
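+            # Plain pydantic validation cannot restore the concrete content type of a
+            # StructuredMessage; load() re-imports it from content_class_path instead.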
+ if message["type"] == "StructuredMessage": + structured_message = StructuredMessage[BaseModel].load(message) + instance.message_thread[i] = structured_message + return instance + class ChatAgentContainerState(BaseState): """State for a container of chat agents.""" @@ -47,6 +70,28 @@ class ChatAgentContainerState(BaseState): message_buffer: List[ChatMessage] = Field(default_factory=list) type: str = Field(default="ChatAgentContainerState") + def model_dump(self, *args: Any, **kwargs: Any) -> dict[str, Any]: + """Override model_dump to ensure StructuredMessage is handled properly.""" + state = super().model_dump(*args, **kwargs) + for i, message in enumerate(self.message_buffer): + if isinstance(message, StructuredMessage): + state["message_buffer"][i] = message.dump() + return state + + @classmethod + def model_validate(cls, obj: Any, **kwargs: Any) -> Self: + """Override model_validate to ensure message_buffer is handled properly for StructuredMessage.""" + messages = obj["message_buffer"] + instance = super().model_validate(obj, **kwargs) + for i, message in enumerate(messages): + # For each message, check if it's a structured message and validate it + # If it is, replace it with the validated instance + # Otherwise, leave it as is. + if message["type"] == "StructuredMessage": + structured_message = StructuredMessage[BaseModel].load(message) + instance.message_buffer[i] = structured_message + return instance + class RoundRobinManagerState(BaseGroupChatManagerState): """State for :class:`~autogen_agentchat.teams.RoundRobinGroupChat` manager.""" diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 2446024c3c21..e8fcb8d93951 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -16,6 +16,7 @@ BaseAgentEvent, ChatMessage, MultiModalMessage, + StructuredMessage, ) from ...state import SelectorManagerState from ._base_group_chat import BaseGroupChat @@ -143,6 +144,9 @@ async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: message += f" {item}" else: message += " [Image]" + elif isinstance(msg, StructuredMessage): + serialized_content = msg.content.model_dump_json() + message += f" {serialized_content}" else: raise ValueError(f"Unexpected message type in selector: {type(msg)}") history_messages.append( diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py index 0a95c842ea08..5e422a4e68fb 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py @@ -15,6 +15,7 @@ ChatMessage, ModelClientStreamingChunkEvent, MultiModalMessage, + StructuredMessage, UserInputRequestedEvent, ) @@ -215,5 +216,8 @@ def _message_to_str(message: AgentEvent | ChatMessage, *, render_image_iterm: bo else: result.append("") return "\n".join(result) + elif isinstance(message, StructuredMessage): + # If the message is a StructuredMessage, we want to print the content as JSON. 
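+        # indent=2 pretty-prints nested fields so the payload stays readable in the console.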
+ return message.content.model_dump_json(indent=2) else: return f"{message.content}" diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/utils/_utils.py b/python/packages/autogen-agentchat/src/autogen_agentchat/utils/_utils.py index 6de1178645fc..738b72e9b329 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/utils/_utils.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/utils/_utils.py @@ -2,18 +2,24 @@ from autogen_core import FunctionCall, Image from autogen_core.models import FunctionExecutionResult, LLMMessage, UserMessage +from pydantic import BaseModel # Type aliases for convenience +_StructuredContent = BaseModel _UserContent = Union[str, List[Union[str, Image]]] _AssistantContent = Union[str, List[FunctionCall]] _FunctionExecutionContent = List[FunctionExecutionResult] _SystemContent = str -def content_to_str(content: _UserContent | _AssistantContent | _FunctionExecutionContent | _SystemContent) -> str: +def content_to_str( + content: _UserContent | _AssistantContent | _FunctionExecutionContent | _SystemContent | _StructuredContent, +) -> str: """Convert the content of an LLMMessage to a string.""" if isinstance(content, str): return content + elif isinstance(content, BaseModel): + return content.model_dump_json() else: result: List[str] = [] for c in content: diff --git a/python/packages/autogen-agentchat/tests/test_assistant_agent.py b/python/packages/autogen-agentchat/tests/test_assistant_agent.py index 11600282c28b..e418d763eb8e 100644 --- a/python/packages/autogen-agentchat/tests/test_assistant_agent.py +++ b/python/packages/autogen-agentchat/tests/test_assistant_agent.py @@ -12,6 +12,7 @@ MemoryQueryEvent, ModelClientStreamingChunkEvent, MultiModalMessage, + StructuredMessage, TextMessage, ThoughtEvent, ToolCallExecutionEvent, @@ -35,6 +36,7 @@ from autogen_core.tools import FunctionTool from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_ext.models.replay import ReplayChatCompletionClient +from pydantic import BaseModel from utils import FileLogHandler logger = logging.getLogger(EVENT_LOGGER_NAME) @@ -471,6 +473,23 @@ async def test_multi_modal_task(monkeypatch: pytest.MonkeyPatch) -> None: assert len(result.messages) == 2 +@pytest.mark.asyncio +async def test_run_with_structured_task() -> None: + class InputTask(BaseModel): + input: str + data: List[str] + + model_client = ReplayChatCompletionClient(["Hello"]) + agent = AssistantAgent( + name="assistant", + model_client=model_client, + ) + + task = StructuredMessage[InputTask](content=InputTask(input="Test", data=["Test1", "Test2"]), source="user") + result = await agent.run(task=task) + assert len(result.messages) == 2 + + @pytest.mark.asyncio async def test_invalid_model_capabilities() -> None: model = "random-model" diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index 5ab1605a72b7..ac8c4ea482c8 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -20,6 +20,7 @@ HandoffMessage, MultiModalMessage, StopMessage, + StructuredMessage, TextMessage, ToolCallExecutionEvent, ToolCallRequestEvent, @@ -44,6 +45,7 @@ from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_ext.models.replay import ReplayChatCompletionClient +from pydantic import BaseModel from utils import FileLogHandler logger 
= logging.getLogger(EVENT_LOGGER_NAME) @@ -122,6 +124,19 @@ def _pass_function(input: str) -> str: return "pass" +class InputTask1(BaseModel): + task: str + data: List[str] + + +class InputTask2(BaseModel): + task: str + data: str + + +TaskType = str | List[ChatMessage] | ChatMessage + + @pytest_asyncio.fixture(params=["single_threaded", "embedded"]) # type: ignore async def runtime(request: pytest.FixtureRequest) -> AsyncGenerator[AgentRuntime | None, None]: if request.param == "single_threaded": @@ -202,15 +217,32 @@ async def test_round_robin_group_chat(runtime: AgentRuntime | None) -> None: model_client.reset() index = 0 await team.reset() - result_2 = await team.run( - task=MultiModalMessage(content=["Write a program that prints 'Hello, world!'"], source="user") - ) - assert result.messages[0].content == result_2.messages[0].content[0] + task = MultiModalMessage(content=["Write a program that prints 'Hello, world!'"], source="user") + result_2 = await team.run(task=task) + assert result.messages[0].content == task.content[0] assert result.messages[1:] == result_2.messages[1:] @pytest.mark.asyncio -async def test_round_robin_group_chat_state(runtime: AgentRuntime | None) -> None: +@pytest.mark.parametrize( + "task", + [ + "Write a program that prints 'Hello, world!'", + [TextMessage(content="Write a program that prints 'Hello, world!'", source="user")], + [MultiModalMessage(content=["Write a program that prints 'Hello, world!'"], source="user")], + [ + StructuredMessage[InputTask1]( + content=InputTask1(task="Write a program that prints 'Hello, world!'", data=["a", "b", "c"]), + source="user", + ), + StructuredMessage[InputTask2]( + content=InputTask2(task="Write a program that prints 'Hello, world!'", data="a"), source="user" + ), + ], + ], + ids=["text", "text_message", "multi_modal_message", "structured_message"], +) +async def test_round_robin_group_chat_state(task: TaskType, runtime: AgentRuntime | None) -> None: model_client = ReplayChatCompletionClient( ["No facts", "No plan", "print('Hello, world!')", "TERMINATE"], ) @@ -218,7 +250,7 @@ async def test_round_robin_group_chat_state(runtime: AgentRuntime | None) -> Non agent2 = AssistantAgent("agent2", model_client=model_client) termination = TextMentionTermination("TERMINATE") team1 = RoundRobinGroupChat(participants=[agent1, agent2], termination_condition=termination, runtime=runtime) - await team1.run(task="Write a program that prints 'Hello, world!'") + await team1.run(task=task) state = await team1.save_state() agent3 = AssistantAgent("agent1", model_client=model_client) @@ -485,7 +517,25 @@ async def test_selector_group_chat(runtime: AgentRuntime | None) -> None: @pytest.mark.asyncio -async def test_selector_group_chat_state(runtime: AgentRuntime | None) -> None: +@pytest.mark.parametrize( + "task", + [ + "Write a program that prints 'Hello, world!'", + [TextMessage(content="Write a program that prints 'Hello, world!'", source="user")], + [MultiModalMessage(content=["Write a program that prints 'Hello, world!'"], source="user")], + [ + StructuredMessage[InputTask1]( + content=InputTask1(task="Write a program that prints 'Hello, world!'", data=["a", "b", "c"]), + source="user", + ), + StructuredMessage[InputTask2]( + content=InputTask2(task="Write a program that prints 'Hello, world!'", data="a"), source="user" + ), + ], + ], + ids=["text", "text_message", "multi_modal_message", "structured_message"], +) +async def test_selector_group_chat_state(task: TaskType, runtime: AgentRuntime | None) -> None: model_client = 
ReplayChatCompletionClient( ["agent1", "No facts", "agent2", "No plan", "agent1", "print('Hello, world!')", "agent2", "TERMINATE"], ) @@ -498,7 +548,7 @@ async def test_selector_group_chat_state(runtime: AgentRuntime | None) -> None: model_client=model_client, runtime=runtime, ) - await team1.run(task="Write a program that prints 'Hello, world!'") + await team1.run(task=task) state = await team1.save_state() agent3 = AssistantAgent("agent1", model_client=model_client) @@ -839,6 +889,55 @@ async def test_swarm_handoff(runtime: AgentRuntime | None) -> None: assert manager_1._current_speaker == manager_2._current_speaker # pyright: ignore +@pytest.mark.asyncio +@pytest.mark.parametrize( + "task", + [ + "Write a program that prints 'Hello, world!'", + [TextMessage(content="Write a program that prints 'Hello, world!'", source="user")], + [MultiModalMessage(content=["Write a program that prints 'Hello, world!'"], source="user")], + [ + StructuredMessage[InputTask1]( + content=InputTask1(task="Write a program that prints 'Hello, world!'", data=["a", "b", "c"]), + source="user", + ), + StructuredMessage[InputTask2]( + content=InputTask2(task="Write a program that prints 'Hello, world!'", data="a"), source="user" + ), + ], + ], + ids=["text", "text_message", "multi_modal_message", "structured_message"], +) +async def test_swarm_handoff_state(task: TaskType, runtime: AgentRuntime | None) -> None: + first_agent = _HandOffAgent("first_agent", description="first agent", next_agent="second_agent") + second_agent = _HandOffAgent("second_agent", description="second agent", next_agent="third_agent") + third_agent = _HandOffAgent("third_agent", description="third agent", next_agent="first_agent") + + termination = MaxMessageTermination(6) + team1 = Swarm([second_agent, first_agent, third_agent], termination_condition=termination, runtime=runtime) + await team1.run(task=task) + state = await team1.save_state() + + first_agent2 = _HandOffAgent("first_agent", description="first agent", next_agent="second_agent") + second_agent2 = _HandOffAgent("second_agent", description="second agent", next_agent="third_agent") + third_agent2 = _HandOffAgent("third_agent", description="third agent", next_agent="first_agent") + team2 = Swarm([second_agent2, first_agent2, third_agent2], termination_condition=termination, runtime=runtime) + await team2.load_state(state) + state2 = await team2.save_state() + assert state == state2 + + manager_1 = await team1._runtime.try_get_underlying_agent_instance( # pyright: ignore + AgentId(f"{team1._group_chat_manager_name}_{team1._team_id}", team1._team_id), # pyright: ignore + SwarmGroupChatManager, # pyright: ignore + ) + manager_2 = await team2._runtime.try_get_underlying_agent_instance( # pyright: ignore + AgentId(f"{team2._group_chat_manager_name}_{team2._team_id}", team2._team_id), # pyright: ignore + SwarmGroupChatManager, # pyright: ignore + ) + assert manager_1._message_thread == manager_2._message_thread # pyright: ignore + assert manager_1._current_speaker == manager_2._current_speaker # pyright: ignore + + @pytest.mark.asyncio async def test_swarm_handoff_using_tool_calls(runtime: AgentRuntime | None) -> None: model_client = ReplayChatCompletionClient( diff --git a/python/packages/autogen-agentchat/tests/test_messages.py b/python/packages/autogen-agentchat/tests/test_messages.py new file mode 100644 index 000000000000..79b1ee38496d --- /dev/null +++ b/python/packages/autogen-agentchat/tests/test_messages.py @@ -0,0 +1,60 @@ +from autogen_agentchat.messages import 
StructuredMessage +from pydantic import BaseModel + + +class TestContent(BaseModel): + """Test content model.""" + + field1: str + field2: int + + +def test_structured_message(): + # Create a structured message with the test content + message = StructuredMessage[TestContent]( + source="test_agent", + content=TestContent(field1="test", field2=42), + ) + + # Check that the message type is correct + assert message.type == "StructuredMessage" + + # Check that the content is of the correct type + assert isinstance(message.content, TestContent) + + # Check that the content class is set correctly + assert message.content_class_path == "test_messages.TestContent" + + # Check that the content fields are set correctly + assert message.content.field1 == "test" + assert message.content.field2 == 42 + + # Check that model_dump works correctly + dumped_message = message.model_dump() + assert dumped_message["source"] == "test_agent" + assert dumped_message["content_class_path"] == "test_messages.TestContent" + assert dumped_message["content"]["field1"] == "test" + assert dumped_message["content"]["field2"] == 42 + + # Check that model_validate works correctly + validated_message = StructuredMessage[TestContent].model_validate(dumped_message) + assert validated_message.source == "test_agent" + assert isinstance(validated_message.content, TestContent) + assert validated_message.content_class_path == "test_messages.TestContent" + assert validated_message.content.field1 == "test" + assert validated_message.content.field2 == 42 + + # Check that the dump method works correctly + dumped_message = message.dump() + assert dumped_message["type"] == "StructuredMessage" + assert dumped_message["content_class_path"] == "test_messages.TestContent" + assert dumped_message["content"]["field1"] == "test" + assert dumped_message["content"]["field2"] == 42 + + # Check that the load method works correctly for dynamic deserialization. 
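+    # The type parameter is deliberately BaseModel: load() resolves the concrete
+    # TestContent class at runtime from the serialized content_class_path.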
+ message2 = StructuredMessage[BaseModel].load(dumped_message) + assert message2.type == "StructuredMessage" + assert isinstance(message2.content, TestContent) + assert message2.content_class_path == "test_messages.TestContent" + assert message2.content.field1 == "test" + assert message2.content.field2 == 42 diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py index aec34cc6364b..af55a742cc83 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py @@ -7,7 +7,7 @@ from autogen_agentchat.base import Response from autogen_agentchat.messages import ( ChatMessage, - MultiModalMessage, + StructuredMessage, TextMessage, ) from autogen_agentchat.utils import remove_images @@ -90,10 +90,21 @@ def produced_message_types(self) -> Sequence[type[ChatMessage]]: async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: for chat_message in messages: - if isinstance(chat_message, TextMessage | MultiModalMessage): - self._chat_history.append(UserMessage(content=chat_message.content, source=chat_message.source)) + if isinstance(chat_message, StructuredMessage): + serialized_content = chat_message.content.model_dump_json() + self._chat_history.append( + UserMessage( + content=serialized_content, + source=chat_message.source, + ) + ) else: - raise ValueError(f"Unexpected message in FileSurfer: {chat_message}") + self._chat_history.append( + UserMessage( + content=chat_message.content, + source=chat_message.source, + ) + ) try: _, content = await self._generate_reply(cancellation_token=cancellation_token) diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py index f0ea845007f9..283e34ebbb17 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py @@ -26,14 +26,11 @@ from autogen_agentchat.messages import ( AgentEvent, ChatMessage, - HandoffMessage, - MultiModalMessage, - StopMessage, TextMessage, ToolCallExecutionEvent, ToolCallRequestEvent, ) -from autogen_core import CancellationToken, FunctionCall +from autogen_core import CancellationToken, FunctionCall, Image from autogen_core.models._model_client import ChatCompletionClient from autogen_core.models._types import FunctionExecutionResult from autogen_core.tools import FunctionTool, Tool @@ -52,6 +49,12 @@ from openai.types.beta.function_tool_param import FunctionToolParam from openai.types.beta.thread import Thread, ToolResources, ToolResourcesCodeInterpreter from openai.types.beta.threads import Message, MessageDeleted, Run +from openai.types.beta.threads.image_url_content_block_param import ImageURLContentBlockParam +from openai.types.beta.threads.image_url_param import ImageURLParam +from openai.types.beta.threads.message_content_part_param import ( + MessageContentPartParam, +) +from openai.types.beta.threads.text_content_block_param import TextContentBlockParam from openai.types.beta.vector_store import VectorStore from openai.types.shared_params.function_definition import FunctionDefinition @@ -406,10 +409,7 @@ async def on_messages_stream( # Process all messages in sequence for message in messages: - 
if isinstance(message, (TextMessage, MultiModalMessage)): - await self.handle_text_message(str(message.content), cancellation_token) - elif isinstance(message, (StopMessage, HandoffMessage)): - await self.handle_text_message(message.content, cancellation_token) + await self.handle_incoming_message(message, cancellation_token) # Inner messages for tool calls inner_messages: List[AgentEvent | ChatMessage] = [] @@ -519,8 +519,24 @@ async def on_messages_stream( chat_message = TextMessage(source=self.name, content=text_content[0].text.value) yield Response(chat_message=chat_message, inner_messages=inner_messages) - async def handle_text_message(self, content: str, cancellation_token: CancellationToken) -> None: + async def handle_incoming_message(self, message: ChatMessage, cancellation_token: CancellationToken) -> None: """Handle regular text messages by adding them to the thread.""" + content: str | List[MessageContentPartParam] + if isinstance(message.content, str): + content = message.content + elif isinstance(message.content, BaseModel): + content = message.content.model_dump_json() + elif isinstance(message.content, list): + content = [] + for c in message.content: + if isinstance(c, str): + content.append(TextContentBlockParam(text=c, type="text")) + elif isinstance(c, Image): + content.append(ImageURLContentBlockParam(image_url=ImageURLParam(url=c.data_uri), type="image_url")) + else: + raise ValueError(f"Unsupported content type: {type(c)} in {message.content}") + else: + raise ValueError(f"Unsupported content type: {type(message.content)} in {message}") await cancellation_token.link_future( asyncio.ensure_future( self._client.beta.threads.messages.create( diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py index f4fb3abd10ea..4fe5dd919eda 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py @@ -24,7 +24,7 @@ import PIL.Image from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response -from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage, TextMessage +from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage, StructuredMessage, TextMessage from autogen_agentchat.utils import content_to_str, remove_images from autogen_core import EVENT_LOGGER_NAME, CancellationToken, Component, ComponentModel, FunctionCall from autogen_core import Image as AGImage @@ -432,10 +432,22 @@ async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: for chat_message in messages: - if isinstance(chat_message, TextMessage | MultiModalMessage): - self._chat_history.append(UserMessage(content=chat_message.content, source=chat_message.source)) + if isinstance(chat_message, StructuredMessage): + serialized_content = chat_message.content.model_dump_json() + self._chat_history.append( + UserMessage( + content=serialized_content, + source=chat_message.source, + ) + ) else: - raise ValueError(f"Unexpected message in MultiModalWebSurfer: {chat_message}") + self._chat_history.append( + UserMessage( + content=chat_message.content, + source=chat_message.source, + ) + ) + self.inner_messages: List[AgentEvent | ChatMessage] = [] self.model_usage: 
List[RequestUsage] = [] try: diff --git a/python/packages/autogen-ext/tests/test_filesurfer_agent.py b/python/packages/autogen-ext/tests/test_filesurfer_agent.py index 470bb270a9ef..04fc46365475 100644 --- a/python/packages/autogen-ext/tests/test_filesurfer_agent.py +++ b/python/packages/autogen-ext/tests/test_filesurfer_agent.py @@ -8,6 +8,7 @@ import aiofiles import pytest from autogen_agentchat import EVENT_LOGGER_NAME +from autogen_agentchat.messages import TextMessage from autogen_ext.agents.file_surfer import FileSurfer from autogen_ext.models.openai import OpenAIChatCompletionClient from openai.resources.chat.completions import AsyncCompletions @@ -140,9 +141,11 @@ async def test_run_filesurfer(monkeypatch: pytest.MonkeyPatch) -> None: # Get the FileSurfer to read the file, and the directory assert agent._name == "FileSurfer" # pyright: ignore[reportPrivateUsage] result = await agent.run(task="Please read the test file") + assert isinstance(result.messages[1], TextMessage) assert "# FileSurfer test H1" in result.messages[1].content result = await agent.run(task="Please read the test directory") + assert isinstance(result.messages[1], TextMessage) assert "# Index of " in result.messages[1].content assert "test_filesurfer_agent.html" in result.messages[1].content From 234226a024a308a096e006cee63a8575fef27bfd Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 18 Mar 2025 19:59:21 -0700 Subject: [PATCH 02/26] fix type --- .../autogen-agentchat/src/autogen_agentchat/messages.py | 4 ++-- .../autogen-agentchat/src/autogen_agentchat/state/_states.py | 3 ++- python/packages/autogen-agentchat/tests/test_messages.py | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index 1905dc476a90..88f22e23f550 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -6,13 +6,13 @@ class and includes specific fields relevant to the type of message being sent. 
import importlib from abc import ABC -from typing import Any, Dict, Generic, List, Literal, Self, TypeVar +from typing import Any, Dict, Generic, List, Literal, TypeVar from autogen_core import FunctionCall, Image from autogen_core.memory import MemoryContent from autogen_core.models import FunctionExecutionResult, LLMMessage, RequestUsage from pydantic import BaseModel, ConfigDict, Field -from typing_extensions import Annotated +from typing_extensions import Annotated, Self class BaseMessage(BaseModel, ABC): diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py b/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py index 3af435be8a10..d23a3de5fc9c 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py @@ -1,6 +1,7 @@ -from typing import Annotated, Any, Dict, List, Mapping, Optional, Self +from typing import Annotated, Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Field +from typing_extensions import Self from ..messages import ( AgentEvent, diff --git a/python/packages/autogen-agentchat/tests/test_messages.py b/python/packages/autogen-agentchat/tests/test_messages.py index 79b1ee38496d..2777e9a7de8a 100644 --- a/python/packages/autogen-agentchat/tests/test_messages.py +++ b/python/packages/autogen-agentchat/tests/test_messages.py @@ -9,7 +9,7 @@ class TestContent(BaseModel): field2: int -def test_structured_message(): +def test_structured_message() -> None: # Create a structured message with the test content message = StructuredMessage[TestContent]( source="test_agent", From 491cf8fcef7d4dd3fc32e87503ebc7b022ebe7b8 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 18 Mar 2025 20:17:17 -0700 Subject: [PATCH 03/26] fix doc --- .../agentchat-user-guide/custom-agents.ipynb | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb index a6d3d9abc33a..2766b1eb7188 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb @@ -319,7 +319,7 @@ "\n", "from autogen_agentchat.agents import BaseChatAgent\n", "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage, StructuredMessage\n", "from autogen_core import CancellationToken\n", "from autogen_core.model_context import UnboundedChatCompletionContext\n", "from autogen_core.models import AssistantMessage, RequestUsage, UserMessage\n", @@ -363,7 +363,11 @@ " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", " # Add messages to the model context\n", " for msg in messages:\n", - " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", + " if isinstance(msg, StructuredMessage):\n", + " serialized_content = msg.content.model_dump_json()\n", + " await self._model_context.add_message(UserMessage(content=serialized_content, source=msg.source))\n", + " else:\n", + " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", "\n", " # Get conversation history\n", " history = [\n", @@ -538,7 +542,7 @@ }, { 
"cell_type": "code", - "execution_count": 40, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -599,7 +603,11 @@ " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", " # Add messages to the model context\n", " for msg in messages:\n", - " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", + " if isinstance(msg, StructuredMessage):\n", + " serialized_content = msg.content.model_dump_json()\n", + " await self._model_context.add_message(UserMessage(content=serialized_content, source=msg.source))\n", + " else:\n", + " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", "\n", " # Get conversation history\n", " history = [\n", @@ -728,7 +736,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.12.7" } }, "nbformat": 4, From c80802bc95518102a64b73aab21cfea2dbaf92d3 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sun, 23 Mar 2025 14:17:40 -0700 Subject: [PATCH 04/26] refactor to use base class instead of union for agent chat messages --- .../Templates/SelectorGroupChat/scenario.py | 14 +- .../agents/_assistant_agent.py | 52 +- .../agents/_base_chat_agent.py | 27 +- .../agents/_code_executor_agent.py | 6 +- .../agents/_society_of_mind_agent.py | 22 +- .../agents/_user_proxy_agent.py | 12 +- .../src/autogen_agentchat/base/_chat_agent.py | 19 +- .../src/autogen_agentchat/base/_task.py | 10 +- .../autogen_agentchat/base/_termination.py | 10 +- .../conditions/_terminations.py | 23 +- .../src/autogen_agentchat/messages.py | 295 ++- .../src/autogen_agentchat/state/_states.py | 60 +- .../teams/_group_chat/_base_group_chat.py | 32 +- .../_group_chat/_base_group_chat_manager.py | 18 +- .../_group_chat/_chat_agent_container.py | 22 +- .../teams/_group_chat/_events.py | 6 +- .../_magentic_one/_magentic_one_group_chat.py | 6 +- .../_magentic_one_orchestrator.py | 19 +- .../_group_chat/_round_robin_group_chat.py | 22 +- .../teams/_group_chat/_selector_group_chat.py | 60 +- .../teams/_group_chat/_swarm_group_chat.py | 20 +- .../src/autogen_agentchat/ui/_console.py | 50 +- .../tests/test_assistant_agent.py | 4 +- .../tests/test_group_chat.py | 30 +- .../tests/test_group_chat_endpoint.py | 7 +- .../tests/test_group_chat_pause_resume.py | 6 +- .../tests/test_magentic_one_group_chat.py | 6 +- .../autogen-agentchat/tests/test_messages.py | 87 +- .../tests/test_userproxy_agent.py | 4 +- .../agentchat-user-guide/custom-agents.ipynb | 1488 ++++++------ .../agentchat-user-guide/migration-guide.md | 29 +- .../selector-group-chat.ipynb | 2046 ++++++++--------- .../tutorial/termination.ipynb | 1028 ++++----- .../agents/openai/_openai_assistant_agent.py | 16 +- .../web_surfer/_multimodal_web_surfer.py | 12 +- .../task_centric_memory/utils/apprentice.py | 6 +- .../task_centric_memory/utils/page_logger.py | 4 +- .../src/autogen_ext/ui/_rich_console.py | 11 +- .../autogenstudio/teammanager/teammanager.py | 8 +- .../autogenstudio/web/managers/connection.py | 8 +- 40 files changed, 2824 insertions(+), 2781 deletions(-) diff --git a/python/packages/agbench/benchmarks/GAIA/Templates/SelectorGroupChat/scenario.py b/python/packages/agbench/benchmarks/GAIA/Templates/SelectorGroupChat/scenario.py index e2e1d8fae009..64f6310cd8ba 100644 --- a/python/packages/agbench/benchmarks/GAIA/Templates/SelectorGroupChat/scenario.py +++ b/python/packages/agbench/benchmarks/GAIA/Templates/SelectorGroupChat/scenario.py @@ -2,22 +2,20 @@ import os import yaml 
import warnings -from typing import Sequence +from typing import List, Sequence from autogen_ext.agents.magentic_one import MagenticOneCoderAgent from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.conditions import MaxMessageTermination from autogen_agentchat.ui import Console from autogen_agentchat.utils import content_to_str -from autogen_core.models import ModelFamily from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor -from autogen_agentchat.conditions import TextMentionTermination from autogen_agentchat.base import TerminationCondition, TerminatedException from autogen_core.models import ChatCompletionClient from autogen_ext.agents.web_surfer import MultimodalWebSurfer from autogen_ext.agents.file_surfer import FileSurfer from autogen_agentchat.agents import CodeExecutorAgent -from autogen_agentchat.messages import TextMessage, AgentEvent, ChatMessage, HandoffMessage, MultiModalMessage, StopMessage -from autogen_core.models import LLMMessage, UserMessage, AssistantMessage +from autogen_agentchat.messages import BaseMessage, TextMessage, MultiModalMessage, StopMessage +from autogen_core.models import LLMMessage, UserMessage # Suppress warnings about the requests.Session() not being closed warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning) @@ -92,7 +90,7 @@ async def main() -> None: result = await Console(stream) # Do one more inference to format the results - final_context: Sequence[LLMMessage] = [] + final_context: List[LLMMessage] = [] for message in result.messages: if isinstance(message, TextMessage): final_context.append(UserMessage(content=message.content, source=message.source)) @@ -135,13 +133,13 @@ def __init__(self, prompt: str, model_client: ChatCompletionClient, termination_ self._model_client = model_client self._termination_phrase = termination_phrase self._terminated = False - self._context: Sequence[LLMMessage] = [] + self._context: List[LLMMessage] = [] @property def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index e849f7ec12bf..6e418cb0f512 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -31,7 +31,6 @@ LLMMessage, ModelFamily, SystemMessage, - UserMessage, ) from autogen_core.tools import BaseTool, FunctionTool from pydantic import BaseModel @@ -41,12 +40,11 @@ from ..base import Handoff as HandoffBase from ..base import Response from ..messages import ( - AgentEvent, - ChatMessage, + BaseChatMessage, + BaseMessage, HandoffMessage, MemoryQueryEvent, ModelClientStreamingChunkEvent, - StructuredMessage, TextMessage, ThoughtEvent, ToolCallExecutionEvent, @@ -695,8 +693,8 @@ def __init__( self._is_running = False @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: - message_types: List[type[ChatMessage]] = [TextMessage] + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + message_types: List[type[BaseChatMessage]] = [TextMessage] if self._handoffs: 
message_types.append(HandoffMessage) if self._tools: @@ -710,15 +708,15 @@ def model_context(self) -> ChatCompletionContext: """ return self._model_context - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: async for message in self.on_messages_stream(messages, cancellation_token): if isinstance(message, Response): return message raise AssertionError("The stream should have returned the final result.") async def on_messages_stream( - self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: + self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[BaseMessage | Response, None]: """ Process the incoming messages with the assistant agent and yield events/responses as they happen. """ @@ -743,7 +741,7 @@ async def on_messages_stream( ) # STEP 2: Update model context with any relevant memory - inner_messages: List[AgentEvent | ChatMessage] = [] + inner_messages: List[BaseMessage] = [] for event_msg in await self._update_model_context_with_memory( memory=memory, model_context=model_context, @@ -808,30 +806,14 @@ async def on_messages_stream( @staticmethod async def _add_messages_to_context( model_context: ChatCompletionContext, - messages: Sequence[ChatMessage], + messages: Sequence[BaseChatMessage], ) -> None: """ - Add incoming user (and possibly handoff) messages to the model context. + Add incoming messages to the model context. """ for msg in messages: - if isinstance(msg, HandoffMessage): - # Add handoff context to the model context. - for context_msg in msg.context: - await model_context.add_message(context_msg) - # Add the handoff message itself to the model context. - await model_context.add_message(UserMessage(content=msg.content, source=msg.source)) - elif isinstance(msg, StructuredMessage): - # Add structured message to the model context by serializing it. - serialized_content = msg.content.model_dump_json() - await model_context.add_message( - UserMessage( - content=serialized_content, - source=msg.source, - ) - ) - else: - # Add the content as user message content. - await model_context.add_message(UserMessage(content=msg.content, source=msg.source)) + for llm_msg in msg.to_llm_messages(): + await model_context.add_message(llm_msg) @staticmethod async def _update_model_context_with_memory( @@ -898,7 +880,7 @@ async def _call_llm( async def _process_model_result( cls, model_result: CreateResult, - inner_messages: List[AgentEvent | ChatMessage], + inner_messages: List[BaseMessage], cancellation_token: CancellationToken, agent_name: str, system_messages: List[SystemMessage], @@ -910,7 +892,7 @@ async def _process_model_result( model_client_stream: bool, reflect_on_tool_use: bool, tool_call_summary_format: str, - ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: + ) -> AsyncGenerator[BaseMessage | Response, None]: """ Handle final or partial responses from model_result, including tool calls, handoffs, and reflection if needed. 
@@ -1004,7 +986,7 @@ async def _process_model_result( def _check_and_handle_handoff( model_result: CreateResult, executed_calls_and_results: List[Tuple[FunctionCall, FunctionExecutionResult]], - inner_messages: List[AgentEvent | ChatMessage], + inner_messages: List[BaseMessage], handoffs: Dict[str, HandoffBase], agent_name: str, ) -> Optional[Response]: @@ -1069,7 +1051,7 @@ async def _reflect_on_tool_use_flow( model_client_stream: bool, model_context: ChatCompletionContext, agent_name: str, - inner_messages: List[AgentEvent | ChatMessage], + inner_messages: List[BaseMessage], ) -> AsyncGenerator[Response | ModelClientStreamingChunkEvent | ThoughtEvent, None]: """ If reflect_on_tool_use=True, we do another inference based on tool results @@ -1121,7 +1103,7 @@ async def _reflect_on_tool_use_flow( @staticmethod def _summarize_tool_use( executed_calls_and_results: List[Tuple[FunctionCall, FunctionExecutionResult]], - inner_messages: List[AgentEvent | ChatMessage], + inner_messages: List[BaseMessage], handoffs: Dict[str, HandoffBase], tool_call_summary_format: str, agent_name: str, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py index 94b89235df89..f37d46cadfb5 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py @@ -6,9 +6,8 @@ from ..base import ChatAgent, Response, TaskResult from ..messages import ( - AgentEvent, BaseChatMessage, - ChatMessage, + BaseMessage, ModelClientStreamingChunkEvent, TextMessage, ) @@ -60,13 +59,13 @@ def description(self) -> str: @property @abstractmethod - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: """The types of messages that the agent produces in the - :attr:`Response.chat_message` field. They must be :class:`ChatMessage` types.""" + :attr:`Response.chat_message` field. They must be :class:`BaseChatMessage` types.""" ... @abstractmethod - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: """Handles incoming messages and returns a response. .. note:: @@ -82,8 +81,8 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: ... async def on_messages_stream( - self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: + self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[BaseMessage | Response, None]: """Handles incoming messages and returns a stream of messages and and the final item is the response. 
The base implementation in :class:`BaseChatAgent` simply calls :meth:`on_messages` and yields @@ -107,14 +106,14 @@ async def on_messages_stream( async def run( self, *, - task: str | ChatMessage | Sequence[ChatMessage] | None = None, + task: str | BaseChatMessage | Sequence[BaseChatMessage] | None = None, cancellation_token: CancellationToken | None = None, ) -> TaskResult: """Run the agent with the given task and return the result.""" if cancellation_token is None: cancellation_token = CancellationToken() - input_messages: List[ChatMessage] = [] - output_messages: List[AgentEvent | ChatMessage] = [] + input_messages: List[BaseChatMessage] = [] + output_messages: List[BaseMessage] = [] if task is None: pass elif isinstance(task, str): @@ -143,15 +142,15 @@ async def run( async def run_stream( self, *, - task: str | ChatMessage | Sequence[ChatMessage] | None = None, + task: str | BaseChatMessage | Sequence[BaseChatMessage] | None = None, cancellation_token: CancellationToken | None = None, - ) -> AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None]: + ) -> AsyncGenerator[BaseMessage | TaskResult, None]: """Run the agent with the given task and return a stream of messages and the final task result as the last item in the stream.""" if cancellation_token is None: cancellation_token = CancellationToken() - input_messages: List[ChatMessage] = [] - output_messages: List[AgentEvent | ChatMessage] = [] + input_messages: List[BaseChatMessage] = [] + output_messages: List[BaseMessage] = [] if task is None: pass elif isinstance(task, str): diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py index e94ce4550efb..ccd57ed219af 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py @@ -7,7 +7,7 @@ from typing_extensions import Self from ..base import Response -from ..messages import ChatMessage, TextMessage +from ..messages import BaseChatMessage, TextMessage from ._base_chat_agent import BaseChatAgent @@ -119,11 +119,11 @@ def __init__( self._sources = sources @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: """The types of messages that the code executor agent produces.""" return (TextMessage,) - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: # Extract code blocks from the messages. 
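+        # Per the updated class docstring, only TextMessage content is scanned for code.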
code_blocks: List[CodeBlock] = [] for msg in messages: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py index f991e04ec482..a777879eaea8 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py @@ -1,7 +1,7 @@ from typing import Any, AsyncGenerator, List, Mapping, Sequence from autogen_core import CancellationToken, Component, ComponentModel -from autogen_core.models import ChatCompletionClient, LLMMessage, SystemMessage, UserMessage +from autogen_core.models import ChatCompletionClient, LLMMessage, SystemMessage from pydantic import BaseModel from typing_extensions import Self @@ -10,11 +10,9 @@ from ..base import TaskResult, Team from ..messages import ( - AgentEvent, BaseChatMessage, - ChatMessage, + BaseMessage, ModelClientStreamingChunkEvent, - StructuredMessage, TextMessage, ) from ._base_chat_agent import BaseChatAgent @@ -124,10 +122,10 @@ def __init__( self._response_prompt = response_prompt @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: return (TextMessage,) - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: # Call the stream method and collect the messages. response: Response | None = None async for msg in self.on_messages_stream(messages, cancellation_token): @@ -137,14 +135,14 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: return response async def on_messages_stream( - self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: + self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[BaseMessage | Response, None]: # Prepare the task for the team of agents. task = list(messages) # Run the team of agents. 
result: TaskResult | None = None - inner_messages: List[AgentEvent | ChatMessage] = [] + inner_messages: List[BaseMessage] = [] count = 0 async for inner_msg in self._team.run_stream(task=task, cancellation_token=cancellation_token): if isinstance(inner_msg, TaskResult): @@ -170,11 +168,7 @@ async def on_messages_stream( llm_messages: List[LLMMessage] = [SystemMessage(content=self._instruction)] for message in messages: if isinstance(message, BaseChatMessage): - if isinstance(message, StructuredMessage): - serialized_message = message.content.model_dump_json() - llm_messages.append(UserMessage(content=serialized_message, source=message.source)) - else: - llm_messages.append(UserMessage(content=message.content, source=message.source)) + llm_messages.extend(message.to_llm_messages()) llm_messages.append(SystemMessage(content=self._response_prompt)) completion = await self._model_client.create(messages=llm_messages, cancellation_token=cancellation_token) assert isinstance(completion.content, str) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py index 3ca0ec890324..9b5c8a449b6b 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py @@ -10,7 +10,7 @@ from typing_extensions import Self from ..base import Response -from ..messages import AgentEvent, ChatMessage, HandoffMessage, TextMessage, UserInputRequestedEvent +from ..messages import BaseChatMessage, BaseMessage, HandoffMessage, TextMessage, UserInputRequestedEvent from ._base_chat_agent import BaseChatAgent SyncInputFunc = Callable[[str], str] @@ -168,11 +168,11 @@ def __init__( self._is_async = iscoroutinefunction(self.input_func) @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: """Message types this agent can produce.""" return (TextMessage, HandoffMessage) - def _get_latest_handoff(self, messages: Sequence[ChatMessage]) -> Optional[HandoffMessage]: + def _get_latest_handoff(self, messages: Sequence[BaseChatMessage]) -> Optional[HandoffMessage]: """Find the HandoffMessage in the message sequence that addresses this agent.""" if len(messages) > 0 and isinstance(messages[-1], HandoffMessage): if messages[-1].target == self.name: @@ -199,15 +199,15 @@ async def _get_input(self, prompt: str, cancellation_token: Optional[Cancellatio except Exception as e: raise RuntimeError(f"Failed to get user input: {str(e)}") from e - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: async for message in self.on_messages_stream(messages, cancellation_token): if isinstance(message, Response): return message raise AssertionError("The stream should have returned the final result.") async def on_messages_stream( - self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: + self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[BaseMessage | Response, None]: """Handle incoming messages by requesting user input.""" try: # Check for handoff first diff --git 
a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py
index ec885ee7f8cb..e54df1f22953 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py
@@ -5,7 +5,7 @@
 from autogen_core import CancellationToken, ComponentBase
 from pydantic import BaseModel
 
-from ..messages import AgentEvent, ChatMessage
+from ..messages import BaseChatMessage, BaseMessage
 from ._task import TaskRunner
 
 
@@ -13,12 +13,11 @@ class Response:
     """A response from calling :meth:`ChatAgent.on_messages`."""
 
-    chat_message: ChatMessage
+    chat_message: BaseChatMessage
     """A chat message produced by the agent as the response."""
 
-    inner_messages: Sequence[AgentEvent | ChatMessage] | None = None
-    """Inner messages produced by the agent, they can be :class:`AgentEvent`
-    or :class:`ChatMessage`."""
+    inner_messages: Sequence[BaseMessage] | None = None
+    """Inner messages produced by the agent."""
 
 
 class ChatAgent(ABC, TaskRunner, ComponentBase[BaseModel]):
@@ -43,20 +42,20 @@ def description(self) -> str:
 
     @property
     @abstractmethod
-    def produced_message_types(self) -> Sequence[type[ChatMessage]]:
+    def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:
         """The types of messages that the agent produces in the
-        :attr:`Response.chat_message` field. They must be :class:`ChatMessage` types."""
+        :attr:`Response.chat_message` field. They must be :class:`BaseChatMessage` types."""
         ...
 
     @abstractmethod
-    async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:
+    async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:
         """Handles incoming messages and returns a response."""
         ...
 
     @abstractmethod
     def on_messages_stream(
-        self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
-    ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
+        self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken
+    ) -> AsyncGenerator[BaseMessage | Response, None]:
         """Handles incoming messages and returns a stream of inner messages;
         the final item is the response."""
         ...
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py
index 90e319ee36c6..f859978968f3 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py
@@ -3,14 +3,14 @@
 
 from autogen_core import CancellationToken
 
-from ..messages import AgentEvent, ChatMessage
+from ..messages import BaseChatMessage, BaseMessage
 
 
 @dataclass
 class TaskResult:
     """Result of running a task."""
 
-    messages: Sequence[AgentEvent | ChatMessage]
+    messages: Sequence[BaseMessage]
     """Messages produced by the task."""
 
     stop_reason: str | None = None
@@ -23,7 +23,7 @@ class TaskRunner(Protocol):
 
     async def run(
         self,
        *,
-        task: str | ChatMessage | Sequence[ChatMessage] | None = None,
+        task: str | BaseChatMessage | Sequence[BaseChatMessage] | None = None,
         cancellation_token: CancellationToken | None = None,
     ) -> TaskResult:
         """Run the task and return the result.
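(Editor's aside, not part of the patch: with the `TaskRunner` hunks above, `run_stream` is typed to yield `BaseMessage` items followed by a final `TaskResult`. A minimal consumer sketch under that reading, using the `content_to_str` and `content_to_render` helpers this patch introduces in `messages.py` further down; the `consume` function and its `runner` argument are hypothetical:)

    from autogen_agentchat.base import TaskResult, TaskRunner
    from autogen_agentchat.messages import BaseChatMessage


    async def consume(runner: TaskRunner) -> None:
        # Any TaskRunner implementation works here, e.g. an agent or a team.
        async for item in runner.run_stream(task="Summarize the plan."):
            if isinstance(item, TaskResult):
                # The final item in the stream is the TaskResult.
                print("Stop reason:", item.stop_reason)
            elif isinstance(item, BaseChatMessage):
                # Chat messages carry agent-to-agent content.
                print(f"{item.source}: {item.content_to_str()}")
            else:
                # Remaining items are agent events, emitted for observability only.
                print(f"[{item.type}] {item.content_to_render()}")

(The `type` field used above is the computed class-name field this patch adds to `BaseMessage`.)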
@@ -38,9 +38,9 @@ async def run(
 
     def run_stream(
         self,
         *,
-        task: str | ChatMessage | Sequence[ChatMessage] | None = None,
+        task: str | BaseChatMessage | Sequence[BaseChatMessage] | None = None,
         cancellation_token: CancellationToken | None = None,
-    ) -> AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None]:
+    ) -> AsyncGenerator[BaseMessage | TaskResult, None]:
         """Run the task and produce a stream of messages and the final result
         :class:`TaskResult` as the last item in the stream.
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py
index d8a3adb96818..ae9b06578b5d 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py
@@ -6,7 +6,7 @@
 from pydantic import BaseModel
 from typing_extensions import Self
 
-from ..messages import AgentEvent, ChatMessage, StopMessage
+from ..messages import BaseMessage, StopMessage
 
 
 class TerminatedException(BaseException): ...
@@ -15,7 +15,7 @@ class TerminatedException(BaseException): ...
 class TerminationCondition(ABC, ComponentBase[BaseModel]):
     """A stateful condition that determines when a conversation should be terminated.
 
-    A termination condition is a callable that takes a sequence of ChatMessage objects
+    A termination condition is a callable that takes a sequence of :class:`BaseMessage` objects
     since the last time the condition was called, and returns a StopMessage if the
     conversation should be terminated, or None otherwise.
     Once a termination condition has been reached, it must be reset before it can be used again.
@@ -56,7 +56,7 @@ def terminated(self) -> bool:
         ...
 
     @abstractmethod
-    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None:
         """Check if the conversation should be terminated based on the messages received
         since the last time the condition was called.
         Return a StopMessage if the conversation should be terminated, or None otherwise.
@@ -102,7 +102,7 @@ def __init__(self, *conditions: TerminationCondition) -> None:
     def terminated(self) -> bool:
         return all(condition.terminated for condition in self._conditions)
 
-    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None:
         if self.terminated:
             raise TerminatedException("Termination condition has already been reached.")
         # Check all remaining conditions.
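(Editor's aside, not part of the patch: under the new signature, a custom termination condition receives `Sequence[BaseMessage]` and must filter for chat messages itself. A minimal sketch; `KeywordTermination` is a hypothetical example rather than a library class, and it assumes the base class's `reset` method, which is not shown in these hunks, plus the `content_to_str` helper added later in this patch:)

    from typing import Sequence

    from autogen_agentchat.base import TerminatedException, TerminationCondition
    from autogen_agentchat.messages import BaseChatMessage, BaseMessage, StopMessage


    class KeywordTermination(TerminationCondition):
        """Terminate the conversation when any chat message contains a keyword."""

        def __init__(self, keyword: str) -> None:
            self._keyword = keyword
            self._terminated = False

        @property
        def terminated(self) -> bool:
            return self._terminated

        async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None:
            if self._terminated:
                raise TerminatedException("Termination condition has already been reached")
            for message in messages:
                # Agent events are skipped; only chat messages have a model-facing text form.
                if isinstance(message, BaseChatMessage) and self._keyword in message.content_to_str():
                    self._terminated = True
                    return StopMessage(content=f"Keyword '{self._keyword}' found.", source="KeywordTermination")
            return None

        async def reset(self) -> None:
            self._terminated = False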
@@ -153,7 +153,7 @@ def __init__(self, *conditions: TerminationCondition) -> None: def terminated(self) -> bool: return any(condition.terminated for condition in self._conditions) - async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self.terminated: raise RuntimeError("Termination condition has already been reached") stop_messages = await asyncio.gather(*[condition(messages) for condition in self._conditions]) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py index 7ccddd1f6da4..443a172549e0 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py @@ -7,9 +7,8 @@ from ..base import TerminatedException, TerminationCondition from ..messages import ( - AgentEvent, BaseChatMessage, - ChatMessage, + BaseMessage, HandoffMessage, MultiModalMessage, StopMessage, @@ -35,7 +34,7 @@ def __init__(self) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -81,7 +80,7 @@ def __init__(self, max_messages: int, include_agent_event: bool = False) -> None def terminated(self) -> bool: return self._message_count >= self._max_messages - async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self.terminated: raise TerminatedException("Termination condition has already been reached") self._message_count += len([m for m in messages if self._include_agent_event or isinstance(m, BaseChatMessage)]) @@ -130,7 +129,7 @@ def __init__(self, text: str, sources: Sequence[str] | None = None) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -208,7 +207,7 @@ def terminated(self) -> bool: or (self._max_completion_token is not None and self._completion_token_count >= self._max_completion_token) ) - async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self.terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -265,7 +264,7 @@ def __init__(self, target: str) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -310,7 +309,7 @@ def __init__(self, timeout_seconds: float) -> None: def terminated(self) -> bool: 
return self._terminated - async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") @@ -372,7 +371,7 @@ def set(self) -> None: """Set the termination condition to terminated.""" self._setted = True - async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") if self._setted: @@ -417,7 +416,7 @@ def __init__(self, sources: List[str]) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") if not messages: @@ -470,7 +469,7 @@ def __init__(self, source: str | None = None) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -520,7 +519,7 @@ def __init__(self, function_name: str) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index 88f22e23f550..d992bed96d6c 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -4,20 +4,22 @@ class and includes specific fields relevant to the type of message being sent. 
""" -import importlib -from abc import ABC -from typing import Any, Dict, Generic, List, Literal, TypeVar +from abc import ABC, abstractmethod +from typing import Any, Dict, Generic, List, Literal, Mapping, TypeVar from autogen_core import FunctionCall, Image from autogen_core.memory import MemoryContent -from autogen_core.models import FunctionExecutionResult, LLMMessage, RequestUsage -from pydantic import BaseModel, ConfigDict, Field -from typing_extensions import Annotated, Self +from autogen_core.models import FunctionExecutionResult, LLMMessage, RequestUsage, UserMessage +from pydantic import BaseModel, ConfigDict, Field, computed_field +from typing_extensions import Annotated class BaseMessage(BaseModel, ABC): """Base class for all message types.""" + content: Any + """The content of the message.""" + source: str """The name of the agent that sent this message.""" @@ -29,80 +31,106 @@ class BaseMessage(BaseModel, ABC): model_config = ConfigDict(arbitrary_types_allowed=True) + @computed_field + def type(self) -> str: + """Get the class name.""" + return self.__class__.__name__ + + @abstractmethod + def content_to_render(self) -> str: + """Convert the content of the message to a string-only representation. + This is used for rendering the message in the UI.""" + ... + class BaseChatMessage(BaseMessage, ABC): - """Base class for chat messages.""" + """Base class for chat messages. + + This class is used for messages that are sent between agents in a chat + conversation. Agents are expected to use process the content of the + message using either models or code and return a response as another + chat message.""" + + @abstractmethod + def content_to_str(self) -> str: + """Convert the content of the message to a string-only representation. + This is used for creating a text-only content for models. + + This is not used for rendering the message in the UI. For that, use + :meth:`content_to_render`. + + The difference between this and :meth:`to_llm_messages` is that this + is used to construct parts of the a message for the model client, + while :meth:`to_llm_messages` is used to create complete messages + for the model client. + """ + ... + + @abstractmethod + def to_llm_messages(self) -> List[LLMMessage]: + """Convert the message to a list of :class:`~autogen_core.models.LLMMessage` + for use with the model client.""" + ... + + +class BaseTextChatMessage(BaseChatMessage, ABC): + """Base class for all text-only chat message types.""" - pass + content: str + """The content of the message.""" + + def content_to_render(self) -> str: + return self.content + + def content_to_str(self) -> str: + return self.content + + def to_llm_messages(self) -> List[LLMMessage]: + return [UserMessage(content=self.content, source=self.source)] class BaseAgentEvent(BaseMessage, ABC): - """Base class for agent events.""" + """Base class for agent events. + + Agent events are used to signal actions and thoughts produced by agents + and teams to user and applications. They are not used for agent-to-agent + communication and are not expected to be processed by other agents. 
+ """ - pass + def content_to_render(self) -> str: + return str(self.content) -ContentType = TypeVar("ContentType", bound=BaseModel, covariant=True) +StructuredContentType = TypeVar("StructuredContentType", bound=BaseModel, covariant=True) +"""Type variable for structured content types.""" -class StructuredMessage(BaseChatMessage, Generic[ContentType]): +class StructuredMessage(BaseChatMessage, Generic[StructuredContentType]): """A structured message with a specific content type.""" - content: ContentType + content: StructuredContentType """The content of the message. Must be a subclass of `Pydantic BaseModel `_.""" - content_class_path: str | None = None - """The path to the content class. This is set automatically when the message is created.""" - - type: Literal["StructuredMessage"] = "StructuredMessage" - - def __init__(self, **data: Any) -> None: - super().__init__(**data) - module_name = self.content.__class__.__module__ - class_name = self.content.__class__.__qualname__ - self.content_class_path = f"{module_name}.{class_name}" - - def dump(self) -> Dict[str, Any]: - """Dump the message to a dictionary. This is used for serialization - and ensures that the content is serialized correctly.""" - data = super().model_dump() - data["content"] = self.content.model_dump() - return data - - @classmethod - def load(cls, obj: Dict[str, Any]) -> Self: - """Load the message from a dictionary. This is used for deserialization - and ensures that the content is deserialized correctly.""" - content_class_path = obj["content_class_path"] - module_name, class_name = content_class_path.rsplit(".", 1) - try: - module = importlib.import_module(module_name) - except ImportError as e: - raise ImportError( - f"Could not import module {module_name} when loading content class {class_name} for StructuredMessage. Ensure it is installed." - ) from e - if not hasattr(module, class_name): - raise ValueError( - f"Could not find class {class_name} in module {module_name} when loading content class for StructuredMessage." - ) - content_class = getattr(module, class_name) - if not issubclass(content_class, BaseModel): - raise ValueError(f"Invalid content class: {content_class}, must be a subclass of BaseModel") - content = content_class.model_validate(obj["content"]) - instance = super().model_validate(obj) - instance.content = content # type: ignore - instance.content_class_path = content_class_path - return instance + def content_to_render(self) -> str: + return self.content.model_dump_json(indent=2) + def content_to_str(self) -> str: + return self.content.model_dump_json() -class TextMessage(BaseChatMessage): - """A text message.""" + def to_llm_messages(self) -> List[LLMMessage]: + return [ + UserMessage( + content=self.content.model_dump_json(), + source=self.source, + ) + ] - content: str - """The content of the message.""" - type: Literal["TextMessage"] = "TextMessage" +class TextMessage(BaseTextChatMessage): + """A text message with string-only content.""" + + ... class MultiModalMessage(BaseChatMessage): @@ -111,31 +139,63 @@ class MultiModalMessage(BaseChatMessage): content: List[str | Image] """The content of the message.""" - type: Literal["MultiModalMessage"] = "MultiModalMessage" - - -class StopMessage(BaseChatMessage): + def content_to_str(self, image_placeholder: str | None = None) -> str: + """Convert the content of the message to a string-only representation. + If an image is present, it will be replaced with the image placeholder + if provided, otherwise it will be a base64 string. 
+        """
+        text = ""
+        for c in self.content:
+            if isinstance(c, str):
+                text += c
+            elif isinstance(c, Image):
+                if image_placeholder is not None:
+                    text += f" {image_placeholder}"
+                else:
+                    text += f" {c.to_base64()}"
+        return text
+
+    def content_to_render(self, iterm: bool = False) -> str:
+        result: List[str] = []
+        for c in self.content:
+            if isinstance(c, str):
+                result.append(c)
+            else:
+                if iterm:
+                    # iTerm2 image rendering protocol: https://iterm2.com/documentation-images.html
+                    image_data = c.to_base64()
+                    result.append(f"\033]1337;File=inline=1:{image_data}\a\n")
+                else:
+                    result.append("<image>")
+        return "\n".join(result)
+
+    def to_llm_messages(self) -> List[LLMMessage]:
+        return [UserMessage(content=self.content, source=self.source)]
+
+
+class StopMessage(BaseTextChatMessage):
     """A message requesting stop of a conversation."""
 
-    content: str
-    """The content for the stop message."""
-
-    type: Literal["StopMessage"] = "StopMessage"
+    ...
 
 
-class HandoffMessage(BaseChatMessage):
+class HandoffMessage(BaseTextChatMessage):
     """A message requesting handoff of a conversation to another agent."""
 
     target: str
     """The name of the target agent to handoff to."""
 
-    content: str
-    """The handoff message to the target agent."""
-
     context: List[LLMMessage] = []
     """The model context to be passed to the target agent."""
 
-    type: Literal["HandoffMessage"] = "HandoffMessage"
+    def to_llm_messages(self) -> List[LLMMessage]:
+        return [*self.context, UserMessage(content=self.content, source=self.source)]
+
+
+class ToolCallSummaryMessage(BaseTextChatMessage):
+    """A message signaling the summary of tool call results."""
+
+    ...
 
 
 class ToolCallRequestEvent(BaseAgentEvent):
@@ -144,8 +204,6 @@
     content: List[FunctionCall]
     """The tool calls."""
 
-    type: Literal["ToolCallRequestEvent"] = "ToolCallRequestEvent"
-
 
 class ToolCallExecutionEvent(BaseAgentEvent):
     """An event signaling the execution of tool calls."""
@@ -153,17 +211,6 @@
     content: List[FunctionExecutionResult]
     """The tool call results."""
 
-    type: Literal["ToolCallExecutionEvent"] = "ToolCallExecutionEvent"
-
-
-class ToolCallSummaryMessage(BaseChatMessage):
-    """A message signaling the summary of tool call results."""
-
-    content: str
-    """Summary of the the tool call results."""
-
-    type: Literal["ToolCallSummaryMessage"] = "ToolCallSummaryMessage"
-
 
 class UserInputRequestedEvent(BaseAgentEvent):
     """An event signaling that the user proxy has requested user input. Published prior to invoking the input callback."""
@@ -174,8 +221,6 @@
     content: Literal[""] = ""
     """Empty content for compat with consumers expecting a content field."""
 
-    type: Literal["UserInputRequestedEvent"] = "UserInputRequestedEvent"
-
 
 class MemoryQueryEvent(BaseAgentEvent):
     """An event signaling the results of memory queries."""
@@ -183,39 +228,28 @@
     content: List[MemoryContent]
     """The memory query results."""
 
-    type: Literal["MemoryQueryEvent"] = "MemoryQueryEvent"
-
 
 class ModelClientStreamingChunkEvent(BaseAgentEvent):
     """An event signaling a text output chunk from a model client in streaming mode."""
 
     content: str
-    """The partial text chunk."""
-
-    type: Literal["ModelClientStreamingChunkEvent"] = "ModelClientStreamingChunkEvent"
+    """A string chunk from the model client."""
 
 
 class ThoughtEvent(BaseAgentEvent):
-    """An event signaling the thought process of an agent.
+    """An event signaling the thought process of a model.
+    It is used to communicate the reasoning tokens generated by a reasoning model,
+    or the extra text content generated by a function call."""
 
     content: str
-    """The thought process."""
-
-    type: Literal["ThoughtEvent"] = "ThoughtEvent"
+    """The thought process of the model."""
 
 
 ChatMessage = Annotated[
-    StructuredMessage[BaseModel]
-    | TextMessage
-    | MultiModalMessage
-    | StopMessage
-    | ToolCallSummaryMessage
-    | HandoffMessage,
+    TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage,
     Field(discriminator="type"),
 ]
-"""Messages for agent-to-agent communication only."""
+"""Builtin chat message types for agent-to-agent communication only."""
 
 
 AgentEvent = Annotated[
@@ -227,7 +261,57 @@ class ThoughtEvent(BaseAgentEvent):
     | ThoughtEvent,
     Field(discriminator="type"),
 ]
-"""Events emitted by agents and teams when they work, not used for agent-to-agent communication."""
+"""Builtin agent events emitted by agents and teams when they work, not used for agent-to-agent communication."""
+
+
+class MessageFactory:
+    """A factory for creating messages from JSON-serializable dictionaries.
+
+    This is useful for deserializing messages from JSON data.
+    """
+
+    def __init__(self) -> None:
+        self._message_types: Dict[str, type[BaseMessage]] = {}
+        # Register all message types.
+        self._message_types[TextMessage.__name__] = TextMessage
+        self._message_types[MultiModalMessage.__name__] = MultiModalMessage
+        self._message_types[StopMessage.__name__] = StopMessage
+        self._message_types[ToolCallSummaryMessage.__name__] = ToolCallSummaryMessage
+        self._message_types[HandoffMessage.__name__] = HandoffMessage
+        self._message_types[ToolCallRequestEvent.__name__] = ToolCallRequestEvent
+        self._message_types[ToolCallExecutionEvent.__name__] = ToolCallExecutionEvent
+        self._message_types[MemoryQueryEvent.__name__] = MemoryQueryEvent
+        self._message_types[UserInputRequestedEvent.__name__] = UserInputRequestedEvent
+        self._message_types[ModelClientStreamingChunkEvent.__name__] = ModelClientStreamingChunkEvent
+        self._message_types[ThoughtEvent.__name__] = ThoughtEvent
+
+    def register(self, message_type: type[BaseMessage]) -> None:
+        """Register a new message type with the factory."""
+        if not issubclass(message_type, BaseMessage):
+            raise ValueError(f"Message type {message_type} must be a subclass of BaseMessage.")
+        # Get the class name of the message type.
+        class_name = message_type.__name__
+        # Check if the class name is already registered.
+        if class_name in self._message_types:
+            raise ValueError(f"Message type {class_name} is already registered.")
+        # Register the message type.
+        self._message_types[class_name] = message_type
+
+    def create(self, data: Mapping[str, Any]) -> BaseMessage:
+        """Create a message from a dictionary of JSON-serializable data."""
+        # Get the type of the message from the dictionary.
+        message_type = data.get("type")
+        if message_type not in self._message_types:
+            raise ValueError(f"Unknown message type: {message_type}")
+        if not isinstance(message_type, str):
+            raise ValueError(f"Message type must be a string, got {type(message_type)}")
+
+        # Get the class for the message type.
+        message_class = self._message_types[message_type]
+
+        # Create an instance of the message class.
+ assert issubclass(message_class, BaseMessage) + return message_class.model_validate(data) __all__ = [ @@ -246,4 +330,5 @@ class ThoughtEvent(BaseAgentEvent): "UserInputRequestedEvent", "ModelClientStreamingChunkEvent", "ThoughtEvent", + "MessageFactory", ] diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py b/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py index d23a3de5fc9c..ecc7b5f7cae7 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/state/_states.py @@ -1,16 +1,6 @@ -from typing import Annotated, Any, Dict, List, Mapping, Optional +from typing import Any, List, Mapping, Optional from pydantic import BaseModel, Field -from typing_extensions import Self - -from ..messages import ( - AgentEvent, - ChatMessage, - StructuredMessage, -) - -# Ensures pydantic can distinguish between types of events & messages. -_AgentMessage = Annotated[AgentEvent | ChatMessage, Field(discriminator="type")] class BaseState(BaseModel): @@ -37,62 +27,18 @@ class TeamState(BaseState): class BaseGroupChatManagerState(BaseState): """Base state for all group chat managers.""" - message_thread: List[_AgentMessage] = Field(default_factory=list) + message_thread: List[Mapping[str, Any]] = Field(default_factory=list) current_turn: int = Field(default=0) type: str = Field(default="BaseGroupChatManagerState") - def model_dump(self, *args: Any, **kwargs: Any) -> Dict[str, Any]: - """Override model_dump to ensure StructuredMessage is handled properly.""" - state = super().model_dump(*args, **kwargs) - for i, message in enumerate(self.message_thread): - if isinstance(message, StructuredMessage): - state["message_thread"][i] = message.dump() - return state - - @classmethod - def model_validate(cls, obj: Any, **kwargs: Any) -> Self: - """Override model_validate to ensure message_thread is handled properly for StructuredMessage.""" - messages = obj["message_thread"] - instance = super().model_validate(obj, **kwargs) - for i, message in enumerate(messages): - # For each message, check if it's a structured message and validate it - # If it is, replace it with the validated instance - # Otherwise, leave it as is. 
- if message["type"] == "StructuredMessage": - structured_message = StructuredMessage[BaseModel].load(message) - instance.message_thread[i] = structured_message - return instance - class ChatAgentContainerState(BaseState): """State for a container of chat agents.""" agent_state: Mapping[str, Any] = Field(default_factory=dict) - message_buffer: List[ChatMessage] = Field(default_factory=list) + message_buffer: List[Mapping[str, Any]] = Field(default_factory=list) type: str = Field(default="ChatAgentContainerState") - def model_dump(self, *args: Any, **kwargs: Any) -> dict[str, Any]: - """Override model_dump to ensure StructuredMessage is handled properly.""" - state = super().model_dump(*args, **kwargs) - for i, message in enumerate(self.message_buffer): - if isinstance(message, StructuredMessage): - state["message_buffer"][i] = message.dump() - return state - - @classmethod - def model_validate(cls, obj: Any, **kwargs: Any) -> Self: - """Override model_validate to ensure message_buffer is handled properly for StructuredMessage.""" - messages = obj["message_buffer"] - instance = super().model_validate(obj, **kwargs) - for i, message in enumerate(messages): - # For each message, check if it's a structured message and validate it - # If it is, replace it with the validated instance - # Otherwise, leave it as is. - if message["type"] == "StructuredMessage": - structured_message = StructuredMessage[BaseModel].load(message) - instance.message_buffer[i] = structured_message - return instance - class RoundRobinManagerState(BaseGroupChatManagerState): """State for :class:`~autogen_agentchat.teams.RoundRobinGroupChat` manager.""" diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py index ff6731b03f4a..a33d5897c20f 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py @@ -18,9 +18,9 @@ from ... import EVENT_LOGGER_NAME from ...base import ChatAgent, TaskResult, Team, TerminationCondition from ...messages import ( - AgentEvent, BaseChatMessage, - ChatMessage, + BaseMessage, + MessageFactory, ModelClientStreamingChunkEvent, StopMessage, TextMessage, @@ -50,6 +50,7 @@ def __init__( termination_condition: TerminationCondition | None = None, max_turns: int | None = None, runtime: AgentRuntime | None = None, + custom_message_types: List[type[BaseMessage]] | None = None, ): if len(participants) == 0: raise ValueError("At least one participant is required.") @@ -59,6 +60,10 @@ def __init__( self._base_group_chat_manager_class = group_chat_manager_class self._termination_condition = termination_condition self._max_turns = max_turns + self._message_factory = MessageFactory() + if custom_message_types is not None: + for message_type in custom_message_types: + self._message_factory.register(message_type) # The team ID is a UUID that is used to identify the team and its participants # in the agent runtime. It is used to create unique topic types for each participant. @@ -85,7 +90,7 @@ def __init__( self._output_topic_type = f"output_topic_{self._team_id}" # The queue for collecting the output messages. 
- self._output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination] = asyncio.Queue() + self._output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination] = asyncio.Queue() # Create a runtime for the team. if runtime is not None: @@ -112,9 +117,10 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, + message_factory: MessageFactory, ) -> Callable[[], SequentialRoutedAgent]: ... def _create_participant_factory( @@ -122,9 +128,10 @@ def _create_participant_factory( parent_topic_type: str, output_topic_type: str, agent: ChatAgent, + message_factory: MessageFactory, ) -> Callable[[], ChatAgentContainer]: def _factory() -> ChatAgentContainer: - container = ChatAgentContainer(parent_topic_type, output_topic_type, agent) + container = ChatAgentContainer(parent_topic_type, output_topic_type, agent, message_factory) return container return _factory @@ -140,7 +147,9 @@ async def _init(self, runtime: AgentRuntime) -> None: await ChatAgentContainer.register( runtime, type=agent_type, - factory=self._create_participant_factory(self._group_topic_type, self._output_topic_type, participant), + factory=self._create_participant_factory( + self._group_topic_type, self._output_topic_type, participant, self._message_factory + ), ) # Add subscriptions for the participant. # The participant should be able to receive messages from its own topic. @@ -162,6 +171,7 @@ async def _init(self, runtime: AgentRuntime) -> None: output_message_queue=self._output_message_queue, termination_condition=self._termination_condition, max_turns=self._max_turns, + message_factory=self._message_factory, ), ) # Add subscriptions for the group chat manager. @@ -185,7 +195,7 @@ async def _init(self, runtime: AgentRuntime) -> None: async def run( self, *, - task: str | ChatMessage | Sequence[ChatMessage] | None = None, + task: str | BaseChatMessage | Sequence[BaseChatMessage] | None = None, cancellation_token: CancellationToken | None = None, ) -> TaskResult: """Run the team and return the result. The base implementation uses @@ -287,9 +297,9 @@ async def main() -> None: async def run_stream( self, *, - task: str | ChatMessage | Sequence[ChatMessage] | None = None, + task: str | BaseChatMessage | Sequence[BaseChatMessage] | None = None, cancellation_token: CancellationToken | None = None, - ) -> AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None]: + ) -> AsyncGenerator[BaseMessage | TaskResult, None]: """Run the team and produces a stream of messages and the final result of the type :class:`~autogen_agentchat.base.TaskResult` as the last item in the stream. Once the team is stopped, the termination condition is reset. @@ -388,7 +398,7 @@ async def main() -> None: """ # Create the messages list if the task is a string or a chat message. - messages: List[ChatMessage] | None = None + messages: List[BaseChatMessage] | None = None if task is None: pass elif isinstance(task, str): @@ -448,7 +458,7 @@ async def stop_runtime() -> None: cancellation_token=cancellation_token, ) # Collect the output messages in order. 
-        output_messages: List[AgentEvent | ChatMessage] = []
+        output_messages: List[BaseMessage] = []
         stop_reason: str | None = None
 
         # Yield the messages until the queue is empty.
         while True:
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py
index 0cd45633728a..cbf1c5b56c7e 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py
@@ -5,7 +5,7 @@
 from autogen_core import DefaultTopicId, MessageContext, event, rpc
 
 from ...base import TerminationCondition
-from ...messages import AgentEvent, ChatMessage, StopMessage
+from ...messages import BaseChatMessage, BaseMessage, MessageFactory, StopMessage
 from ._events import (
     GroupChatAgentResponse,
     GroupChatMessage,
@@ -39,9 +39,10 @@ def __init__(
         participant_topic_types: List[str],
         participant_names: List[str],
         participant_descriptions: List[str],
-        output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination],
-        termination_condition: TerminationCondition | None = None,
-        max_turns: int | None = None,
+        output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination],
+        termination_condition: TerminationCondition | None,
+        max_turns: int | None,
+        message_factory: MessageFactory,
     ):
         super().__init__(
             description="Group chat manager",
@@ -66,13 +67,14 @@ def __init__(
             name: topic_type for name, topic_type in zip(participant_names, participant_topic_types, strict=True)
         }
         self._participant_descriptions = participant_descriptions
-        self._message_thread: List[AgentEvent | ChatMessage] = []
+        self._message_thread: List[BaseMessage] = []
         self._output_message_queue = output_message_queue
         self._termination_condition = termination_condition
         if max_turns is not None and max_turns <= 0:
             raise ValueError("The maximum number of turns must be greater than 0.")
         self._max_turns = max_turns
         self._current_turn = 0
+        self._message_factory = message_factory
 
     @rpc
     async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> None:
@@ -139,7 +141,7 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No
     @event
     async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: MessageContext) -> None:
         # Append the message to the message thread and construct the delta.
-        delta: List[AgentEvent | ChatMessage] = []
+        delta: List[BaseMessage] = []
         if message.agent_response.inner_messages is not None:
             for inner_message in message.agent_response.inner_messages:
                 self._message_thread.append(inner_message)
@@ -223,7 +225,7 @@ async def handle_resume(self, message: GroupChatResume, ctx: MessageContext) ->
         pass
 
     @abstractmethod
-    async def validate_group_state(self, messages: List[ChatMessage] | None) -> None:
+    async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> None:
         """Validate the state of the group chat given the start messages.
         This is executed when the group chat manager receives a GroupChatStart event.
@@ -233,7 +235,7 @@ async def validate_group_state(self, messages: List[ChatMessage] | None) -> None
         ...
@abstractmethod - async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: + async def select_speaker(self, thread: List[BaseMessage]) -> str: """Select a speaker from the participants and return the topic type of the selected speaker.""" ... diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py index 7c86556e257a..7abf0c6a7b3f 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py @@ -2,8 +2,9 @@ from autogen_core import DefaultTopicId, MessageContext, event, rpc +from autogen_agentchat.messages import BaseChatMessage, MessageFactory + from ...base import ChatAgent, Response -from ...messages import ChatMessage from ...state import ChatAgentContainerState from ._events import ( GroupChatAgentResponse, @@ -28,7 +29,9 @@ class ChatAgentContainer(SequentialRoutedAgent): agent (ChatAgent): The agent to delegate message handling to. """ - def __init__(self, parent_topic_type: str, output_topic_type: str, agent: ChatAgent) -> None: + def __init__( + self, parent_topic_type: str, output_topic_type: str, agent: ChatAgent, message_factory: MessageFactory + ) -> None: super().__init__( description=agent.description, sequential_message_types=[ @@ -41,7 +44,8 @@ def __init__(self, parent_topic_type: str, output_topic_type: str, agent: ChatAg self._parent_topic_type = parent_topic_type self._output_topic_type = output_topic_type self._agent = agent - self._message_buffer: List[ChatMessage] = [] + self._message_buffer: List[BaseChatMessage] = [] + self._message_factory = message_factory @event async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> None: @@ -105,10 +109,18 @@ async def on_unhandled_message(self, message: Any, ctx: MessageContext) -> None: async def save_state(self) -> Mapping[str, Any]: agent_state = await self._agent.save_state() - state = ChatAgentContainerState(agent_state=agent_state, message_buffer=list(self._message_buffer)) + state = ChatAgentContainerState( + agent_state=agent_state, message_buffer=[message.model_dump() for message in self._message_buffer] + ) return state.model_dump() async def load_state(self, state: Mapping[str, Any]) -> None: container_state = ChatAgentContainerState.model_validate(state) - self._message_buffer = list(container_state.message_buffer) + self._message_buffer = [] + for message_data in container_state.message_buffer: + message = self._message_factory.create(message_data) + if isinstance(message, BaseChatMessage): + self._message_buffer.append(message) + else: + raise ValueError(f"Invalid message type in message buffer: {type(message)}") await self._agent.load_state(container_state.agent_state) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py index f705a54c4a50..062e77d31fe5 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py @@ -3,13 +3,13 @@ from pydantic import BaseModel from ...base import Response -from ...messages import AgentEvent, ChatMessage, StopMessage +from ...messages import BaseChatMessage, BaseMessage, 
StopMessage class GroupChatStart(BaseModel): """A request to start a group chat.""" - messages: List[ChatMessage] | None = None + messages: List[BaseChatMessage] | None = None """An optional list of messages to start the group chat.""" @@ -29,7 +29,7 @@ class GroupChatRequestPublish(BaseModel): class GroupChatMessage(BaseModel): """A message from a group chat.""" - message: AgentEvent | ChatMessage + message: BaseMessage """The message that was published.""" diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py index d391f7b62ff8..c73363dc24ea 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py @@ -9,7 +9,7 @@ from .... import EVENT_LOGGER_NAME, TRACE_LOGGER_NAME from ....base import ChatAgent, TerminationCondition -from ....messages import AgentEvent, ChatMessage +from ....messages import BaseMessage, MessageFactory from .._base_group_chat import BaseGroupChat from .._events import GroupChatTermination from ._magentic_one_orchestrator import MagenticOneOrchestrator @@ -128,9 +128,10 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, + message_factory: MessageFactory, ) -> Callable[[], MagenticOneOrchestrator]: return lambda: MagenticOneOrchestrator( name, @@ -140,6 +141,7 @@ def _create_group_chat_manager_factory( participant_names, participant_descriptions, max_turns, + message_factory, self._model_client, self._max_stalls, self._final_answer_prompt, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py index bfef2b4ed184..656104651463 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py @@ -15,9 +15,10 @@ from .... 
import TRACE_LOGGER_NAME from ....base import Response, TerminationCondition from ....messages import ( - AgentEvent, - ChatMessage, + BaseChatMessage, + BaseMessage, HandoffMessage, + MessageFactory, MultiModalMessage, StopMessage, TextMessage, @@ -61,10 +62,11 @@ def __init__( participant_names: List[str], participant_descriptions: List[str], max_turns: int | None, + message_factory: MessageFactory, model_client: ChatCompletionClient, max_stalls: int, final_answer_prompt: str, - output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], termination_condition: TerminationCondition | None, ): super().__init__( @@ -77,6 +79,7 @@ def __init__( output_message_queue, termination_condition, max_turns, + message_factory, ) self._model_client = model_client self._max_stalls = max_stalls @@ -181,7 +184,7 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No @event async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: MessageContext) -> None: # type: ignore - delta: List[AgentEvent | ChatMessage] = [] + delta: List[BaseMessage] = [] if message.agent_response.inner_messages is not None: for inner_message in message.agent_response.inner_messages: delta.append(inner_message) @@ -198,12 +201,12 @@ async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: Mess return await self._orchestrate_step(ctx.cancellation_token) - async def validate_group_state(self, messages: List[ChatMessage] | None) -> None: + async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> None: pass async def save_state(self) -> Mapping[str, Any]: state = MagenticOneOrchestratorState( - message_thread=list(self._message_thread), + message_thread=[msg.model_dump() for msg in self._message_thread], current_turn=self._current_turn, task=self._task, facts=self._facts, @@ -215,7 +218,7 @@ async def save_state(self) -> Mapping[str, Any]: async def load_state(self, state: Mapping[str, Any]) -> None: orchestrator_state = MagenticOneOrchestratorState.model_validate(state) - self._message_thread = orchestrator_state.message_thread + self._message_thread = [self._message_factory.create(message) for message in orchestrator_state.message_thread] self._current_turn = orchestrator_state.current_turn self._task = orchestrator_state.task self._facts = orchestrator_state.facts @@ -223,7 +226,7 @@ async def load_state(self, state: Mapping[str, Any]) -> None: self._n_rounds = orchestrator_state.n_rounds self._n_stalls = orchestrator_state.n_stalls - async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: + async def select_speaker(self, thread: List[BaseMessage]) -> str: """Not used in this orchestrator, we select next speaker in _orchestrate_step.""" return "" diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py index 0e630df2d7cb..3aa5e37007ee 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py @@ -6,7 +6,7 @@ from typing_extensions import Self from ...base import ChatAgent, TerminationCondition -from ...messages import AgentEvent, ChatMessage +from ...messages import BaseChatMessage, BaseMessage, 
MessageFactory from ...state import RoundRobinManagerState from ._base_group_chat import BaseGroupChat from ._base_group_chat_manager import BaseGroupChatManager @@ -24,9 +24,10 @@ def __init__( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], termination_condition: TerminationCondition | None, - max_turns: int | None = None, + max_turns: int | None, + message_factory: MessageFactory, ) -> None: super().__init__( name, @@ -38,10 +39,11 @@ def __init__( output_message_queue, termination_condition, max_turns, + message_factory, ) self._next_speaker_index = 0 - async def validate_group_state(self, messages: List[ChatMessage] | None) -> None: + async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> None: pass async def reset(self) -> None: @@ -53,7 +55,7 @@ async def reset(self) -> None: async def save_state(self) -> Mapping[str, Any]: state = RoundRobinManagerState( - message_thread=list(self._message_thread), + message_thread=[message.model_dump() for message in self._message_thread], current_turn=self._current_turn, next_speaker_index=self._next_speaker_index, ) @@ -61,11 +63,11 @@ async def save_state(self) -> Mapping[str, Any]: async def load_state(self, state: Mapping[str, Any]) -> None: round_robin_state = RoundRobinManagerState.model_validate(state) - self._message_thread = list(round_robin_state.message_thread) + self._message_thread = [self._message_factory.create(message) for message in round_robin_state.message_thread] self._current_turn = round_robin_state.current_turn self._next_speaker_index = round_robin_state.next_speaker_index - async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: + async def select_speaker(self, thread: List[BaseMessage]) -> str: """Select a speaker from the participants in a round-robin fashion.""" current_speaker_index = self._next_speaker_index self._next_speaker_index = (current_speaker_index + 1) % len(self._participant_names) @@ -164,6 +166,7 @@ def __init__( termination_condition: TerminationCondition | None = None, max_turns: int | None = None, runtime: AgentRuntime | None = None, + custom_message_types: List[type[BaseMessage]] | None = None, ) -> None: super().__init__( participants, @@ -172,6 +175,7 @@ def __init__( termination_condition=termination_condition, max_turns=max_turns, runtime=runtime, + custom_message_types=custom_message_types, ) def _create_group_chat_manager_factory( @@ -182,9 +186,10 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, + message_factory: MessageFactory, ) -> Callable[[], RoundRobinGroupChatManager]: def _factory() -> RoundRobinGroupChatManager: return RoundRobinGroupChatManager( @@ -197,6 +202,7 @@ def _factory() -> RoundRobinGroupChatManager: output_message_queue, termination_condition, max_turns, + message_factory, ) return _factory diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py 
b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 3d346c9a6c02..8ffe8da30784 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -13,11 +13,10 @@ from ...agents import BaseChatAgent from ...base import ChatAgent, TerminationCondition from ...messages import ( - AgentEvent, - BaseAgentEvent, - ChatMessage, + BaseChatMessage, + BaseMessage, + MessageFactory, MultiModalMessage, - StructuredMessage, ) from ...state import SelectorManagerState from ._base_group_chat import BaseGroupChat @@ -26,12 +25,12 @@ trace_logger = logging.getLogger(TRACE_LOGGER_NAME) -SyncSelectorFunc = Callable[[Sequence[AgentEvent | ChatMessage]], str | None] -AsyncSelectorFunc = Callable[[Sequence[AgentEvent | ChatMessage]], Awaitable[str | None]] +SyncSelectorFunc = Callable[[Sequence[BaseMessage]], str | None] +AsyncSelectorFunc = Callable[[Sequence[BaseMessage]], Awaitable[str | None]] SelectorFuncType = Union[SyncSelectorFunc | AsyncSelectorFunc] -SyncCandidateFunc = Callable[[Sequence[AgentEvent | ChatMessage]], List[str]] -AsyncCandidateFunc = Callable[[Sequence[AgentEvent | ChatMessage]], Awaitable[List[str]]] +SyncCandidateFunc = Callable[[Sequence[BaseMessage]], List[str]] +AsyncCandidateFunc = Callable[[Sequence[BaseMessage]], Awaitable[List[str]]] CandidateFuncType = Union[SyncCandidateFunc | AsyncCandidateFunc] @@ -47,9 +46,10 @@ def __init__( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, + message_factory: MessageFactory, model_client: ChatCompletionClient, selector_prompt: str, allow_repeated_speaker: bool, @@ -67,6 +67,7 @@ def __init__( output_message_queue, termination_condition, max_turns, + message_factory, ) self._model_client = model_client self._selector_prompt = selector_prompt @@ -78,7 +79,7 @@ def __init__( self._candidate_func = candidate_func self._is_candidate_func_async = iscoroutinefunction(self._candidate_func) - async def validate_group_state(self, messages: List[ChatMessage] | None) -> None: + async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> None: pass async def reset(self) -> None: @@ -90,7 +91,7 @@ async def reset(self) -> None: async def save_state(self) -> Mapping[str, Any]: state = SelectorManagerState( - message_thread=list(self._message_thread), + message_thread=[msg.model_dump() for msg in self._message_thread], current_turn=self._current_turn, previous_speaker=self._previous_speaker, ) @@ -98,11 +99,11 @@ async def save_state(self) -> Mapping[str, Any]: async def load_state(self, state: Mapping[str, Any]) -> None: selector_state = SelectorManagerState.model_validate(state) - self._message_thread = list(selector_state.message_thread) + self._message_thread = [self._message_factory.create(msg) for msg in selector_state.message_thread] self._current_turn = selector_state.current_turn self._previous_speaker = selector_state.previous_speaker - async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: + async def select_speaker(self, thread: List[BaseMessage]) -> str: """Selects the next speaker in a group chat using a 
ChatCompletion client, with the selector function as override if it returns a speaker name.
@@ -153,23 +154,14 @@ async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str:
 # Construct the history of the conversation.
 history_messages: List[str] = []
 for msg in thread:
- if isinstance(msg, BaseAgentEvent):
- # Ignore agent events.
+ if not isinstance(msg, BaseChatMessage):
+ # Only process chat messages.
 continue
 message = f"{msg.source}:"
- if isinstance(msg.content, str):
- message += f" {msg.content}"
- elif isinstance(msg, MultiModalMessage):
- for item in msg.content:
- if isinstance(item, str):
- message += f" {item}"
- else:
- message += " [Image]"
- elif isinstance(msg, StructuredMessage):
- serialized_content = msg.content.model_dump_json()
- message += f" {serialized_content}"
+ if isinstance(msg, MultiModalMessage):
+ message += f" {msg.content_to_str(image_placeholder='[image]')}"
 else:
- raise ValueError(f"Unexpected message type in selector: {type(msg)}")
+ message += f" {msg.content_to_str()}"
 history_messages.append(
 message.rstrip() + "\n\n"
 ) # Create some consistency for how messages are separated in the transcript
@@ -312,11 +304,11 @@ class SelectorGroupChat(BaseGroupChat, Component[SelectorGroupChatConfig]):
 max_selector_attempts (int, optional): The maximum number of attempts to select a speaker using the model. Defaults to 3.
 If the model fails to select a speaker after the maximum number of attempts, the previous speaker will be used if available,
 otherwise the first participant will be used.
- selector_func (Callable[[Sequence[AgentEvent | ChatMessage]], str | None], Callable[[Sequence[AgentEvent | ChatMessage]], Awaitable[str | None]], optional): A custom selector
+ selector_func (Callable[[Sequence[BaseMessage]], str | None], Callable[[Sequence[BaseMessage]], Awaitable[str | None]], optional): A custom selector
 function that takes the conversation history and returns the name of the next speaker.
 If provided, this function will be used to override the model to select the next speaker.
 If the function returns None, the model will be used to select the next speaker.
- candidate_func (Callable[[Sequence[AgentEvent | ChatMessage]], List[str]], Callable[[Sequence[AgentEvent | ChatMessage]], Awaitable[List[str]]], optional):
+ candidate_func (Callable[[Sequence[BaseMessage]], List[str]], Callable[[Sequence[BaseMessage]], Awaitable[List[str]]], optional):
 A custom function that takes the conversation history and returns a filtered list of candidates for the next speaker
 selection using the model. If the function returns an empty list or `None`, `SelectorGroupChat` will raise a `ValueError`.
 This function is only used if `selector_func` is not set. The `allow_repeated_speaker` will be ignored if set.
@@ -417,7 +409,7 @@ def check_calculation(x: int, y: int, answer: int) -> str:
 system_message="Check the answer and respond with 'Correct!' 
or 'Incorrect!'", ) - def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None: + def selector_func(messages: Sequence[BaseMessage]) -> str | None: if len(messages) == 1 or messages[-1].content == "Incorrect!": return "Agent1" if messages[-1].source == "Agent1": @@ -461,6 +453,7 @@ def __init__( max_selector_attempts: int = 3, selector_func: Optional[SelectorFuncType] = None, candidate_func: Optional[CandidateFuncType] = None, + custom_message_types: List[type[BaseMessage]] | None = None, ): super().__init__( participants, @@ -469,6 +462,7 @@ def __init__( termination_condition=termination_condition, max_turns=max_turns, runtime=runtime, + custom_message_types=custom_message_types, ) # Validate the participants. if len(participants) < 2: @@ -488,9 +482,10 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, + message_factory: MessageFactory, ) -> Callable[[], BaseGroupChatManager]: return lambda: SelectorGroupChatManager( name, @@ -502,6 +497,7 @@ def _create_group_chat_manager_factory( output_message_queue, termination_condition, max_turns, + message_factory, self._model_client, self._selector_prompt, self._allow_repeated_speaker, @@ -534,7 +530,7 @@ def _from_config(cls, config: SelectorGroupChatConfig) -> Self: selector_prompt=config.selector_prompt, allow_repeated_speaker=config.allow_repeated_speaker, max_selector_attempts=config.max_selector_attempts, - # selector_func=ComponentLoader.load_component(config.selector_func, Callable[[Sequence[AgentEvent | ChatMessage]], str | None]) + # selector_func=ComponentLoader.load_component(config.selector_func, Callable[[Sequence[BaseMessage]], str | None]) # if config.selector_func # else None, ) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py index f28965712b71..12acdd83c1e2 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py @@ -5,7 +5,7 @@ from pydantic import BaseModel from ...base import ChatAgent, TerminationCondition -from ...messages import AgentEvent, ChatMessage, HandoffMessage +from ...messages import BaseChatMessage, BaseMessage, HandoffMessage, MessageFactory from ...state import SwarmManagerState from ._base_group_chat import BaseGroupChat from ._base_group_chat_manager import BaseGroupChatManager @@ -23,9 +23,10 @@ def __init__( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, + message_factory: MessageFactory, ) -> None: super().__init__( name, @@ -37,10 +38,11 @@ def __init__( output_message_queue, termination_condition, max_turns, + message_factory, ) self._current_speaker = self._participant_names[0] - async def validate_group_state(self, messages: List[ChatMessage] | None) -> None: + 
async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> None: """Validate the start messages for the group chat.""" # Check if any of the start messages is a handoff message. if messages: @@ -75,7 +77,7 @@ async def reset(self) -> None: await self._termination_condition.reset() self._current_speaker = self._participant_names[0] - async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: + async def select_speaker(self, thread: List[BaseMessage]) -> str: """Select a speaker from the participants based on handoff message. Looks for the last handoff message in the thread to determine the next speaker.""" if len(thread) == 0: @@ -90,7 +92,7 @@ async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: async def save_state(self) -> Mapping[str, Any]: state = SwarmManagerState( - message_thread=list(self._message_thread), + message_thread=[msg.model_dump() for msg in self._message_thread], current_turn=self._current_turn, current_speaker=self._current_speaker, ) @@ -98,7 +100,7 @@ async def save_state(self) -> Mapping[str, Any]: async def load_state(self, state: Mapping[str, Any]) -> None: swarm_state = SwarmManagerState.model_validate(state) - self._message_thread = list(swarm_state.message_thread) + self._message_thread = [self._message_factory.create(message) for message in swarm_state.message_thread] self._current_turn = swarm_state.current_turn self._current_speaker = swarm_state.current_speaker @@ -210,6 +212,7 @@ def __init__( termination_condition: TerminationCondition | None = None, max_turns: int | None = None, runtime: AgentRuntime | None = None, + custom_message_types: List[type[BaseMessage]] | None = None, ) -> None: super().__init__( participants, @@ -218,6 +221,7 @@ def __init__( termination_condition=termination_condition, max_turns=max_turns, runtime=runtime, + custom_message_types=custom_message_types, ) # The first participant must be able to produce handoff messages. 
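The state hunks above share one serialization pattern across all three group chat managers in this patch: `save_state` flattens each thread message with `model_dump()`, and `load_state` rebuilds typed messages through the manager's `MessageFactory`. A minimal sketch of that round trip, assuming the built-in message types come pre-registered with the factory (as the tests later in this patch suggest):

```python
from autogen_agentchat.messages import MessageFactory, TextMessage

factory = MessageFactory()

# A manager's message thread, as accumulated during a run.
thread = [TextMessage(content="hello", source="user")]

# save_state: each message becomes a plain dict tagged with its "type".
serialized = [message.model_dump() for message in thread]

# load_state: the factory dispatches on the "type" tag to rebuild the
# correct message class from each dict.
restored = [factory.create(data) for data in serialized]
assert isinstance(restored[0], TextMessage)
```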
first_participant = self._participants[0] @@ -232,9 +236,10 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, + message_factory: MessageFactory, ) -> Callable[[], SwarmGroupChatManager]: def _factory() -> SwarmGroupChatManager: return SwarmGroupChatManager( @@ -247,6 +252,7 @@ def _factory() -> SwarmGroupChatManager: output_message_queue, termination_condition, max_turns, + message_factory, ) return _factory diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py index 5e422a4e68fb..919b843aae22 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py @@ -5,17 +5,15 @@ from inspect import iscoroutinefunction from typing import AsyncGenerator, Awaitable, Callable, Dict, List, Optional, TypeVar, Union, cast -from autogen_core import CancellationToken, Image +from autogen_core import CancellationToken from autogen_core.models import RequestUsage from autogen_agentchat.agents import UserProxyAgent from autogen_agentchat.base import Response, TaskResult from autogen_agentchat.messages import ( - AgentEvent, - ChatMessage, + BaseMessage, ModelClientStreamingChunkEvent, MultiModalMessage, - StructuredMessage, UserInputRequestedEvent, ) @@ -81,7 +79,7 @@ def aprint(output: str, end: str = "\n", flush: bool = False) -> Awaitable[None] async def Console( - stream: AsyncGenerator[AgentEvent | ChatMessage | T, None], + stream: AsyncGenerator[BaseMessage | T, None], *, no_inline_images: bool = False, output_stats: bool = False, @@ -98,7 +96,7 @@ async def Console( It will be improved in future releases. Args: - stream (AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None] | AsyncGenerator[AgentEvent | ChatMessage | Response, None]): Message stream to render. + stream (AsyncGenerator[BaseMessage | TaskResult, None] | AsyncGenerator[BaseMessage | Response, None]): Message stream to render. This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. no_inline_images (bool, optional): If terminal is iTerm2 will render images inline. Use this to disable this behavior. Defaults to False. output_stats (bool, optional): (Experimental) If True, will output a summary of the messages and inline token usage info. Defaults to False. @@ -136,7 +134,11 @@ async def Console( duration = time.time() - start_time # Print final response. 
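The console hunk below replaces the module-level `_message_to_str` helper with per-message rendering. A rough sketch of the resulting dispatch, using only the `content_to_render` API this patch introduces (assuming it is defined on the message base class, with `MultiModalMessage` additionally accepting an `iterm` flag for inline images):

```python
from autogen_agentchat.messages import BaseMessage, MultiModalMessage, TextMessage


def render(message: BaseMessage, render_image_iterm: bool = False) -> str:
    # Multi-modal content may emit iTerm2 inline-image escape codes.
    if isinstance(message, MultiModalMessage):
        return message.content_to_render(iterm=render_image_iterm)
    # Every other message type renders itself as plain text.
    return message.content_to_render()


print(render(TextMessage(content="hi", source="user")))
```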
- output = f"{'-' * 10} {message.chat_message.source} {'-' * 10}\n{_message_to_str(message.chat_message, render_image_iterm=render_image_iterm)}\n" + if isinstance(message.chat_message, MultiModalMessage): + final_content = message.chat_message.content_to_render(iterm=render_image_iterm) + else: + final_content = message.chat_message.content_to_render() + output = f"{'-' * 10} {message.chat_message.source} {'-' * 10}\n{final_content}\n" if message.chat_message.models_usage: if output_stats: output += f"[Prompt tokens: {message.chat_message.models_usage.prompt_tokens}, Completion tokens: {message.chat_message.models_usage.completion_tokens}]\n" @@ -167,21 +169,22 @@ async def Console( user_input_manager.notify_event_received(message.request_id) else: # Cast required for mypy to be happy - message = cast(AgentEvent | ChatMessage, message) # type: ignore + message = cast(BaseMessage, message) # type: ignore if not streaming_chunks: # Print message sender. await aprint(f"{'-' * 10} {message.source} {'-' * 10}", end="\n", flush=True) if isinstance(message, ModelClientStreamingChunkEvent): - await aprint(message.content, end="") + await aprint(message.content_to_render(), end="") streaming_chunks.append(message.content) else: if streaming_chunks: streaming_chunks.clear() # Chunked messages are already printed, so we just print a newline. await aprint("", end="\n", flush=True) + elif isinstance(message, MultiModalMessage): + await aprint(message.content_to_render(iterm=render_image_iterm), end="\n", flush=True) else: - # Print message content. - await aprint(_message_to_str(message, render_image_iterm=render_image_iterm), end="\n", flush=True) + await aprint(message.content_to_render(), end="\n", flush=True) if message.models_usage: if output_stats: await aprint( @@ -196,28 +199,3 @@ async def Console( raise ValueError("No TaskResult or Response was processed.") return last_processed - - -# iTerm2 image rendering protocol: https://iterm2.com/documentation-images.html -def _image_to_iterm(image: Image) -> str: - image_data = image.to_base64() - return f"\033]1337;File=inline=1:{image_data}\a\n" - - -def _message_to_str(message: AgentEvent | ChatMessage, *, render_image_iterm: bool = False) -> str: - if isinstance(message, MultiModalMessage): - result: List[str] = [] - for c in message.content: - if isinstance(c, str): - result.append(c) - else: - if render_image_iterm: - result.append(_image_to_iterm(c)) - else: - result.append("") - return "\n".join(result) - elif isinstance(message, StructuredMessage): - # If the message is a StructuredMessage, we want to print the content as JSON. 
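For reference, the `model_dump_json(indent=2)` call removed just below is what the console used to print for structured messages; the same output can be reproduced directly from a message. A hedged sketch with an illustrative content model (`ReportContent` is not part of this patch):

```python
from pydantic import BaseModel

from autogen_agentchat.messages import StructuredMessage


class ReportContent(BaseModel):  # illustrative content model, not from the patch
    title: str
    score: float


msg = StructuredMessage[ReportContent](
    content=ReportContent(title="Q1 summary", score=0.9),
    source="analyst",
)
# Indented JSON, as the removed console helper printed it.
print(msg.content.model_dump_json(indent=2))
```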
- return message.content.model_dump_json(indent=2) - else: - return f"{message.content}" diff --git a/python/packages/autogen-agentchat/tests/test_assistant_agent.py b/python/packages/autogen-agentchat/tests/test_assistant_agent.py index 15ee8c873f40..40bb440667df 100644 --- a/python/packages/autogen-agentchat/tests/test_assistant_agent.py +++ b/python/packages/autogen-agentchat/tests/test_assistant_agent.py @@ -7,7 +7,7 @@ from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.base import Handoff, TaskResult from autogen_agentchat.messages import ( - ChatMessage, + BaseChatMessage, HandoffMessage, MemoryQueryEvent, ModelClientStreamingChunkEvent, @@ -585,7 +585,7 @@ async def test_list_chat_messages(monkeypatch: pytest.MonkeyPatch) -> None: ) # Create a list of chat messages - messages: List[ChatMessage] = [ + messages: List[BaseChatMessage] = [ TextMessage(content="Message 1", source="user"), TextMessage(content="Message 2", source="user"), ] diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index ac8c4ea482c8..5707d9a2f33b 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -15,8 +15,8 @@ from autogen_agentchat.base import Handoff, Response, TaskResult from autogen_agentchat.conditions import HandoffTermination, MaxMessageTermination, TextMentionTermination from autogen_agentchat.messages import ( - AgentEvent, - ChatMessage, + BaseChatMessage, + BaseMessage, HandoffMessage, MultiModalMessage, StopMessage, @@ -60,14 +60,14 @@ def __init__(self, name: str, description: str) -> None: self._total_messages = 0 @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: return (TextMessage,) @property def total_messages(self) -> int: return self._total_messages - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: if len(messages) > 0: assert isinstance(messages[0], TextMessage) self._last_message = messages[0].content @@ -89,14 +89,14 @@ def __init__(self, name: str, description: str) -> None: self._total_messages = 0 @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: return (TextMessage,) @property def total_messages(self) -> int: return self._total_messages - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: raise ValueError("I am a flaky agent...") async def on_reset(self, cancellation_token: CancellationToken) -> None: @@ -110,10 +110,10 @@ def __init__(self, name: str, description: str, *, stop_at: int = 1) -> None: self._stop_at = stop_at @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: return (TextMessage, StopMessage) - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: self._count += 
1 if self._count < self._stop_at: return await super().on_messages(messages, cancellation_token) @@ -134,7 +134,7 @@ class InputTask2(BaseModel): data: str -TaskType = str | List[ChatMessage] | ChatMessage +TaskType = str | List[BaseChatMessage] | BaseChatMessage @pytest_asyncio.fixture(params=["single_threaded", "embedded"]) # type: ignore @@ -742,7 +742,7 @@ async def test_selector_group_chat_custom_selector(runtime: AgentRuntime | None) agent3 = _EchoAgent("agent3", description="echo agent 3") agent4 = _EchoAgent("agent4", description="echo agent 4") - def _select_agent(messages: Sequence[AgentEvent | ChatMessage]) -> str | None: + def _select_agent(messages: Sequence[BaseMessage]) -> str | None: if len(messages) == 0: return "agent1" elif messages[-1].source == "agent1": @@ -783,7 +783,7 @@ async def test_selector_group_chat_custom_candidate_func(runtime: AgentRuntime | agent3 = _EchoAgent("agent3", description="echo agent 3") agent4 = _EchoAgent("agent4", description="echo agent 4") - def _candidate_func(messages: Sequence[AgentEvent | ChatMessage]) -> List[str]: + def _candidate_func(messages: Sequence[BaseMessage]) -> List[str]: if len(messages) == 0: return ["agent1"] elif messages[-1].source == "agent1": @@ -822,10 +822,10 @@ def __init__(self, name: str, description: str, next_agent: str) -> None: self._next_agent = next_agent @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: return (HandoffMessage,) - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: return Response( chat_message=HandoffMessage( content=f"Transferred to {self._next_agent}.", target=self._next_agent, source=self.name @@ -1169,7 +1169,7 @@ async def test_round_robin_group_chat_with_message_list(runtime: AgentRuntime | team = RoundRobinGroupChat([agent1, agent2], termination_condition=termination, runtime=runtime) # Create a list of messages - messages: List[ChatMessage] = [ + messages: List[BaseChatMessage] = [ TextMessage(content="Message 1", source="user"), TextMessage(content="Message 2", source="user"), TextMessage(content="Message 3", source="user"), @@ -1197,7 +1197,7 @@ async def test_round_robin_group_chat_with_message_list(runtime: AgentRuntime | index += 1 # Test with invalid message list - with pytest.raises(ValueError, match="All messages in task list must be valid ChatMessage types"): + with pytest.raises(ValueError, match="All messages in task list must be valid BaseChatMessage types"): await team.run(task=["not a message"]) # type: ignore[list-item, arg-type] # intentionally testing invalid input # Test with empty message list diff --git a/python/packages/autogen-agentchat/tests/test_group_chat_endpoint.py b/python/packages/autogen-agentchat/tests/test_group_chat_endpoint.py index 390a45e031f4..54a14efebd64 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat_endpoint.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat_endpoint.py @@ -4,10 +4,7 @@ import pytest from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.base import TaskResult -from autogen_agentchat.messages import ( - AgentEvent, - ChatMessage, -) +from autogen_agentchat.messages import BaseMessage from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.ui import Console from 
autogen_core.models import ChatCompletionClient
@@ -36,7 +33,7 @@ async def _test_selector_group_chat(model_client: ChatCompletionClient) -> None:
 async def _test_selector_group_chat_with_candidate_func(model_client: ChatCompletionClient) -> None:
 filtered_participants = ["developer", "tester"]
- def dummy_candidate_func(thread: Sequence[AgentEvent | ChatMessage]) -> List[str]:
+ def dummy_candidate_func(thread: Sequence[BaseMessage]) -> List[str]:
 # Dummy candidate function that only returns
 # the developer and tester participants.
 return filtered_participants
diff --git a/python/packages/autogen-agentchat/tests/test_group_chat_pause_resume.py b/python/packages/autogen-agentchat/tests/test_group_chat_pause_resume.py
index ee87f6f4a25b..e26c7262d66d 100644
--- a/python/packages/autogen-agentchat/tests/test_group_chat_pause_resume.py
+++ b/python/packages/autogen-agentchat/tests/test_group_chat_pause_resume.py
@@ -5,7 +5,7 @@
 import pytest_asyncio
 from autogen_agentchat.agents import BaseChatAgent
 from autogen_agentchat.base import Response
-from autogen_agentchat.messages import ChatMessage, TextMessage
+from autogen_agentchat.messages import BaseChatMessage, TextMessage
 from autogen_agentchat.teams import RoundRobinGroupChat
 from autogen_core import AgentRuntime, CancellationToken, SingleThreadedAgentRuntime
@@ -20,10 +20,10 @@ def __init__(self, name: str, description: str) -> None:
 self.counter = 0
 @property
- def produced_message_types(self) -> Sequence[type[ChatMessage]]:
+ def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:
 return [TextMessage]
- async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:
+ async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:
 assert not self._is_paused, "Agent is paused"
 async def _process() -> None:
diff --git a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py
index 34228ba039f7..51efa761de86 100644
--- a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py
+++ b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py
@@ -11,7 +11,7 @@
 )
 from autogen_agentchat.base import Response
 from autogen_agentchat.messages import (
- ChatMessage,
+ BaseChatMessage,
 TextMessage,
 )
 from autogen_agentchat.teams import (
@@ -34,14 +34,14 @@ def __init__(self, name: str, description: str) -> None:
 self._total_messages = 0
 @property
- def produced_message_types(self) -> Sequence[type[ChatMessage]]:
+ def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:
 return (TextMessage,)
 @property
 def total_messages(self) -> int:
 return self._total_messages
- async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:
+ async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:
 if len(messages) > 0:
 assert isinstance(messages[0], TextMessage)
 self._last_message = messages[0].content
diff --git a/python/packages/autogen-agentchat/tests/test_messages.py b/python/packages/autogen-agentchat/tests/test_messages.py
index 2777e9a7de8a..518a875d2266 100644
--- a/python/packages/autogen-agentchat/tests/test_messages.py
+++ b/python/packages/autogen-agentchat/tests/test_messages.py
@@ -1,4 +1,5 @@
-from autogen_agentchat.messages import StructuredMessage
+import pytest
+from autogen_agentchat.messages import 
HandoffMessage, MessageFactory, StructuredMessage, TextMessage from pydantic import BaseModel @@ -17,14 +18,11 @@ def test_structured_message() -> None: ) # Check that the message type is correct - assert message.type == "StructuredMessage" + assert message.type == "StructuredMessage[TestContent]" # Check that the content is of the correct type assert isinstance(message.content, TestContent) - # Check that the content class is set correctly - assert message.content_class_path == "test_messages.TestContent" - # Check that the content fields are set correctly assert message.content.field1 == "test" assert message.content.field2 == 42 @@ -32,29 +30,64 @@ def test_structured_message() -> None: # Check that model_dump works correctly dumped_message = message.model_dump() assert dumped_message["source"] == "test_agent" - assert dumped_message["content_class_path"] == "test_messages.TestContent" assert dumped_message["content"]["field1"] == "test" assert dumped_message["content"]["field2"] == 42 + assert dumped_message["type"] == "StructuredMessage[TestContent]" - # Check that model_validate works correctly - validated_message = StructuredMessage[TestContent].model_validate(dumped_message) - assert validated_message.source == "test_agent" - assert isinstance(validated_message.content, TestContent) - assert validated_message.content_class_path == "test_messages.TestContent" - assert validated_message.content.field1 == "test" - assert validated_message.content.field2 == 42 - - # Check that the dump method works correctly - dumped_message = message.dump() - assert dumped_message["type"] == "StructuredMessage" - assert dumped_message["content_class_path"] == "test_messages.TestContent" - assert dumped_message["content"]["field1"] == "test" - assert dumped_message["content"]["field2"] == 42 - # Check that the load method works correctly for dynamic deserialization. - message2 = StructuredMessage[BaseModel].load(dumped_message) - assert message2.type == "StructuredMessage" - assert isinstance(message2.content, TestContent) - assert message2.content_class_path == "test_messages.TestContent" - assert message2.content.field1 == "test" - assert message2.content.field2 == 42 +def test_message_factory() -> None: + factory = MessageFactory() + + # Text message data + text_data = { + "type": "TextMessage", + "source": "test_agent", + "content": "Hello, world!", + } + + # Create a TextMessage instance + text_message = factory.create(text_data) + assert isinstance(text_message, TextMessage) + assert text_message.source == "test_agent" + assert text_message.content == "Hello, world!" + assert text_message.type == "TextMessage" + + # Handoff message data + handoff_data = { + "type": "HandoffMessage", + "source": "test_agent", + "content": "handoff to another agent", + "target": "target_agent", + } + + # Create a HandoffMessage instance + handoff_message = factory.create(handoff_data) + assert isinstance(handoff_message, HandoffMessage) + assert handoff_message.source == "test_agent" + assert handoff_message.content == "handoff to another agent" + assert handoff_message.target == "target_agent" + assert handoff_message.type == "HandoffMessage" + + # Structured message data + structured_data = { + "type": "StructuredMessage[TestContent]", + "source": "test_agent", + "content": { + "field1": "test", + "field2": 42, + }, + } + # Create a StructuredMessage instance -- this will fail because the type + # is not registered in the factory. 
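The registration requirement this test exercises is what the `custom_message_types` constructor parameter added to the team classes in this patch appears to automate. A hypothetical sketch of that wiring (`EchoAgent` and `InputTask` are illustrative, not from the patch):

```python
from typing import Sequence

from pydantic import BaseModel

from autogen_agentchat.agents import BaseChatAgent
from autogen_agentchat.base import Response
from autogen_agentchat.messages import BaseChatMessage, StructuredMessage, TextMessage
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_core import CancellationToken


class InputTask(BaseModel):
    task: str


class EchoAgent(BaseChatAgent):
    def __init__(self, name: str) -> None:
        super().__init__(name, "Echoes the last message back.")

    @property
    def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:
        return (TextMessage,)

    async def on_messages(
        self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken
    ) -> Response:
        last = messages[-1].content if messages else ""
        return Response(chat_message=TextMessage(content=str(last), source=self.name))

    async def on_reset(self, cancellation_token: CancellationToken) -> None:
        pass


# Registering the structured type with the team should let its group chat
# manager rehydrate StructuredMessage[InputTask] from saved state.
team = RoundRobinGroupChat(
    [EchoAgent("a"), EchoAgent("b")],
    custom_message_types=[StructuredMessage[InputTask]],
)
```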
+ with pytest.raises(ValueError): + structured_message = factory.create(structured_data) + # Register the StructuredMessage type in the factory + factory.register(StructuredMessage[TestContent]) + # Create a StructuredMessage instance + structured_message = factory.create(structured_data) + assert isinstance(structured_message, StructuredMessage) + assert isinstance(structured_message.content, TestContent) # type: ignore + assert structured_message.source == "test_agent" + assert structured_message.content.field1 == "test" + assert structured_message.content.field2 == 42 + assert structured_message.type == "StructuredMessage[TestContent]" diff --git a/python/packages/autogen-agentchat/tests/test_userproxy_agent.py b/python/packages/autogen-agentchat/tests/test_userproxy_agent.py index 8ff6422a696f..855211de82a9 100644 --- a/python/packages/autogen-agentchat/tests/test_userproxy_agent.py +++ b/python/packages/autogen-agentchat/tests/test_userproxy_agent.py @@ -4,7 +4,7 @@ import pytest from autogen_agentchat.agents import UserProxyAgent from autogen_agentchat.base import Response -from autogen_agentchat.messages import ChatMessage, HandoffMessage, TextMessage +from autogen_agentchat.messages import BaseChatMessage, HandoffMessage, TextMessage from autogen_core import CancellationToken @@ -53,7 +53,7 @@ def custom_input(prompt: str) -> str: agent = UserProxyAgent(name="test_user", input_func=custom_input) - messages: Sequence[ChatMessage] = [ + messages: Sequence[BaseChatMessage] = [ TextMessage(content="Initial message", source="assistant"), HandoffMessage(content="Handing off to user for confirmation", source="assistant", target="test_user"), ] diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb index c3d8ed4d4b51..5748178db669 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb @@ -1,747 +1,747 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Custom Agents\n", - "\n", - "You may have agents with behaviors that do not fall into a preset. \n", - "In such cases, you can build custom agents.\n", - "\n", - "All agents in AgentChat inherit from {py:class}`~autogen_agentchat.agents.BaseChatAgent` \n", - "class and implement the following abstract methods and attributes:\n", - "\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n", - "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.ChatMessage` message types the agent can produce in its response.\n", - "\n", - "Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. 
If this method is not implemented, the agent\n", - "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n", - "that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n", - "yields all messages in the response." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## CountDownAgent\n", - "\n", - "In this example, we create a simple agent that counts down from a given number to zero,\n", - "and produces a stream of messages with the current count." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "3...\n", - "2...\n", - "1...\n", - "Done!\n" - ] - } - ], - "source": [ - "from typing import AsyncGenerator, List, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage\n", - "from autogen_core import CancellationToken\n", - "\n", - "\n", - "class CountDownAgent(BaseChatAgent):\n", - " def __init__(self, name: str, count: int = 3):\n", - " super().__init__(name, \"A simple agent that counts down.\")\n", - " self._count = count\n", - "\n", - " @property\n", - " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", - " return (TextMessage,)\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " # Calls the on_messages_stream.\n", - " response: Response | None = None\n", - " async for message in self.on_messages_stream(messages, cancellation_token):\n", - " if isinstance(message, Response):\n", - " response = message\n", - " assert response is not None\n", - " return response\n", - "\n", - " async def on_messages_stream(\n", - " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", - " inner_messages: List[AgentEvent | ChatMessage] = []\n", - " for i in range(self._count, 0, -1):\n", - " msg = TextMessage(content=f\"{i}...\", source=self.name)\n", - " inner_messages.append(msg)\n", - " yield msg\n", - " # The response is returned at the end of the stream.\n", - " # It contains the final message and all the inner messages.\n", - " yield Response(chat_message=TextMessage(content=\"Done!\", source=self.name), inner_messages=inner_messages)\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " pass\n", - "\n", - "\n", - "async def run_countdown_agent() -> None:\n", - " # Create a countdown agent.\n", - " countdown_agent = CountDownAgent(\"countdown\")\n", - "\n", - " # Run the agent with a given task and stream the response.\n", - " async for message in countdown_agent.on_messages_stream([], CancellationToken()):\n", - " if isinstance(message, Response):\n", - " print(message.chat_message.content)\n", - " else:\n", - " print(message.content)\n", - "\n", - "\n", - "# Use asyncio.run(run_countdown_agent()) when running in a script.\n", - "await run_countdown_agent()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ArithmeticAgent\n", - "\n", - "In this example, we create an agent class that can perform simple arithmetic operations\n", - "on a given integer. 
Then, we will use different instances of this agent class\n", - "in a {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", - "to transform a given integer into another integer by applying a sequence of arithmetic operations.\n", - "\n", - "The `ArithmeticAgent` class takes an `operator_func` that takes an integer and returns an integer,\n", - "after applying an arithmetic operation to the integer.\n", - "In its `on_messages` method, it applies the `operator_func` to the integer in the input message,\n", - "and returns a response with the result." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Callable, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.conditions import MaxMessageTermination\n", - "from autogen_agentchat.messages import ChatMessage\n", - "from autogen_agentchat.teams import SelectorGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_core import CancellationToken\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "\n", - "class ArithmeticAgent(BaseChatAgent):\n", - " def __init__(self, name: str, description: str, operator_func: Callable[[int], int]) -> None:\n", - " super().__init__(name, description=description)\n", - " self._operator_func = operator_func\n", - " self._message_history: List[ChatMessage] = []\n", - "\n", - " @property\n", - " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", - " return (TextMessage,)\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " # Update the message history.\n", - " # NOTE: it is possible the messages is an empty list, which means the agent was selected previously.\n", - " self._message_history.extend(messages)\n", - " # Parse the number in the last message.\n", - " assert isinstance(self._message_history[-1], TextMessage)\n", - " number = int(self._message_history[-1].content)\n", - " # Apply the operator function to the number.\n", - " result = self._operator_func(number)\n", - " # Create a new message with the result.\n", - " response_message = TextMessage(content=str(result), source=self.name)\n", - " # Update the message history.\n", - " self._message_history.append(response_message)\n", - " # Return the response.\n", - " return Response(chat_message=response_message)\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " pass" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "The `on_messages` method may be called with an empty list of messages, in which\n", - "case it means the agent was called previously and is now being called again,\n", - "without any new messages from the caller. 
So it is important to keep a history\n", - "of the previous messages received by the agent, and use that history to generate\n", - "the response.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with 5 instances of `ArithmeticAgent`:\n", - "\n", - "- one that adds 1 to the input integer,\n", - "- one that subtracts 1 from the input integer,\n", - "- one that multiplies the input integer by 2,\n", - "- one that divides the input integer by 2 and rounds down to the nearest integer, and\n", - "- one that returns the input integer unchanged.\n", - "\n", - "We then create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with these agents,\n", - "and set the appropriate selector settings:\n", - "\n", - "- allow the same agent to be selected consecutively to allow for repeated operations, and\n", - "- customize the selector prompt to tailor the model's response to the specific task." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Apply the operations to turn the given number into 25.\n", - "---------- user ----------\n", - "10\n", - "---------- multiply_agent ----------\n", - "20\n", - "---------- add_agent ----------\n", - "21\n", - "---------- multiply_agent ----------\n", - "42\n", - "---------- divide_agent ----------\n", - "21\n", - "---------- add_agent ----------\n", - "22\n", - "---------- add_agent ----------\n", - "23\n", - "---------- add_agent ----------\n", - "24\n", - "---------- add_agent ----------\n", - "25\n", - "---------- Summary ----------\n", - "Number of messages: 10\n", - "Finish reason: Maximum number of messages 10 reached, current message count: 10\n", - "Total prompt tokens: 0\n", - "Total completion tokens: 0\n", - "Duration: 2.40 seconds\n" - ] - } - ], - "source": [ - "async def run_number_agents() -> None:\n", - " # Create agents for number operations.\n", - " add_agent = ArithmeticAgent(\"add_agent\", \"Adds 1 to the number.\", lambda x: x + 1)\n", - " multiply_agent = ArithmeticAgent(\"multiply_agent\", \"Multiplies the number by 2.\", lambda x: x * 2)\n", - " subtract_agent = ArithmeticAgent(\"subtract_agent\", \"Subtracts 1 from the number.\", lambda x: x - 1)\n", - " divide_agent = ArithmeticAgent(\"divide_agent\", \"Divides the number by 2 and rounds down.\", lambda x: x // 2)\n", - " identity_agent = ArithmeticAgent(\"identity_agent\", \"Returns the number as is.\", lambda x: x)\n", - "\n", - " # The termination condition is to stop after 10 messages.\n", - " termination_condition = MaxMessageTermination(10)\n", - "\n", - " # Create a selector group chat.\n", - " selector_group_chat = SelectorGroupChat(\n", - " [add_agent, multiply_agent, subtract_agent, divide_agent, identity_agent],\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", - " termination_condition=termination_condition,\n", - " allow_repeated_speaker=True, # Allow the same agent to speak multiple times, necessary for this task.\n", - " selector_prompt=(\n", - " \"Available roles:\\n{roles}\\nTheir job descriptions:\\n{participants}\\n\"\n", - " \"Current conversation history:\\n{history}\\n\"\n", - " \"Please select the most appropriate role for the next message, and only return the role name.\"\n", - " ),\n", - " )\n", - "\n", - " # Run the selector group chat with a given task and stream the response.\n", - " 
task: List[ChatMessage] = [\n", - " TextMessage(content=\"Apply the operations to turn the given number into 25.\", source=\"user\"),\n", - " TextMessage(content=\"10\", source=\"user\"),\n", - " ]\n", - " stream = selector_group_chat.run_stream(task=task)\n", - " await Console(stream)\n", - "\n", - "\n", - "# Use asyncio.run(run_number_agents()) when running in a script.\n", - "await run_number_agents()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "From the output, we can see that the agents have successfully transformed the input integer\n", - "from 10 to 25 by choosing appropriate agents that apply the arithmetic operations in sequence." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using Custom Model Clients in Custom Agents\n", - "\n", - "One of the key features of the {py:class}`~autogen_agentchat.agents.AssistantAgent` preset in AgentChat is that it takes a `model_client` argument and can use it in responding to messages. However, in some cases, you may want your agent to use a custom model client that is not currently supported (see [supported model clients](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/components/model-clients.html)) or custom model behaviours. \n", - "\n", - "You can accomplish this with a custom agent that implements *your custom model client*.\n", - "\n", - "In the example below, we will walk through an example of a custom agent that uses the [Google Gemini SDK](https://github.com/googleapis/python-genai) directly to respond to messages.\n", - "\n", - "> **Note:** You will need to install the [Google Gemini SDK](https://github.com/googleapis/python-genai) to run this example. You can install it using the following command: \n", - "\n", - "```bash\n", - "pip install google-genai\n", - "``` " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# !pip install google-genai\n", - "import os\n", - "from typing import AsyncGenerator, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import AgentEvent, ChatMessage, StructuredMessage\n", - "from autogen_core import CancellationToken\n", - "from autogen_core.model_context import UnboundedChatCompletionContext\n", - "from autogen_core.models import AssistantMessage, RequestUsage, UserMessage\n", - "from google import genai\n", - "from google.genai import types\n", - "\n", - "\n", - "class GeminiAssistantAgent(BaseChatAgent):\n", - " def __init__(\n", - " self,\n", - " name: str,\n", - " description: str = \"An agent that provides assistance with ability to use tools.\",\n", - " model: str = \"gemini-1.5-flash-002\",\n", - " api_key: str = os.environ[\"GEMINI_API_KEY\"],\n", - " system_message: str\n", - " | None = \"You are a helpful assistant that can respond to messages. 
Reply with TERMINATE when the task has been completed.\",\n", - " ):\n", - " super().__init__(name=name, description=description)\n", - " self._model_context = UnboundedChatCompletionContext()\n", - " self._model_client = genai.Client(api_key=api_key)\n", - " self._system_message = system_message\n", - " self._model = model\n", - "\n", - " @property\n", - " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", - " return (TextMessage,)\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " final_response = None\n", - " async for message in self.on_messages_stream(messages, cancellation_token):\n", - " if isinstance(message, Response):\n", - " final_response = message\n", - "\n", - " if final_response is None:\n", - " raise AssertionError(\"The stream should have returned the final result.\")\n", - "\n", - " return final_response\n", - "\n", - " async def on_messages_stream(\n", - " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", - " # Add messages to the model context\n", - " for msg in messages:\n", - " if isinstance(msg, StructuredMessage):\n", - " serialized_content = msg.content.model_dump_json()\n", - " await self._model_context.add_message(UserMessage(content=serialized_content, source=msg.source))\n", - " else:\n", - " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", - "\n", - " # Get conversation history\n", - " history = [\n", - " (msg.source if hasattr(msg, \"source\") else \"system\")\n", - " + \": \"\n", - " + (msg.content if isinstance(msg.content, str) else \"\")\n", - " + \"\\n\"\n", - " for msg in await self._model_context.get_messages()\n", - " ]\n", - " # Generate response using Gemini\n", - " response = self._model_client.models.generate_content(\n", - " model=self._model,\n", - " contents=f\"History: {history}\\nGiven the history, please provide a response\",\n", - " config=types.GenerateContentConfig(\n", - " system_instruction=self._system_message,\n", - " temperature=0.3,\n", - " ),\n", - " )\n", - "\n", - " # Create usage metadata\n", - " usage = RequestUsage(\n", - " prompt_tokens=response.usage_metadata.prompt_token_count,\n", - " completion_tokens=response.usage_metadata.candidates_token_count,\n", - " )\n", - "\n", - " # Add response to model context\n", - " await self._model_context.add_message(AssistantMessage(content=response.text, source=self.name))\n", - "\n", - " # Yield the final response\n", - " yield Response(\n", - " chat_message=TextMessage(content=response.text, source=self.name, models_usage=usage),\n", - " inner_messages=[],\n", - " )\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " \"\"\"Reset the assistant by clearing the model context.\"\"\"\n", - " await self._model_context.clear()" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "What is the capital of New York?\n", - "---------- gemini_assistant ----------\n", - "Albany\n", - "TERMINATE\n", - "\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Custom Agents\n", + "\n", + "You may have agents with behaviors that do not fall into a preset. 
\n", + "In such cases, you can build custom agents.\n", + "\n", + "All agents in AgentChat inherit from {py:class}`~autogen_agentchat.agents.BaseChatAgent` \n", + "class and implement the following abstract methods and attributes:\n", + "\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n", + "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.BaseChatMessage` message types the agent can produce in its response.\n", + "\n", + "Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. If this method is not implemented, the agent\n", + "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n", + "that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n", + "yields all messages in the response." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## CountDownAgent\n", + "\n", + "In this example, we create a simple agent that counts down from a given number to zero,\n", + "and produces a stream of messages with the current count." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "3...\n", + "2...\n", + "1...\n", + "Done!\n" + ] + } + ], + "source": [ + "from typing import AsyncGenerator, List, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.messages import BaseChatMessage, BaseMessage, TextMessage\n", + "from autogen_core import CancellationToken\n", + "\n", + "\n", + "class CountDownAgent(BaseChatAgent):\n", + " def __init__(self, name: str, count: int = 3):\n", + " super().__init__(name, \"A simple agent that counts down.\")\n", + " self._count = count\n", + "\n", + " @property\n", + " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + " return (TextMessage,)\n", + "\n", + " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " # Calls the on_messages_stream.\n", + " response: Response | None = None\n", + " async for message in self.on_messages_stream(messages, cancellation_token):\n", + " if isinstance(message, Response):\n", + " response = message\n", + " assert response is not None\n", + " return response\n", + "\n", + " async def on_messages_stream(\n", + " self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken\n", + " ) -> AsyncGenerator[BaseMessage | Response, None]:\n", + " inner_messages: List[BaseMessage] = []\n", + " for i in range(self._count, 0, -1):\n", + " msg = TextMessage(content=f\"{i}...\", source=self.name)\n", + " inner_messages.append(msg)\n", + " yield msg\n", + " # The response is returned at the end of the stream.\n", + " # It contains the final 
message and all the inner messages.\n", + " yield Response(chat_message=TextMessage(content=\"Done!\", source=self.name), inner_messages=inner_messages)\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " pass\n", + "\n", + "\n", + "async def run_countdown_agent() -> None:\n", + " # Create a countdown agent.\n", + " countdown_agent = CountDownAgent(\"countdown\")\n", + "\n", + " # Run the agent with a given task and stream the response.\n", + " async for message in countdown_agent.on_messages_stream([], CancellationToken()):\n", + " if isinstance(message, Response):\n", + " print(message.chat_message.content)\n", + " else:\n", + " print(message.content)\n", + "\n", + "\n", + "# Use asyncio.run(run_countdown_agent()) when running in a script.\n", + "await run_countdown_agent()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ArithmeticAgent\n", + "\n", + "In this example, we create an agent class that can perform simple arithmetic operations\n", + "on a given integer. Then, we will use different instances of this agent class\n", + "in a {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", + "to transform a given integer into another integer by applying a sequence of arithmetic operations.\n", + "\n", + "The `ArithmeticAgent` class takes an `operator_func` that takes an integer and returns an integer,\n", + "after applying an arithmetic operation to the integer.\n", + "In its `on_messages` method, it applies the `operator_func` to the integer in the input message,\n", + "and returns a response with the result." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import Callable, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.conditions import MaxMessageTermination\n", + "from autogen_agentchat.messages import BaseChatMessage\n", + "from autogen_agentchat.teams import SelectorGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_core import CancellationToken\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "\n", + "class ArithmeticAgent(BaseChatAgent):\n", + " def __init__(self, name: str, description: str, operator_func: Callable[[int], int]) -> None:\n", + " super().__init__(name, description=description)\n", + " self._operator_func = operator_func\n", + " self._message_history: List[BaseChatMessage] = []\n", + "\n", + " @property\n", + " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + " return (TextMessage,)\n", + "\n", + " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " # Update the message history.\n", + " # NOTE: it is possible the messages is an empty list, which means the agent was selected previously.\n", + " self._message_history.extend(messages)\n", + " # Parse the number in the last message.\n", + " assert isinstance(self._message_history[-1], TextMessage)\n", + " number = int(self._message_history[-1].content)\n", + " # Apply the operator function to the number.\n", + " result = self._operator_func(number)\n", + " # Create a new message with the result.\n", + " response_message = TextMessage(content=str(result), source=self.name)\n", + " # Update the message history.\n", + " self._message_history.append(response_message)\n", + " # Return the response.\n", + 
" return Response(chat_message=response_message)\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " pass" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "The `on_messages` method may be called with an empty list of messages, in which\n", + "case it means the agent was called previously and is now being called again,\n", + "without any new messages from the caller. So it is important to keep a history\n", + "of the previous messages received by the agent, and use that history to generate\n", + "the response.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with 5 instances of `ArithmeticAgent`:\n", + "\n", + "- one that adds 1 to the input integer,\n", + "- one that subtracts 1 from the input integer,\n", + "- one that multiplies the input integer by 2,\n", + "- one that divides the input integer by 2 and rounds down to the nearest integer, and\n", + "- one that returns the input integer unchanged.\n", + "\n", + "We then create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with these agents,\n", + "and set the appropriate selector settings:\n", + "\n", + "- allow the same agent to be selected consecutively to allow for repeated operations, and\n", + "- customize the selector prompt to tailor the model's response to the specific task." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Apply the operations to turn the given number into 25.\n", + "---------- user ----------\n", + "10\n", + "---------- multiply_agent ----------\n", + "20\n", + "---------- add_agent ----------\n", + "21\n", + "---------- multiply_agent ----------\n", + "42\n", + "---------- divide_agent ----------\n", + "21\n", + "---------- add_agent ----------\n", + "22\n", + "---------- add_agent ----------\n", + "23\n", + "---------- add_agent ----------\n", + "24\n", + "---------- add_agent ----------\n", + "25\n", + "---------- Summary ----------\n", + "Number of messages: 10\n", + "Finish reason: Maximum number of messages 10 reached, current message count: 10\n", + "Total prompt tokens: 0\n", + "Total completion tokens: 0\n", + "Duration: 2.40 seconds\n" + ] + } + ], + "source": [ + "async def run_number_agents() -> None:\n", + " # Create agents for number operations.\n", + " add_agent = ArithmeticAgent(\"add_agent\", \"Adds 1 to the number.\", lambda x: x + 1)\n", + " multiply_agent = ArithmeticAgent(\"multiply_agent\", \"Multiplies the number by 2.\", lambda x: x * 2)\n", + " subtract_agent = ArithmeticAgent(\"subtract_agent\", \"Subtracts 1 from the number.\", lambda x: x - 1)\n", + " divide_agent = ArithmeticAgent(\"divide_agent\", \"Divides the number by 2 and rounds down.\", lambda x: x // 2)\n", + " identity_agent = ArithmeticAgent(\"identity_agent\", \"Returns the number as is.\", lambda x: x)\n", + "\n", + " # The termination condition is to stop after 10 messages.\n", + " termination_condition = MaxMessageTermination(10)\n", + "\n", + " # Create a selector group chat.\n", + " selector_group_chat = SelectorGroupChat(\n", + " [add_agent, multiply_agent, subtract_agent, divide_agent, identity_agent],\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", + " termination_condition=termination_condition,\n", + " allow_repeated_speaker=True, # 
Allow the same agent to speak multiple times, necessary for this task.\n", + " selector_prompt=(\n", + " \"Available roles:\\n{roles}\\nTheir job descriptions:\\n{participants}\\n\"\n", + " \"Current conversation history:\\n{history}\\n\"\n", + " \"Please select the most appropriate role for the next message, and only return the role name.\"\n", + " ),\n", + " )\n", + "\n", + " # Run the selector group chat with a given task and stream the response.\n", + " task: List[BaseChatMessage] = [\n", + " TextMessage(content=\"Apply the operations to turn the given number into 25.\", source=\"user\"),\n", + " TextMessage(content=\"10\", source=\"user\"),\n", + " ]\n", + " stream = selector_group_chat.run_stream(task=task)\n", + " await Console(stream)\n", + "\n", + "\n", + "# Use asyncio.run(run_number_agents()) when running in a script.\n", + "await run_number_agents()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "From the output, we can see that the agents have successfully transformed the input integer\n", + "from 10 to 25 by choosing appropriate agents that apply the arithmetic operations in sequence." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Custom Model Clients in Custom Agents\n", + "\n", + "One of the key features of the {py:class}`~autogen_agentchat.agents.AssistantAgent` preset in AgentChat is that it takes a `model_client` argument and can use it in responding to messages. However, in some cases, you may want your agent to use a custom model client that is not currently supported (see [supported model clients](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/components/model-clients.html)), or to implement custom model behaviors. \n", + "\n", + "You can accomplish this with a custom agent that implements *your custom model client*.\n", + "\n", + "Below, we walk through an example of a custom agent that uses the [Google Gemini SDK](https://github.com/googleapis/python-genai) directly to respond to messages.\n", + "\n", + "> **Note:** You will need to install the [Google Gemini SDK](https://github.com/googleapis/python-genai) to run this example. You can install it using the following command: \n", + "\n", + "```bash\n", + "pip install google-genai\n", + "``` " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# !pip install google-genai\n", + "import os\n", + "from typing import AsyncGenerator, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.messages import BaseChatMessage, BaseMessage, StructuredMessage, TextMessage\n", + "from autogen_core import CancellationToken\n", + "from autogen_core.model_context import UnboundedChatCompletionContext\n", + "from autogen_core.models import AssistantMessage, RequestUsage, UserMessage\n", + "from google import genai\n", + "from google.genai import types\n", + "\n", + "\n", + "class GeminiAssistantAgent(BaseChatAgent):\n", + " def __init__(\n", + " self,\n", + " name: str,\n", + " description: str = \"An agent that provides assistance with ability to use tools.\",\n", + " model: str = \"gemini-1.5-flash-002\",\n", + " api_key: str = os.environ[\"GEMINI_API_KEY\"],\n", + " system_message: str\n", + " | None = \"You are a helpful assistant that can respond to messages. 
Reply with TERMINATE when the task has been completed.\",\n", + " ):\n", + " super().__init__(name=name, description=description)\n", + " self._model_context = UnboundedChatCompletionContext()\n", + " self._model_client = genai.Client(api_key=api_key)\n", + " self._system_message = system_message\n", + " self._model = model\n", + "\n", + " @property\n", + " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + " return (TextMessage,)\n", + "\n", + " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " final_response = None\n", + " async for message in self.on_messages_stream(messages, cancellation_token):\n", + " if isinstance(message, Response):\n", + " final_response = message\n", + "\n", + " if final_response is None:\n", + " raise AssertionError(\"The stream should have returned the final result.\")\n", + "\n", + " return final_response\n", + "\n", + " async def on_messages_stream(\n", + " self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken\n", + " ) -> AsyncGenerator[BaseMessage | Response, None]:\n", + " # Add messages to the model context\n", + " for msg in messages:\n", + " if isinstance(msg, StructuredMessage):\n", + " serialized_content = msg.content.model_dump_json()\n", + " await self._model_context.add_message(UserMessage(content=serialized_content, source=msg.source))\n", + " else:\n", + " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", + "\n", + " # Get conversation history\n", + " history = [\n", + " (msg.source if hasattr(msg, \"source\") else \"system\")\n", + " + \": \"\n", + " + (msg.content if isinstance(msg.content, str) else \"\")\n", + " + \"\\n\"\n", + " for msg in await self._model_context.get_messages()\n", + " ]\n", + " # Generate response using Gemini\n", + " response = self._model_client.models.generate_content(\n", + " model=self._model,\n", + " contents=f\"History: {history}\\nGiven the history, please provide a response\",\n", + " config=types.GenerateContentConfig(\n", + " system_instruction=self._system_message,\n", + " temperature=0.3,\n", + " ),\n", + " )\n", + "\n", + " # Create usage metadata\n", + " usage = RequestUsage(\n", + " prompt_tokens=response.usage_metadata.prompt_token_count,\n", + " completion_tokens=response.usage_metadata.candidates_token_count,\n", + " )\n", + "\n", + " # Add response to model context\n", + " await self._model_context.add_message(AssistantMessage(content=response.text, source=self.name))\n", + "\n", + " # Yield the final response\n", + " yield Response(\n", + " chat_message=TextMessage(content=response.text, source=self.name, models_usage=usage),\n", + " inner_messages=[],\n", + " )\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " \"\"\"Reset the assistant by clearing the model context.\"\"\"\n", + " await self._model_context.clear()" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "What is the capital of New York?\n", + "---------- gemini_assistant ----------\n", + "Albany\n", + "TERMINATE\n", + "\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the capital of New York?', type='TextMessage'), TextMessage(source='gemini_assistant', models_usage=RequestUsage(prompt_tokens=46, 
completion_tokens=5), content='Albany\\nTERMINATE\\n', type='TextMessage')], stop_reason=None)" + ] + }, + "execution_count": 38, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "gemini_assistant = GeminiAssistantAgent(\"gemini_assistant\")\n", + "await Console(gemini_assistant.run_stream(task=\"What is the capital of New York?\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the example above, we have chosen to provide `model`, `api_key` and `system_message` as arguments - you can choose to provide any other arguments that are required by the model client you are using or that fit your application design. \n", + "\n", + "Now, let us explore how to use this custom agent as part of a team in AgentChat." + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write a Haiku poem with 4 lines about the fall season.\n", + "---------- primary ----------\n", + "Crimson leaves cascade, \n", + "Whispering winds sing of change, \n", + "Chill wraps the fading, \n", + "Nature's quilt, rich and warm.\n", + "---------- gemini_critic ----------\n", + "The poem is good, but it has four lines instead of three. A haiku must have three lines with a 5-7-5 syllable structure. The content is evocative of autumn, but the form is incorrect. Please revise to adhere to the haiku's syllable structure.\n", + "\n", + "---------- primary ----------\n", + "Thank you for your feedback! Here’s a revised haiku that follows the 5-7-5 syllable structure:\n", + "\n", + "Crimson leaves drift down, \n", + "Chill winds whisper through the gold, \n", + "Autumn’s breath is near.\n", + "---------- gemini_critic ----------\n", + "The revised haiku is much improved. It correctly follows the 5-7-5 syllable structure and maintains the evocative imagery of autumn. APPROVE\n", + "\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a Haiku poem with 4 lines about the fall season.', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=33, completion_tokens=31), content=\"Crimson leaves cascade, \\nWhispering winds sing of change, \\nChill wraps the fading, \\nNature's quilt, rich and warm.\", type='TextMessage'), TextMessage(source='gemini_critic', models_usage=RequestUsage(prompt_tokens=86, completion_tokens=60), content=\"The poem is good, but it has four lines instead of three. A haiku must have three lines with a 5-7-5 syllable structure. The content is evocative of autumn, but the form is incorrect. Please revise to adhere to the haiku's syllable structure.\\n\", type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=141, completion_tokens=49), content='Thank you for your feedback! Here’s a revised haiku that follows the 5-7-5 syllable structure:\\n\\nCrimson leaves drift down, \\nChill winds whisper through the gold, \\nAutumn’s breath is near.', type='TextMessage'), TextMessage(source='gemini_critic', models_usage=RequestUsage(prompt_tokens=211, completion_tokens=32), content='The revised haiku is much improved. It correctly follows the 5-7-5 syllable structure and maintains the evocative imagery of autumn. 
APPROVE\\n', type='TextMessage')], stop_reason=\"Text 'APPROVE' mentioned\")" + ] + }, + "execution_count": 39, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.conditions import TextMentionTermination\n", + "from autogen_agentchat.teams import RoundRobinGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "\n", + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n", + "\n", + "# Create the primary agent.\n", + "primary_agent = AssistantAgent(\n", + " \"primary\",\n", + " model_client=model_client,\n", + " system_message=\"You are a helpful AI assistant.\",\n", + ")\n", + "\n", + "# Create a critic agent based on our new GeminiAssistantAgent.\n", + "gemini_critic_agent = GeminiAssistantAgent(\n", + " \"gemini_critic\",\n", + " system_message=\"Provide constructive feedback. Respond with 'APPROVE' when your feedback has been addressed.\",\n", + ")\n", + "\n", + "\n", + "# Define a termination condition that stops the task if the critic approves or after 10 messages.\n", + "termination = TextMentionTermination(\"APPROVE\") | MaxMessageTermination(10)\n", + "\n", + "# Create a team with the primary and critic agents.\n", + "team = RoundRobinGroupChat([primary_agent, gemini_critic_agent], termination_condition=termination)\n", + "\n", + "await Console(team.run_stream(task=\"Write a Haiku poem with 4 lines about the fall season.\"))\n", + "await model_client.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The section above demonstrates several important concepts:\n", + "- We have developed a custom agent that uses the Google Gemini SDK to respond to messages. \n", + "- We show that this custom agent can be used as part of the broader AgentChat ecosystem - in this case as a participant in a {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` as long as it inherits from {py:class}`~autogen_agentchat.agents.BaseChatAgent`.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Making the Custom Agent Declarative \n", + "\n", + "AutoGen provides a [Component](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/component-config.html) interface for making the configuration of components serializable to a declarative format. This is useful for saving and loading configurations, and for sharing configurations with others. \n", + "\n", + "We accomplish this by inheriting from the `Component` class and implementing the `_from_config` and `_to_config` methods.\n", + "The declarative class can be serialized to a JSON format using the `dump_component` method, and deserialized from a JSON format using the `load_component` method."
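+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here is a minimal, self-contained sketch of the pattern before we apply it to the Gemini agent. The `GreeterAgent` and `GreeterAgentConfig` names are illustrative only and are not part of the API:\n", + "\n", + "```python\n", + "from typing import Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.messages import BaseChatMessage, TextMessage\n", + "from autogen_core import CancellationToken, Component\n", + "from pydantic import BaseModel\n", + "from typing_extensions import Self\n", + "\n", + "\n", + "class GreeterAgentConfig(BaseModel):\n", + "    name: str\n", + "    greeting: str = \"Hello!\"\n", + "\n", + "\n", + "class GreeterAgent(BaseChatAgent, Component[GreeterAgentConfig]):\n", + "    component_config_schema = GreeterAgentConfig\n", + "\n", + "    def __init__(self, name: str, greeting: str = \"Hello!\") -> None:\n", + "        super().__init__(name=name, description=\"An agent that greets the user.\")\n", + "        self._greeting = greeting\n", + "\n", + "    @property\n", + "    def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + "        return (TextMessage,)\n", + "\n", + "    async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + "        # The greeting is part of the configuration, so it survives a dump/load round trip.\n", + "        return Response(chat_message=TextMessage(content=self._greeting, source=self.name))\n", + "\n", + "    async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + "        pass\n", + "\n", + "    @classmethod\n", + "    def _from_config(cls, config: GreeterAgentConfig) -> Self:\n", + "        # Reconstruct the agent from its declarative configuration.\n", + "        return cls(name=config.name, greeting=config.greeting)\n", + "\n", + "    def _to_config(self) -> GreeterAgentConfig:\n", + "        # Capture the constructor arguments as a declarative configuration.\n", + "        return GreeterAgentConfig(name=self.name, greeting=self._greeting)\n", + "```\n", + "\n", + "We now apply the same structure to the Gemini agent."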
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from typing import AsyncGenerator, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.messages import BaseChatMessage, BaseMessage\n", + "from autogen_core import CancellationToken, Component\n", + "from pydantic import BaseModel\n", + "from typing_extensions import Self\n", + "\n", + "\n", + "class GeminiAssistantAgentConfig(BaseModel):\n", + " name: str\n", + " description: str = \"An agent that provides assistance with ability to use tools.\"\n", + " model: str = \"gemini-1.5-flash-002\"\n", + " system_message: str | None = None\n", + "\n", + "\n", + "class GeminiAssistantAgent(BaseChatAgent, Component[GeminiAssistantAgentConfig]): # type: ignore[no-redef]\n", + " component_config_schema = GeminiAssistantAgentConfig\n", + " # component_provider_override = \"mypackage.agents.GeminiAssistantAgent\"\n", + "\n", + " def __init__(\n", + " self,\n", + " name: str,\n", + " description: str = \"An agent that provides assistance with ability to use tools.\",\n", + " model: str = \"gemini-1.5-flash-002\",\n", + " api_key: str = os.environ[\"GEMINI_API_KEY\"],\n", + " system_message: str\n", + " | None = \"You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.\",\n", + " ):\n", + " super().__init__(name=name, description=description)\n", + " self._model_context = UnboundedChatCompletionContext()\n", + " self._model_client = genai.Client(api_key=api_key)\n", + " self._system_message = system_message\n", + " self._model = model\n", + "\n", + " @property\n", + " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + " return (TextMessage,)\n", + "\n", + " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " final_response = None\n", + " async for message in self.on_messages_stream(messages, cancellation_token):\n", + " if isinstance(message, Response):\n", + " final_response = message\n", + "\n", + " if final_response is None:\n", + " raise AssertionError(\"The stream should have returned the final result.\")\n", + "\n", + " return final_response\n", + "\n", + " async def on_messages_stream(\n", + " self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken\n", + " ) -> AsyncGenerator[BaseMessage | Response, None]:\n", + " # Add messages to the model context\n", + " for msg in messages:\n", + " if isinstance(msg, StructuredMessage):\n", + " serialized_content = msg.content.model_dump_json()\n", + " await self._model_context.add_message(UserMessage(content=serialized_content, source=msg.source))\n", + " else:\n", + " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", + "\n", + " # Get conversation history\n", + " history = [\n", + " (msg.source if hasattr(msg, \"source\") else \"system\")\n", + " + \": \"\n", + " + (msg.content if isinstance(msg.content, str) else \"\")\n", + " + \"\\n\"\n", + " for msg in await self._model_context.get_messages()\n", + " ]\n", + "\n", + " # Generate response using Gemini\n", + " response = self._model_client.models.generate_content(\n", + " model=self._model,\n", + " contents=f\"History: {history}\\nGiven the history, please provide a response\",\n", + " config=types.GenerateContentConfig(\n", + " 
system_instruction=self._system_message,\n", + " temperature=0.3,\n", + " ),\n", + " )\n", + "\n", + " # Create usage metadata\n", + " usage = RequestUsage(\n", + " prompt_tokens=response.usage_metadata.prompt_token_count,\n", + " completion_tokens=response.usage_metadata.candidates_token_count,\n", + " )\n", + "\n", + " # Add response to model context\n", + " await self._model_context.add_message(AssistantMessage(content=response.text, source=self.name))\n", + "\n", + " # Yield the final response\n", + " yield Response(\n", + " chat_message=TextMessage(content=response.text, source=self.name, models_usage=usage),\n", + " inner_messages=[],\n", + " )\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " \"\"\"Reset the assistant by clearing the model context.\"\"\"\n", + " await self._model_context.clear()\n", + "\n", + " @classmethod\n", + " def _from_config(cls, config: GeminiAssistantAgentConfig) -> Self:\n", + " return cls(\n", + " name=config.name, description=config.description, model=config.model, system_message=config.system_message\n", + " )\n", + "\n", + " def _to_config(self) -> GeminiAssistantAgentConfig:\n", + " return GeminiAssistantAgentConfig(\n", + " name=self.name,\n", + " description=self.description,\n", + " model=self._model,\n", + " system_message=self._system_message,\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that the required methods are implemented, we can dump the custom agent to a JSON format and then load the agent back from that JSON.\n", + "\n", + "> Note: You should set the `component_provider_override` class variable to the full path of the module containing the custom agent class, e.g., `mypackage.agents.GeminiAssistantAgent`. This is used by the `load_component` method to determine how to instantiate the class." + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"provider\": \"__main__.GeminiAssistantAgent\",\n", + " \"component_type\": \"agent\",\n", + " \"version\": 1,\n", + " \"component_version\": 1,\n", + " \"description\": null,\n", + " \"label\": \"GeminiAssistantAgent\",\n", + " \"config\": {\n", + " \"name\": \"gemini_assistant\",\n", + " \"description\": \"An agent that provides assistance with ability to use tools.\",\n", + " \"model\": \"gemini-1.5-flash-002\",\n", + " \"system_message\": \"You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.\"\n", + " }\n", + "}\n", + "<__main__.GeminiAssistantAgent object at 0x11a5c5a90>\n" + ] + } + ], + "source": [ + "gemini_assistant = GeminiAssistantAgent(\"gemini_assistant\")\n", + "config = gemini_assistant.dump_component()\n", + "print(config.model_dump_json(indent=2))\n", + "loaded_agent = GeminiAssistantAgent.load_component(config)\n", + "print(loaded_agent)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Steps \n", + "\n", + "So far, we have seen how to create custom agents, add custom model clients to agents, and make custom agents declarative. There are a few ways in which this basic sample can be extended:\n", + "\n", + "- Extend the Gemini model client to handle function calling similar to the {py:class}`~autogen_agentchat.agents.AssistantAgent` class; see 
https://ai.google.dev/gemini-api/docs/function-calling \n", + "- Implement a package with a custom agent and experiment with using its declarative format in a tool like [AutoGen Studio](https://microsoft.github.io/autogen/stable/user-guide/autogenstudio-user-guide/index.html)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the capital of New York?', type='TextMessage'), TextMessage(source='gemini_assistant', models_usage=RequestUsage(prompt_tokens=46, completion_tokens=5), content='Albany\\nTERMINATE\\n', type='TextMessage')], stop_reason=None)" - ] - }, - "execution_count": 38, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "gemini_assistant = GeminiAssistantAgent(\"gemini_assistant\")\n", - "await Console(gemini_assistant.run_stream(task=\"What is the capital of New York?\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In the example above, we have chosen to provide `model`, `api_key` and `system_message` as arguments - you can choose to provide any other arguments that are required by the model client you are using or fits with your application design. \n", - "\n", - "Now, let us explore how to use this custom agent as part of a team in AgentChat." - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a Haiku poem with 4 lines about the fall season.\n", - "---------- primary ----------\n", - "Crimson leaves cascade, \n", - "Whispering winds sing of change, \n", - "Chill wraps the fading, \n", - "Nature's quilt, rich and warm.\n", - "---------- gemini_critic ----------\n", - "The poem is good, but it has four lines instead of three. A haiku must have three lines with a 5-7-5 syllable structure. The content is evocative of autumn, but the form is incorrect. Please revise to adhere to the haiku's syllable structure.\n", - "\n", - "---------- primary ----------\n", - "Thank you for your feedback! Here’s a revised haiku that follows the 5-7-5 syllable structure:\n", - "\n", - "Crimson leaves drift down, \n", - "Chill winds whisper through the gold, \n", - "Autumn’s breath is near.\n", - "---------- gemini_critic ----------\n", - "The revised haiku is much improved. It correctly follows the 5-7-5 syllable structure and maintains the evocative imagery of autumn. APPROVE\n", - "\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a Haiku poem with 4 lines about the fall season.', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=33, completion_tokens=31), content=\"Crimson leaves cascade, \\nWhispering winds sing of change, \\nChill wraps the fading, \\nNature's quilt, rich and warm.\", type='TextMessage'), TextMessage(source='gemini_critic', models_usage=RequestUsage(prompt_tokens=86, completion_tokens=60), content=\"The poem is good, but it has four lines instead of three. 
A haiku must have three lines with a 5-7-5 syllable structure. The content is evocative of autumn, but the form is incorrect. Please revise to adhere to the haiku's syllable structure.\\n\", type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=141, completion_tokens=49), content='Thank you for your feedback! Here’s a revised haiku that follows the 5-7-5 syllable structure:\\n\\nCrimson leaves drift down, \\nChill winds whisper through the gold, \\nAutumn’s breath is near.', type='TextMessage'), TextMessage(source='gemini_critic', models_usage=RequestUsage(prompt_tokens=211, completion_tokens=32), content='The revised haiku is much improved. It correctly follows the 5-7-5 syllable structure and maintains the evocative imagery of autumn. APPROVE\\n', type='TextMessage')], stop_reason=\"Text 'APPROVE' mentioned\")" - ] - }, - "execution_count": 39, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import TextMentionTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "\n", - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n", - "\n", - "# Create the primary agent.\n", - "primary_agent = AssistantAgent(\n", - " \"primary\",\n", - " model_client=model_client,\n", - " system_message=\"You are a helpful AI assistant.\",\n", - ")\n", - "\n", - "# Create a critic agent based on our new GeminiAssistantAgent.\n", - "gemini_critic_agent = GeminiAssistantAgent(\n", - " \"gemini_critic\",\n", - " system_message=\"Provide constructive feedback. Respond with 'APPROVE' to when your feedbacks are addressed.\",\n", - ")\n", - "\n", - "\n", - "# Define a termination condition that stops the task if the critic approves or after 10 messages.\n", - "termination = TextMentionTermination(\"APPROVE\") | MaxMessageTermination(10)\n", - "\n", - "# Create a team with the primary and critic agents.\n", - "team = RoundRobinGroupChat([primary_agent, gemini_critic_agent], termination_condition=termination)\n", - "\n", - "await Console(team.run_stream(task=\"Write a Haiku poem with 4 lines about the fall season.\"))\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In section above, we show several very important concepts:\n", - "- We have developed a custom agent that uses the Google Gemini SDK to respond to messages. \n", - "- We show that this custom agent can be used as part of the broader AgentChat ecosystem - in this case as a participant in a {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` as long as it inherits from {py:class}`~autogen_agentchat.agents.BaseChatAgent`.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Making the Custom Agent Declarative \n", - "\n", - "Autogen provides a [Component](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/component-config.html) interface for making the configuration of components serializable to a declarative format. This is useful for saving and loading configurations, and for sharing configurations with others. 
\n", - "\n", - "We accomplish this by inheriting from the `Component` class and implementing the `_from_config` and `_to_config` methods.\n", - "The declarative class can be serialized to a JSON format using the `dump_component` method, and deserialized from a JSON format using the `load_component` method." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "from typing import AsyncGenerator, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", - "from autogen_core import CancellationToken, Component\n", - "from pydantic import BaseModel\n", - "from typing_extensions import Self\n", - "\n", - "\n", - "class GeminiAssistantAgentConfig(BaseModel):\n", - " name: str\n", - " description: str = \"An agent that provides assistance with ability to use tools.\"\n", - " model: str = \"gemini-1.5-flash-002\"\n", - " system_message: str | None = None\n", - "\n", - "\n", - "class GeminiAssistantAgent(BaseChatAgent, Component[GeminiAssistantAgentConfig]): # type: ignore[no-redef]\n", - " component_config_schema = GeminiAssistantAgentConfig\n", - " # component_provider_override = \"mypackage.agents.GeminiAssistantAgent\"\n", - "\n", - " def __init__(\n", - " self,\n", - " name: str,\n", - " description: str = \"An agent that provides assistance with ability to use tools.\",\n", - " model: str = \"gemini-1.5-flash-002\",\n", - " api_key: str = os.environ[\"GEMINI_API_KEY\"],\n", - " system_message: str\n", - " | None = \"You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.\",\n", - " ):\n", - " super().__init__(name=name, description=description)\n", - " self._model_context = UnboundedChatCompletionContext()\n", - " self._model_client = genai.Client(api_key=api_key)\n", - " self._system_message = system_message\n", - " self._model = model\n", - "\n", - " @property\n", - " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", - " return (TextMessage,)\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " final_response = None\n", - " async for message in self.on_messages_stream(messages, cancellation_token):\n", - " if isinstance(message, Response):\n", - " final_response = message\n", - "\n", - " if final_response is None:\n", - " raise AssertionError(\"The stream should have returned the final result.\")\n", - "\n", - " return final_response\n", - "\n", - " async def on_messages_stream(\n", - " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", - " # Add messages to the model context\n", - " for msg in messages:\n", - " if isinstance(msg, StructuredMessage):\n", - " serialized_content = msg.content.model_dump_json()\n", - " await self._model_context.add_message(UserMessage(content=serialized_content, source=msg.source))\n", - " else:\n", - " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", - "\n", - " # Get conversation history\n", - " history = [\n", - " (msg.source if hasattr(msg, \"source\") else \"system\")\n", - " + \": \"\n", - " + (msg.content if isinstance(msg.content, str) else \"\")\n", - " + \"\\n\"\n", - " for msg in await self._model_context.get_messages()\n", - " 
]\n", - "\n", - " # Generate response using Gemini\n", - " response = self._model_client.models.generate_content(\n", - " model=self._model,\n", - " contents=f\"History: {history}\\nGiven the history, please provide a response\",\n", - " config=types.GenerateContentConfig(\n", - " system_instruction=self._system_message,\n", - " temperature=0.3,\n", - " ),\n", - " )\n", - "\n", - " # Create usage metadata\n", - " usage = RequestUsage(\n", - " prompt_tokens=response.usage_metadata.prompt_token_count,\n", - " completion_tokens=response.usage_metadata.candidates_token_count,\n", - " )\n", - "\n", - " # Add response to model context\n", - " await self._model_context.add_message(AssistantMessage(content=response.text, source=self.name))\n", - "\n", - " # Yield the final response\n", - " yield Response(\n", - " chat_message=TextMessage(content=response.text, source=self.name, models_usage=usage),\n", - " inner_messages=[],\n", - " )\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " \"\"\"Reset the assistant by clearing the model context.\"\"\"\n", - " await self._model_context.clear()\n", - "\n", - " @classmethod\n", - " def _from_config(cls, config: GeminiAssistantAgentConfig) -> Self:\n", - " return cls(\n", - " name=config.name, description=config.description, model=config.model, system_message=config.system_message\n", - " )\n", - "\n", - " def _to_config(self) -> GeminiAssistantAgentConfig:\n", - " return GeminiAssistantAgentConfig(\n", - " name=self.name,\n", - " description=self.description,\n", - " model=self._model,\n", - " system_message=self._system_message,\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we have the required methods implemented, we can now load and dump the custom agent to and from a JSON format, and then load the agent from the JSON format.\n", - " \n", - " > Note: You should set the `component_provider_override` class variable to the full path of the module containing the custom agent class e.g., (`mypackage.agents.GeminiAssistantAgent`). This is used by `load_component` method to determine how to instantiate the class. \n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " \"provider\": \"__main__.GeminiAssistantAgent\",\n", - " \"component_type\": \"agent\",\n", - " \"version\": 1,\n", - " \"component_version\": 1,\n", - " \"description\": null,\n", - " \"label\": \"GeminiAssistantAgent\",\n", - " \"config\": {\n", - " \"name\": \"gemini_assistant\",\n", - " \"description\": \"An agent that provides assistance with ability to use tools.\",\n", - " \"model\": \"gemini-1.5-flash-002\",\n", - " \"system_message\": \"You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.\"\n", - " }\n", - "}\n", - "<__main__.GeminiAssistantAgent object at 0x11a5c5a90>\n" - ] - } - ], - "source": [ - "gemini_assistant = GeminiAssistantAgent(\"gemini_assistant\")\n", - "config = gemini_assistant.dump_component()\n", - "print(config.model_dump_json(indent=2))\n", - "loaded_agent = GeminiAssistantAgent.load_component(config)\n", - "print(loaded_agent)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Next Steps \n", - "\n", - "So far, we have seen how to create custom agents, add custom model clients to agents, and make custom agents declarative. 
There are a few ways in which this basic sample can be extended:\n", - "\n", - "- Extend the Gemini model client to handle function calling similar to the {py:class}`~autogen_agentchat.agents.AssistantAgent` class. https://ai.google.dev/gemini-api/docs/function-calling \n", - "- Implement a package with a custom agent and experiment with using its declarative format in a tool like [AutoGen Studio](https://microsoft.github.io/autogen/stable/user-guide/autogenstudio-user-guide/index.html)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.7" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md index d0533435fe42..8cc2b1041b5d 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md @@ -462,18 +462,18 @@ and implement the `on_messages`, `on_reset`, and `produced_message_types` method from typing import Sequence from autogen_core import CancellationToken from autogen_agentchat.agents import BaseChatAgent -from autogen_agentchat.messages import TextMessage, ChatMessage +from autogen_agentchat.messages import TextMessage, BaseChatMessage from autogen_agentchat.base import Response class CustomAgent(BaseChatAgent): - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: return Response(chat_message=TextMessage(content="Custom reply", source=self.name)) async def on_reset(self, cancellation_token: CancellationToken) -> None: pass @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: return (TextMessage,) ``` @@ -742,8 +742,7 @@ You can use the following conversion functions to convert between a v0.4 message from typing import Any, Dict, List, Literal from autogen_agentchat.messages import ( - AgentEvent, - ChatMessage, + BaseMessage, HandoffMessage, MultiModalMessage, StopMessage, @@ -757,14 +756,14 @@ from autogen_core.models import FunctionExecutionResult def convert_to_v02_message( - message: AgentEvent | ChatMessage, + message: BaseMessage, role: Literal["assistant", "user", "tool"], image_detail: Literal["auto", "high", "low"] = "auto", ) -> Dict[str, Any]: """Convert a v0.4 AgentChat message to a v0.2 message. Args: - message (AgentEvent | ChatMessage): The message to convert. + message (BaseMessage): The message to convert. role (Literal["assistant", "user", "tool"]): The role of the message. image_detail (Literal["auto", "high", "low"], optional): The detail level of image content in multi-modal message. Defaults to "auto". 
@@ -810,7 +809,7 @@ def convert_to_v02_message( return v02_message -def convert_to_v04_message(message: Dict[str, Any]) -> AgentEvent | ChatMessage: +def convert_to_v04_message(message: Dict[str, Any]) -> BaseMessage: """Convert a v0.2 message to a v0.4 AgentChat message.""" if "tool_calls" in message: tool_calls: List[FunctionCall] = [] @@ -1065,7 +1064,7 @@ import asyncio from typing import Sequence from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination -from autogen_agentchat.messages import AgentEvent, ChatMessage +from autogen_agentchat.messages import BaseMessage from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.ui import Console from autogen_ext.models.openai import OpenAIChatCompletionClient @@ -1141,7 +1140,7 @@ def create_team(model_client : OpenAIChatCompletionClient) -> SelectorGroupChat: # The selector function is a function that takes the current message thread of the group chat # and returns the next speaker's name. If None is returned, the LLM-based selection method will be used. - def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None: + def selector_func(messages: Sequence[BaseMessage]) -> str | None: if messages[-1].source != planning_agent.name: return planning_agent.name # Always return to the planning agent after the other agents have spoken. return None @@ -1190,12 +1189,12 @@ from typing import Sequence from autogen_core import CancellationToken from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.teams import RoundRobinGroupChat -from autogen_agentchat.messages import TextMessage, ChatMessage +from autogen_agentchat.messages import TextMessage, BaseChatMessage from autogen_agentchat.base import Response class CountingAgent(BaseChatAgent): """An agent that returns a new number by adding 1 to the last number in the input messages.""" - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: if len(messages) == 0: last_number = 0 # Start from 0 if no messages are given. else: @@ -1207,7 +1206,7 @@ class CountingAgent(BaseChatAgent): pass @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: return (TextMessage,) class NestedCountingAgent(BaseChatAgent): @@ -1217,7 +1216,7 @@ class NestedCountingAgent(BaseChatAgent): super().__init__(name, description="An agent that counts numbers.") self._counting_team = counting_team - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: # Run the inner team with the given messages and returns the last message produced by the team. result = await self._counting_team.run(task=messages, cancellation_token=cancellation_token) # To stream the inner messages, implement `on_messages_stream` and use that to implement `on_messages`. 
@@ -1229,7 +1228,7 @@ class NestedCountingAgent(BaseChatAgent): await self._counting_team.reset() @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: return (TextMessage,) async def main() -> None: diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb index 522c26b2098c..5c1e0027650a 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb @@ -1,1026 +1,1026 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Selector Group Chat" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` implements a team where participants take turns broadcasting messages to all other members. A generative model (e.g., an LLM) selects the next speaker based on the shared context, enabling dynamic, context-aware collaboration.\n", - "\n", - "Key features include:\n", - "\n", - "- Model-based speaker selection\n", - "- Configurable participant roles and descriptions\n", - "- Prevention of consecutive turns by the same speaker (optional)\n", - "- Customizable selection prompting\n", - "- Customizable selection function to override the default model-based selection\n", - "- Customizable candidate function to narrow-down the set of agents for selection using model\n", - "\n", - "```{note}\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a high-level API. For more control and customization, refer to the [Group Chat Pattern](../core-user-guide/design-patterns/group-chat.ipynb) in the Core API documentation to implement your own group chat logic.\n", - "```\n", - "\n", - "## How Does it Work?\n", - "\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a group chat similar to {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", - "but with a model-based next speaker selection mechanism.\n", - "When the team receives a task through {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream`,\n", - "the following steps are executed:\n", - "\n", - "1. The team analyzes the current conversation context, including the conversation history and participants' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes, to determine the next speaker using a model. By default, the team will not select the same speak consecutively unless it is the only agent available. This can be changed by setting `allow_repeated_speaker=True`. You can also override the model by providing a custom selection function.\n", - "2. The team prompts the selected speaker agent to provide a response, which is then **broadcasted** to all other participants.\n", - "3. The termination condition is checked to determine if the conversation should end, if not, the process repeats from step 1.\n", - "4. 
When the conversation ends, the team returns the {py:class}`~autogen_agentchat.base.TaskResult` containing the conversation history from this task.\n", - "\n", - "Once the team finishes the task, the conversation context is kept within the team and all participants, so the next task can continue from the previous conversation context.\n", - "You can reset the conversation context by calling {py:meth}`~autogen_agentchat.teams.BaseGroupChat.reset`.\n", - "\n", - "In this section, we will demonstrate how to use {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with a simple example for a web search and data analysis task." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Example: Web Search/Analysis" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import List, Sequence\n", - "\n", - "from autogen_agentchat.agents import AssistantAgent, UserProxyAgent\n", - "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", - "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", - "from autogen_agentchat.teams import SelectorGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Agents\n", - "\n", - "![Selector Group Chat](selector-group-chat.svg)\n", - "\n", - "This system uses three specialized agents:\n", - "\n", - "- **Planning Agent**: The strategic coordinator that breaks down complex tasks into manageable subtasks. \n", - "- **Web Search Agent**: An information retrieval specialist that interfaces with the `search_web_tool`.\n", - "- **Data Analyst Agent**: An agent specialist in performing calculations equipped with `percentage_change_tool`. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The tools `search_web_tool` and `percentage_change_tool` are external tools that the agents can use to perform their tasks." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "# Note: This example uses mock tools instead of real APIs for demonstration purposes\n", - "def search_web_tool(query: str) -> str:\n", - " if \"2006-2007\" in query:\n", - " return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \"\"\"\n", - " elif \"2007-2008\" in query:\n", - " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n", - " elif \"2008-2009\" in query:\n", - " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n", - " return \"No data found.\"\n", - "\n", - "\n", - "def percentage_change_tool(start: float, end: float) -> float:\n", - " return ((end - start) / start) * 100" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's create the specialized agents using the {py:class}`~autogen_agentchat.agents.AssistantAgent` class.\n", - "It is important to note that the agents' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes are used by the model to determine the next speaker,\n", - "so it is recommended to provide meaningful names and descriptions." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "\n", - "planning_agent = AssistantAgent(\n", - " \"PlanningAgent\",\n", - " description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n", - " model_client=model_client,\n", - " system_message=\"\"\"\n", - " You are a planning agent.\n", - " Your job is to break down complex tasks into smaller, manageable subtasks.\n", - " Your team members are:\n", - " WebSearchAgent: Searches for information\n", - " DataAnalystAgent: Performs calculations\n", - "\n", - " You only plan and delegate tasks - you do not execute them yourself.\n", - "\n", - " When assigning tasks, use this format:\n", - " 1. : \n", - "\n", - " After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n", - " \"\"\",\n", - ")\n", - "\n", - "web_search_agent = AssistantAgent(\n", - " \"WebSearchAgent\",\n", - " description=\"An agent for searching information on the web.\",\n", - " tools=[search_web_tool],\n", - " model_client=model_client,\n", - " system_message=\"\"\"\n", - " You are a web search agent.\n", - " Your only tool is search_tool - use it to find information.\n", - " You make only one search call at a time.\n", - " Once you have the results, you never do calculations based on them.\n", - " \"\"\",\n", - ")\n", - "\n", - "data_analyst_agent = AssistantAgent(\n", - " \"DataAnalystAgent\",\n", - " description=\"An agent for performing calculations.\",\n", - " model_client=model_client,\n", - " tools=[percentage_change_tool],\n", - " system_message=\"\"\"\n", - " You are a data analyst.\n", - " Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n", - " If you have not seen the data, ask for it.\n", - " \"\"\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` returns the\n", - "tool output as the response. If your tool does not return a well-formed\n", - "string in natural language format, you may want to add a reflection step\n", - "within the agent by setting `reflect_on_tool_use=True` when creating the agent.\n", - "This will allow the agent to reflect on the tool output and provide a natural\n", - "language response.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Workflow\n", - "\n", - "1. The task is received by the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` which, based on agent descriptions, selects the most appropriate agent to handle the initial task (typically the Planning Agent).\n", - "\n", - "2. The **Planning Agent** analyzes the task and breaks it down into subtasks, assigning each to the most appropriate agent using the format:\n", - " ` : `\n", - "\n", - "3. Based on the conversation context and agent descriptions, the {py:class}`~autogen_agent.teams.SelectorGroupChat` manager dynamically selects the next agent to handle their assigned subtask.\n", - "\n", - "4. The **Web Search Agent** performs searches one at a time, storing results in the shared conversation history.\n", - "\n", - "5. The **Data Analyst** processes the gathered information using available calculation tools when selected.\n", - "\n", - "6. 
The workflow continues with agents being dynamically selected until either:\n", - " - The Planning Agent determines all subtasks are complete and sends \"TERMINATE\"\n", - " - An alternative termination condition is met (e.g., a maximum number of messages)\n", - "\n", - "When defining your agents, make sure to include a helpful {py:attr}`~autogen_agentchat.base.ChatAgent.description` since this is used to decide which agent to select next." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Termination Conditions\n", - "\n", - "Let's use two termination conditions:\n", - "{py:class}`~autogen_agentchat.conditions.TextMentionTermination` to end the conversation when the Planning Agent sends \"TERMINATE\",\n", - "and {py:class}`~autogen_agentchat.conditions.MaxMessageTermination` to limit the conversation to 25 messages to avoid infinite loop." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "text_mention_termination = TextMentionTermination(\"TERMINATE\")\n", - "max_messages_termination = MaxMessageTermination(max_messages=25)\n", - "termination = text_mention_termination | max_messages_termination" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Selector Prompt\n", - "\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` uses a model to select\n", - "the next speaker based on the conversation context.\n", - "We will use a custom selector prompt to properly align with the workflow." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "selector_prompt = \"\"\"Select an agent to perform task.\n", - "\n", - "{roles}\n", - "\n", - "Current conversation context:\n", - "{history}\n", - "\n", - "Read the above conversation, then select an agent from {participants} to perform the next task.\n", - "Make sure the planner agent has assigned tasks before other agents start working.\n", - "Only select one agent.\n", - "\"\"\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{tip}\n", - "Try not to overload the model with too much instruction in the selector prompt.\n", - "\n", - "What is too much? It depends on the capabilities of the model you are using.\n", - "For GPT-4o and equivalents, you can use a selector prompt with a condition for when each speaker should be selected.\n", - "For smaller models such as Phi-4, you should keep the selector prompt as simple as possible\n", - "such as the one used in this example.\n", - "\n", - "Generally, if you find yourself writing multiple conditions for each agent,\n", - "it is a sign that you should consider using a custom selection function,\n", - "or breaking down the task into smaller, sequential tasks to be handled by\n", - "separate agents or teams.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Running the Team\n", - "\n", - "Let's create the team with the agents, termination conditions, and custom selector prompt." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination,\n", - " selector_prompt=selector_prompt,\n", - " allow_repeated_speaker=True, # Allow an agent to speak multiple turns in a row.\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we run the team with a task to find information about an NBA player." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", - "---------- PlanningAgent ----------\n", - "To complete this task, we need to perform the following subtasks:\n", - "\n", - "1. Find out which Miami Heat player had the highest points in the 2006-2007 season.\n", - "2. Gather data on this player's total rebounds for the 2007-2008 season.\n", - "3. Gather data on this player's total rebounds for the 2008-2009 season.\n", - "4. Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", - "\n", - "I'll assign these tasks accordingly:\n", - "\n", - "1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", - "2. WebSearchAgent: Find the total rebounds for this player in the 2007-2008 NBA season.\n", - "3. WebSearchAgent: Find the total rebounds for this player in the 2008-2009 NBA season.\n", - "4. 
DataAnalystAgent: Calculate the percentage change in total rebounds from the 2007-2008 season to the 2008-2009 season for this player.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_89tUNHaAM0kKQYPJLleGUKK7', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_89tUNHaAM0kKQYPJLleGUKK7', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- WebSearchAgent ----------\n", - "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\n", - "\n", - "Next, I will search for Dwyane Wade's total rebounds for the 2007-2008 season.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_RC55TkSjG3JXRuVOTPrcE1RL', arguments='{\"query\":\"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_RC55TkSjG3JXRuVOTPrcE1RL', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_pBXoABrErDow0rZjw3tjOZol', arguments='{\"query\":\"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pBXoABrErDow0rZjw3tjOZol', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_qMxxXtcJsiK8KFSSCx3zm0is', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_qMxxXtcJsiK8KFSSCx3zm0is', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "The player with the highest points for the Miami Heat in the 2006-2007 NBA season was Dwyane Wade, who scored 1,397 points. The percentage change in Dwyane Wade's total rebounds from 214 in the 2007-2008 season to 398 in the 2008-2009 season is approximately 85.98%.\n", - "\n", - "TERMINATE\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Selector Group Chat" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` implements a team where participants take turns broadcasting messages to all other members. 
A generative model (e.g., an LLM) selects the next speaker based on the shared context, enabling dynamic, context-aware collaboration.\n",
+    "\n",
+    "Key features include:\n",
+    "\n",
+    "- Model-based speaker selection\n",
+    "- Configurable participant roles and descriptions\n",
+    "- Prevention of consecutive turns by the same speaker (optional)\n",
+    "- Customizable selection prompting\n",
+    "- Customizable selection function to override the default model-based selection\n",
+    "- Customizable candidate function to narrow down the set of agents the model selects from\n",
+    "\n",
+    "```{note}\n",
+    "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a high-level API. For more control and customization, refer to the [Group Chat Pattern](../core-user-guide/design-patterns/group-chat.ipynb) in the Core API documentation to implement your own group chat logic.\n",
+    "```\n",
+    "\n",
+    "## How Does it Work?\n",
+    "\n",
+    "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a group chat similar to {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n",
+    "but with a model-based next speaker selection mechanism.\n",
+    "When the team receives a task through {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream`,\n",
+    "the following steps are executed:\n",
+    "\n",
+    "1. The team analyzes the current conversation context, including the conversation history and participants' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes, to determine the next speaker using a model. By default, the team will not select the same speaker consecutively unless it is the only agent available. This can be changed by setting `allow_repeated_speaker=True`. You can also override the model by providing a custom selection function.\n",
+    "2. The team prompts the selected speaker agent to provide a response, which is then **broadcasted** to all other participants.\n",
+    "3. The termination condition is checked to determine if the conversation should end; if not, the process repeats from step 1.\n",
+    "4. When the conversation ends, the team returns the {py:class}`~autogen_agentchat.base.TaskResult` containing the conversation history from this task.\n",
+    "\n",
+    "Once the team finishes the task, the conversation context is kept within the team and all participants, so the next task can continue from the previous conversation context.\n",
+    "You can reset the conversation context by calling {py:meth}`~autogen_agentchat.teams.BaseGroupChat.reset`.\n",
+    "\n",
+    "In this section, we will demonstrate how to use {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with a simple example for a web search and data analysis task.\n",
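+    "\n",
+    "As a quick preview of the run/reset lifecycle described above, here is a minimal sketch (assuming two placeholder agents `agent_a` and `agent_b` and an existing `model_client`; the full, runnable example follows below):\n",
+    "\n",
+    "```python\n",
+    "team = SelectorGroupChat([agent_a, agent_b], model_client=model_client)\n",
+    "result = await team.run(task=\"...\")  # Returns a TaskResult with the conversation history.\n",
+    "await team.reset()  # Clear the conversation context before starting an unrelated task.\n",
+    "```"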
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Example: Web Search/Analysis"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from typing import List, Sequence\n",
+    "\n",
+    "from autogen_agentchat.agents import AssistantAgent, UserProxyAgent\n",
+    "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n",
+    "from autogen_agentchat.messages import BaseMessage\n",
+    "from autogen_agentchat.teams import SelectorGroupChat\n",
+    "from autogen_agentchat.ui import Console\n",
+    "from autogen_ext.models.openai import OpenAIChatCompletionClient"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Agents\n",
+    "\n",
+    "![Selector Group Chat](selector-group-chat.svg)\n",
+    "\n",
+    "This system uses three specialized agents:\n",
+    "\n",
+    "- **Planning Agent**: The strategic coordinator that breaks down complex tasks into manageable subtasks. \n",
+    "- **Web Search Agent**: An information retrieval specialist that interfaces with the `search_web_tool`.\n",
+    "- **Data Analyst Agent**: A calculation specialist equipped with the `percentage_change_tool`. "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `search_web_tool` and `percentage_change_tool` are external tools that the agents use to perform their tasks. In this example, both are mock implementations for demonstration purposes."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Note: This example uses mock tools instead of real APIs for demonstration purposes\n",
+    "def search_web_tool(query: str) -> str:\n",
+    "    if \"2006-2007\" in query:\n",
+    "        return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n",
+    "        Udonis Haslem: 844 points\n",
+    "        Dwayne Wade: 1397 points\n",
+    "        James Posey: 550 points\n",
+    "        ...\n",
+    "        \"\"\"\n",
+    "    elif \"2007-2008\" in query:\n",
+    "        return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n",
+    "    elif \"2008-2009\" in query:\n",
+    "        return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n",
+    "    return \"No data found.\"\n",
+    "\n",
+    "\n",
+    "def percentage_change_tool(start: float, end: float) -> float:\n",
+    "    return ((end - start) / start) * 100"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's create the specialized agents using the {py:class}`~autogen_agentchat.agents.AssistantAgent` class.\n",
+    "It is important to note that the agents' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes are used by the model to determine the next speaker,\n",
+    "so it is recommended to provide meaningful names and descriptions."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n",
+    "\n",
+    "planning_agent = AssistantAgent(\n",
+    "    \"PlanningAgent\",\n",
+    "    description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n",
+    "    model_client=model_client,\n",
+    "    system_message=\"\"\"\n",
+    "    You are a planning agent.\n",
+    "    Your job is to break down complex tasks into smaller, manageable subtasks.\n",
+    "    Your team members are:\n",
+    "        WebSearchAgent: Searches for information\n",
+    "        DataAnalystAgent: Performs calculations\n",
+    "\n",
+    "    You only plan and delegate tasks - you do not execute them yourself.\n",
+    "\n",
+    "    When assigning tasks, use this format:\n",
+    "    1. <agent> : <task>\n",
+    "\n",
+    "    After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n",
+    "    \"\"\",\n",
+    ")\n",
+    "\n",
+    "web_search_agent = AssistantAgent(\n",
+    "    \"WebSearchAgent\",\n",
+    "    description=\"An agent for searching information on the web.\",\n",
+    "    tools=[search_web_tool],\n",
+    "    model_client=model_client,\n",
+    "    system_message=\"\"\"\n",
+    "    You are a web search agent.\n",
+    "    Your only tool is search_web_tool - use it to find information.\n",
+    "    You make only one search call at a time.\n",
+    "    Once you have the results, you never do calculations based on them.\n",
+    "    \"\"\",\n",
+    ")\n",
+    "\n",
+    "data_analyst_agent = AssistantAgent(\n",
+    "    \"DataAnalystAgent\",\n",
+    "    description=\"An agent for performing calculations.\",\n",
+    "    model_client=model_client,\n",
+    "    tools=[percentage_change_tool],\n",
+    "    system_message=\"\"\"\n",
+    "    You are a data analyst.\n",
+    "    Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n",
+    "    If you have not seen the data, ask for it.\n",
+    "    \"\"\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "```{note}\n",
+    "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` returns the\n",
+    "tool output as the response. If your tool does not return a well-formed\n",
+    "string in natural language format, you may want to add a reflection step\n",
+    "within the agent by setting `reflect_on_tool_use=True` when creating the agent.\n",
+    "This will allow the agent to reflect on the tool output and provide a natural\n",
+    "language response.\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Workflow\n",
+    "\n",
+    "1. The task is received by the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` which, based on agent descriptions, selects the most appropriate agent to handle the initial task (typically the Planning Agent).\n",
+    "\n",
+    "2. The **Planning Agent** analyzes the task and breaks it down into subtasks, assigning each to the most appropriate agent using the format:\n",
+    "   `<agent> : <task>`\n",
+    "\n",
+    "3. Based on the conversation context and agent descriptions, the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` manager dynamically selects the next agent to handle their assigned subtask.\n",
+    "\n",
+    "4. The **Web Search Agent** performs searches one at a time, storing results in the shared conversation history.\n",
+    "\n",
+    "5. The **Data Analyst** processes the gathered information using available calculation tools when selected.\n",
+    "\n",
+    "6. 
The workflow continues with agents being dynamically selected until either:\n",
+    "   - The Planning Agent determines all subtasks are complete and sends \"TERMINATE\"\n",
+    "   - An alternative termination condition is met (e.g., a maximum number of messages)\n",
+    "\n",
+    "When defining your agents, make sure to include a helpful {py:attr}`~autogen_agentchat.base.ChatAgent.description` since this is used to decide which agent to select next."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Termination Conditions\n",
+    "\n",
+    "Let's use two termination conditions:\n",
+    "{py:class}`~autogen_agentchat.conditions.TextMentionTermination` to end the conversation when the Planning Agent sends \"TERMINATE\",\n",
+    "and {py:class}`~autogen_agentchat.conditions.MaxMessageTermination` to limit the conversation to 25 messages to avoid an infinite loop."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "text_mention_termination = TextMentionTermination(\"TERMINATE\")\n",
+    "max_messages_termination = MaxMessageTermination(max_messages=25)\n",
+    "termination = text_mention_termination | max_messages_termination"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Selector Prompt\n",
+    "\n",
+    "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` uses a model to select\n",
+    "the next speaker based on the conversation context.\n",
+    "We will use a custom selector prompt to properly align with the workflow.\n",
+    "The `{roles}`, `{history}`, and `{participants}` placeholders are filled in by the team at selection time with the participants' names and descriptions, the conversation so far, and the candidate agent names, respectively."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "selector_prompt = \"\"\"Select an agent to perform the task.\n",
+    "\n",
+    "{roles}\n",
+    "\n",
+    "Current conversation context:\n",
+    "{history}\n",
+    "\n",
+    "Read the above conversation, then select an agent from {participants} to perform the next task.\n",
+    "Make sure the planner agent has assigned tasks before other agents start working.\n",
+    "Only select one agent.\n",
+    "\"\"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "```{tip}\n",
+    "Try not to overload the model with too much instruction in the selector prompt.\n",
+    "\n",
+    "What is too much? It depends on the capabilities of the model you are using.\n",
+    "For GPT-4o and equivalents, you can use a selector prompt with a condition for when each speaker should be selected.\n",
+    "For smaller models such as Phi-4, you should keep the selector prompt as simple as possible,\n",
+    "such as the one used in this example.\n",
+    "\n",
+    "Generally, if you find yourself writing multiple conditions for each agent,\n",
+    "it is a sign that you should consider using a custom selection function,\n",
+    "or breaking down the task into smaller, sequential tasks to be handled by\n",
+    "separate agents or teams.\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Running the Team\n",
+    "\n",
+    "Let's create the team with the agents, termination conditions, and custom selector prompt.\n",
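+    "\n",
+    "For intuition, here is roughly how the team fills in the prompt's placeholders on each turn (a hypothetical rendering; {py:class}`~autogen_agentchat.teams.SelectorGroupChat` performs this substitution internally):\n",
+    "\n",
+    "```python\n",
+    "filled = selector_prompt.format(\n",
+    "    roles=\"PlanningAgent: An agent for planning tasks...\",  # One 'name: description' line per agent.\n",
+    "    history=\"user: <the conversation so far>\",\n",
+    "    participants=\"['PlanningAgent', 'WebSearchAgent', 'DataAnalystAgent']\",\n",
+    ")\n",
+    "# The model is then asked to reply with a single agent name.\n",
+    "```"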
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination,\n", + " selector_prompt=selector_prompt,\n", + " allow_repeated_speaker=True, # Allow an agent to speak multiple turns in a row.\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we run the team with a task to find information about an NBA player." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To complete this task, we need to perform the following subtasks:\n", + "\n", + "1. Find out which Miami Heat player had the highest points in the 2006-2007 season.\n", + "2. Gather data on this player's total rebounds for the 2007-2008 season.\n", + "3. Gather data on this player's total rebounds for the 2008-2009 season.\n", + "4. Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", + "\n", + "I'll assign these tasks accordingly:\n", + "\n", + "1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", + "2. WebSearchAgent: Find the total rebounds for this player in the 2007-2008 NBA season.\n", + "3. WebSearchAgent: Find the total rebounds for this player in the 2008-2009 NBA season.\n", + "4. 
DataAnalystAgent: Calculate the percentage change in total rebounds from the 2007-2008 season to the 2008-2009 season for this player.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_89tUNHaAM0kKQYPJLleGUKK7', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_89tUNHaAM0kKQYPJLleGUKK7', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- WebSearchAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\n", + "\n", + "Next, I will search for Dwyane Wade's total rebounds for the 2007-2008 season.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_RC55TkSjG3JXRuVOTPrcE1RL', arguments='{\"query\":\"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_RC55TkSjG3JXRuVOTPrcE1RL', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_pBXoABrErDow0rZjw3tjOZol', arguments='{\"query\":\"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pBXoABrErDow0rZjw3tjOZol', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_qMxxXtcJsiK8KFSSCx3zm0is', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_qMxxXtcJsiK8KFSSCx3zm0is', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "The player with the highest points for the Miami Heat in the 2006-2007 NBA season was Dwyane Wade, who scored 1,397 points. 
The percentage change in Dwyane Wade's total rebounds from 214 in the 2007-2008 season to 398 in the 2008-2009 season is approximately 85.98%.\n", + "\n", + "TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=220), metadata={}, content=\"To complete this task, we need to perform the following subtasks:\\n\\n1. Find out which Miami Heat player had the highest points in the 2006-2007 season.\\n2. Gather data on this player's total rebounds for the 2007-2008 season.\\n3. Gather data on this player's total rebounds for the 2008-2009 season.\\n4. Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nI'll assign these tasks accordingly:\\n\\n1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Find the total rebounds for this player in the 2007-2008 NBA season.\\n3. WebSearchAgent: Find the total rebounds for this player in the 2008-2009 NBA season.\\n4. DataAnalystAgent: Calculate the percentage change in total rebounds from the 2007-2008 season to the 2008-2009 season for this player.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=368, completion_tokens=27), metadata={}, content=[FunctionCall(id='call_89tUNHaAM0kKQYPJLleGUKK7', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_89tUNHaAM0kKQYPJLleGUKK7', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), ThoughtEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=\"The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\\n\\nNext, I will search for Dwyane Wade's total rebounds for the 2007-2008 season.\", type='ThoughtEvent'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=460, completion_tokens=83), metadata={}, content=[FunctionCall(id='call_RC55TkSjG3JXRuVOTPrcE1RL', arguments='{\"query\":\"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_RC55TkSjG3JXRuVOTPrcE1RL', is_error=False)], type='ToolCallExecutionEvent'), 
ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=585, completion_tokens=28), metadata={}, content=[FunctionCall(id='call_pBXoABrErDow0rZjw3tjOZol', arguments='{\"query\":\"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pBXoABrErDow0rZjw3tjOZol', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=496, completion_tokens=21), metadata={}, content=[FunctionCall(id='call_qMxxXtcJsiK8KFSSCx3zm0is', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_qMxxXtcJsiK8KFSSCx3zm0is', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=528, completion_tokens=80), metadata={}, content=\"The player with the highest points for the Miami Heat in the 2006-2007 NBA season was Dwyane Wade, who scored 1,397 points. The percentage change in Dwyane Wade's total rebounds from 214 in the 2007-2008 season to 398 in the 2008-2009 season is approximately 85.98%.\\n\\nTERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# Use asyncio.run(...) if you are running this in a script.\n",
+    "await Console(team.run_stream(task=task))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As we can see, after the Web Search Agent conducts the necessary searches and the Data Analyst Agent completes the calculations, we find that Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, and that the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons is 85.98%!\n",
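+    "\n",
+    "As a quick sanity check of that figure, using the rebound totals reported above:\n",
+    "\n",
+    "```python\n",
+    "(398 - 214) / 214 * 100  # 85.98130841121495, matching percentage_change_tool's output.\n",
+    "```"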
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Custom Selector Function"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Oftentimes we want better control over the selection process.\n",
+    "To this end, we can set the `selector_func` argument with a custom selector function to override the default model-based selection.\n",
+    "This allows us to implement more complex selection logic and state-based transitions.\n",
+    "\n",
+    "For instance, we may want the Planning Agent to speak immediately after any specialized agent to check on progress.\n",
+    "\n",
+    "```{note}\n",
+    "Returning `None` from the custom selector function will use the default model-based selection.\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "---------- user ----------\n",
+      "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n",
+      "---------- PlanningAgent ----------\n",
+      "To answer this question, we need to follow these steps: \n",
+      "\n",
+      "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n",
+      "2. Retrieve the total rebounds of that player for the 2007-2008 and 2008-2009 seasons.\n",
+      "3. Calculate the percentage change in his total rebounds between the two seasons.\n",
+      "\n",
+      "Let's delegate these tasks:\n",
+      "\n",
+      "1. WebSearchAgent: Find the Miami Heat player with the highest points in the 2006-2007 NBA season.\n",
+      "2. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2007-2008 NBA season.\n",
+      "3. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2008-2009 NBA season.\n",
+      "4. DataAnalystAgent: Calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for the player found.\n",
+      "---------- WebSearchAgent ----------\n",
+      "[FunctionCall(id='call_Pz82ndNLSV4cH0Sg6g7ArP4L', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n",
+      "---------- WebSearchAgent ----------\n",
+      "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n        Udonis Haslem: 844 points\\n        Dwayne Wade: 1397 points\\n        James Posey: 550 points\\n        ...\\n        ', call_id='call_Pz82ndNLSV4cH0Sg6g7ArP4L')]\n",
+      "---------- WebSearchAgent ----------\n",
+      "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n",
+      "        Udonis Haslem: 844 points\n",
+      "        Dwayne Wade: 1397 points\n",
+      "        James Posey: 550 points\n",
+      "        ...\n",
+      "        \n",
+      "---------- PlanningAgent ----------\n",
+      "Great! Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season. Now, let's continue with the next tasks:\n",
+      "\n",
+      "2. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2007-2008 NBA season.\n",
+      "3. 
WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2008-2009 NBA season.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_3qv9so2DXFZIHtzqDIfXoFID', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Vh7zzzWUeiUAvaYjP0If0k1k', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_3qv9so2DXFZIHtzqDIfXoFID'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Vh7zzzWUeiUAvaYjP0If0k1k')]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- PlanningAgent ----------\n", + "Now let's calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for Dwyane Wade.\n", + "\n", + "4. DataAnalystAgent: Calculate the percentage change in total rebounds for Dwyane Wade between the 2007-2008 and 2008-2009 seasons.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_FXnPSr6JVGfAWs3StIizbt2V', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_FXnPSr6JVGfAWs3StIizbt2V')]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1397 points. The percentage change in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) is approximately 86.0%.\n", + "\n", + "TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=192), content=\"To answer this question, we need to follow these steps: \\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Retrieve the total rebounds of that player for the 2007-2008 and 2008-2009 seasons.\\n3. Calculate the percentage change in his total rebounds between the two seasons.\\n\\nLet's delegate these tasks:\\n\\n1. WebSearchAgent: Find the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2007-2008 NBA season.\\n3. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2008-2009 NBA season.\\n4. 
DataAnalystAgent: Calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for the player found.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=340, completion_tokens=27), content=[FunctionCall(id='call_Pz82ndNLSV4cH0Sg6g7ArP4L', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_Pz82ndNLSV4cH0Sg6g7ArP4L')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=420, completion_tokens=87), content=\"Great! Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season. Now, let's continue with the next tasks:\\n\\n2. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2007-2008 NBA season.\\n3. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2008-2009 NBA season.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=525, completion_tokens=71), content=[FunctionCall(id='call_3qv9so2DXFZIHtzqDIfXoFID', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Vh7zzzWUeiUAvaYjP0If0k1k', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_3qv9so2DXFZIHtzqDIfXoFID'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Vh7zzzWUeiUAvaYjP0If0k1k')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=569, completion_tokens=68), content=\"Now let's calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for Dwyane Wade.\\n\\n4. 
DataAnalystAgent: Calculate the percentage change in total rebounds for Dwyane Wade between the 2007-2008 and 2008-2009 seasons.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=627, completion_tokens=21), content=[FunctionCall(id='call_FXnPSr6JVGfAWs3StIizbt2V', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_FXnPSr6JVGfAWs3StIizbt2V')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=659, completion_tokens=76), content='Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1397 points. The percentage change in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) is approximately 86.0%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")"
+      ]
+     },
+     "execution_count": 10,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "def selector_func(messages: Sequence[BaseMessage]) -> str | None:\n",
+    "    if messages[-1].source != planning_agent.name:\n",
+    "        return planning_agent.name\n",
+    "    return None\n",
+    "\n",
+    "\n",
+    "# Reset the previous team and run the chat again with the selector function.\n",
+    "await team.reset()\n",
+    "team = SelectorGroupChat(\n",
+    "    [planning_agent, web_search_agent, data_analyst_agent],\n",
+    "    model_client=model_client,\n",
+    "    termination_condition=termination,\n",
+    "    selector_prompt=selector_prompt,\n",
+    "    allow_repeated_speaker=True,\n",
+    "    selector_func=selector_func,\n",
+    ")\n",
+    "\n",
+    "await Console(team.run_stream(task=task))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You can see from the conversation log that the Planning Agent always speaks immediately after the specialized agents.\n",
+    "\n",
+    "```{tip}\n",
+    "Each participant agent takes only one step (executing tools, generating a response, etc.)\n",
+    "on each turn.\n",
+    "If you want an {py:class}`~autogen_agentchat.agents.AssistantAgent` to keep taking turns\n",
+    "until it stops returning a {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage`,\n",
+    "which happens once it has finished running all the tools it needs to run,\n",
+    "you can do so by checking the last message and returning the agent if it is a\n",
+    "{py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage`.\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Custom Candidate Function"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Another possible requirement is to automatically select the next speaker from a filtered list of agents.\n",
+    "For this, we can set the `candidate_func` parameter to a custom candidate function that narrows down the list of potential agents for speaker selection on each turn of the group chat.\n",
+    "\n",
+    "This allows us to restrict speaker selection to a specific set of agents after a given agent.\n",
+    "\n",
+    "```{note}\n",
+    "The `candidate_func` is only valid if `selector_func` is not set.\n",
+    "Returning `None` or an empty list `[]` from the custom candidate function will raise a `ValueError`.\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "---------- user ----------\n",
+      "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n",
+      "---------- PlanningAgent ----------\n",
+      "To answer this question, we'll break it down into two main subtasks:\n",
+      "\n",
+      "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n",
+      "2. Calculate the percentage change in that player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n",
+      "\n",
+      "Let's assign these tasks:\n",
+      "\n",
+      "1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\n",
+      "2. WebSearchAgent: Find the total rebound statistics for that identified player for both the 2007-2008 and 2008-2009 NBA seasons.\n",
+      "3. 
DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons once the data is retrieved.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_WtR5KTfEIxs3jIO25gjAw7dF', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_WtR5KTfEIxs3jIO25gjAw7dF', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_9HA3DEacUl4WuG2G2PtRkXAO', arguments='{\"start\": 432, \"end\": 527}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='21.99074074074074', name='percentage_change_tool', call_id='call_9HA3DEacUl4WuG2G2PtRkXAO', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "21.99074074074074\n", + "---------- PlanningAgent ----------\n", + "It seems we've missed some context there, so let's assign the subtasks again for clarity:\n", + "\n", + "Based on the search results, Dwyane Wade had the highest points for the Miami Heat in the 2006-2007 season with 1397 points.\n", + "\n", + "Now, let's find the necessary rebound statistics:\n", + "\n", + "2. WebSearchAgent: Find Dwyane Wade's total rebound statistics for both the 2007-2008 and 2008-2009 NBA seasons.\n", + "3. DataAnalystAgent: Once the data is retrieved, calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_3i1wTDSjkGg6Ev8YKYWkZK55', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_NRAs6jHxXRi8zsvpW5WlHAaU', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_3i1wTDSjkGg6Ev8YKYWkZK55', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_NRAs6jHxXRi8zsvpW5WlHAaU', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- PlanningAgent ----------\n", + "The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\n", + "\n", + "Now, let's calculate the percentage change.\n", + "\n", + "3. 
DataAnalystAgent: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_XECA7ezz7VIKbf8IbZYSCSpI', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_XECA7ezz7VIKbf8IbZYSCSpI', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1397 points. The percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\n", + "\n", + "TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=169), metadata={}, content=\"To answer this question, we'll break it down into two main subtasks:\\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Calculate the percentage change in that player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's assign these tasks:\\n\\n1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Find the total rebound statistics for that identified player for both the 2007-2008 and 2008-2009 NBA seasons.\\n3. 
DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons once the data is retrieved.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=324, completion_tokens=28), metadata={}, content=[FunctionCall(id='call_WtR5KTfEIxs3jIO25gjAw7dF', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_WtR5KTfEIxs3jIO25gjAw7dF', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=390, completion_tokens=37), metadata={}, content=[FunctionCall(id='call_9HA3DEacUl4WuG2G2PtRkXAO', arguments='{\"start\": 432, \"end\": 527}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='21.99074074074074', name='percentage_change_tool', call_id='call_9HA3DEacUl4WuG2G2PtRkXAO', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='21.99074074074074', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=413, completion_tokens=137), metadata={}, content=\"It seems we've missed some context there, so let's assign the subtasks again for clarity:\\n\\nBased on the search results, Dwyane Wade had the highest points for the Miami Heat in the 2006-2007 season with 1397 points.\\n\\nNow, let's find the necessary rebound statistics:\\n\\n2. WebSearchAgent: Find Dwyane Wade's total rebound statistics for both the 2007-2008 and 2008-2009 NBA seasons.\\n3. 
DataAnalystAgent: Once the data is retrieved, calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=576, completion_tokens=73), metadata={}, content=[FunctionCall(id='call_3i1wTDSjkGg6Ev8YKYWkZK55', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_NRAs6jHxXRi8zsvpW5WlHAaU', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_3i1wTDSjkGg6Ev8YKYWkZK55', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_NRAs6jHxXRi8zsvpW5WlHAaU', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=612, completion_tokens=84), metadata={}, content=\"The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\\n\\nNow, let's calculate the percentage change.\\n\\n3. DataAnalystAgent: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=720, completion_tokens=21), metadata={}, content=[FunctionCall(id='call_XECA7ezz7VIKbf8IbZYSCSpI', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_XECA7ezz7VIKbf8IbZYSCSpI', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=718, completion_tokens=63), metadata={}, content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1397 points. 
The percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "def candidate_func(messages: Sequence[BaseMessage]) -> List[str]:\n",
+    "    # Keep planning_agent as the first one to plan out the tasks.\n",
+    "    if messages[-1].source == \"user\":\n",
+    "        return [planning_agent.name]\n",
+    "\n",
+    "    # If the previous agent is planning_agent and it explicitly asks for web_search_agent\n",
+    "    # or data_analyst_agent or both (in case of re-planning or re-assignment of tasks),\n",
+    "    # then return those specific agents.\n",
+    "    last_message = messages[-1]\n",
+    "    if last_message.source == planning_agent.name:\n",
+    "        participants = []\n",
+    "        if web_search_agent.name in last_message.content:\n",
+    "            participants.append(web_search_agent.name)\n",
+    "        if data_analyst_agent.name in last_message.content:\n",
+    "            participants.append(data_analyst_agent.name)\n",
+    "        if participants:\n",
+    "            return participants  # SelectorGroupChat will select the next speaker from these agents.\n",
+    "\n",
+    "    # We can assume that the task is finished once the web_search_agent\n",
+    "    # and data_analyst_agent have taken their turns, so we send\n",
+    "    # in planning_agent to terminate the chat.\n",
+    "    previous_set_of_agents = set(message.source for message in messages)\n",
+    "    if web_search_agent.name in previous_set_of_agents and data_analyst_agent.name in previous_set_of_agents:\n",
+    "        return [planning_agent.name]\n",
+    "\n",
+    "    # If no conditions are met, then return all the agents.\n",
+    "    return [planning_agent.name, web_search_agent.name, data_analyst_agent.name]\n",
+    "\n",
+    "\n",
+    "# Reset the previous team and run the chat again with the candidate function.\n",
+    "await team.reset()\n",
+    "team = SelectorGroupChat(\n",
+    "    [planning_agent, web_search_agent, data_analyst_agent],\n",
+    "    model_client=model_client,\n",
+    "    termination_condition=termination,\n",
+    "    candidate_func=candidate_func,\n",
+    ")\n",
+    "\n",
+    "await Console(team.run_stream(task=task))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You can see from the conversation log that the Planning Agent returns to the conversation once the Web Search Agent and the Data Analyst Agent have taken their turns. It finds that the task was not finished as expected, so it calls the WebSearchAgent again to get the rebound values and then calls the DataAnalystAgent to get the percentage change."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## User Feedback\n",
+    "\n",
+    "We can add {py:class}`~autogen_agentchat.agents.UserProxyAgent` to the team to\n",
+    "provide user feedback during a run.\n",
+    "See [Human-in-the-Loop](./tutorial/human-in-the-loop.ipynb) for more details\n",
+    "about {py:class}`~autogen_agentchat.agents.UserProxyAgent`.\n",
+    "\n",
+    "To use the {py:class}`~autogen_agentchat.agents.UserProxyAgent` in the\n",
+    "web search example, we simply add it to the team and update the selector function\n",
+    "to always check for user feedback after the planning agent speaks.\n",
+    "If the user responds with `\"APPROVE\"`, the conversation continues; otherwise,\n",
+    "the planning agent tries again until the user approves.\n",
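+    "\n",
+    "The approval check in the selector function below is case-insensitive on the keyword, which is why the lowercase reply `approve` in the run below is accepted. Here is a minimal sketch of the test as a hypothetical standalone helper (the selector function below inlines this logic):\n",
+    "\n",
+    "```python\n",
+    "def user_approved(reply: str) -> bool:\n",
+    "    # \"approve\", \"Approve\", and \"APPROVE\" all pass the check.\n",
+    "    return \"APPROVE\" in reply.upper()\n",
+    "```"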
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- PlanningAgent ----------\n", + "To address the user's query, we will need to perform the following tasks:\n", + "\n", + "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", + "2. Find the total rebounds for that player in the 2007-2008 season.\n", + "3. Find the total rebounds for that player in the 2008-2009 season.\n", + "4. Calculate the percentage change in the total rebounds between the 2007-2008 and 2008-2009 seasons.\n", + "\n", + "Let's assign these tasks:\n", + "\n", + "1. **WebSearchAgent**: Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", + " \n", + "(Task 2 and 3 depend on the result of Task 1. We'll proceed with Tasks 2 and 3 once Task 1 is complete.)\n", + "---------- UserProxyAgent ----------\n", + "approve\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_0prr3fUnG5CtisUG7QeygW0w', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_0prr3fUnG5CtisUG7QeygW0w')]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- PlanningAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points.\n", + "\n", + "Next, we need to find Dwyane Wade's total rebounds for the 2007-2008 and 2008-2009 seasons:\n", + "\n", + "2. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2007-2008 season.\n", + "3. 
**WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2008-2009 season.\n", + "---------- UserProxyAgent ----------\n", + "approve\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_fBZe80NaBfruOVGwRWbhXyRm', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_cURYibna4fGxySiL7IYt0c3s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_fBZe80NaBfruOVGwRWbhXyRm'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_cURYibna4fGxySiL7IYt0c3s')]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- PlanningAgent ----------\n", + "Now that we have Dwyane Wade's total rebounds for both seasons, we can calculate the percentage change:\n", + "\n", + "4. **DataAnalystAgent**: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds).\n", + "---------- UserProxyAgent ----------\n", + "approve\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_z3uog7t2x0z1Suzl5hACF9hY', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_z3uog7t2x0z1Suzl5hACF9hY')]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season, which is a percentage change of approximately 85.98%.\n", + "\n", + "TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=166), content=\"To address the user's query, we will need to perform the following tasks:\\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Find the total rebounds for that player in the 2007-2008 season.\\n3. Find the total rebounds for that player in the 2008-2009 season.\\n4. Calculate the percentage change in the total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's assign these tasks:\\n\\n1. **WebSearchAgent**: Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n \\n(Task 2 and 3 depend on the result of Task 1. 
We'll proceed with Tasks 2 and 3 once Task 1 is complete.)\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='2a433f88-f886-4b39-a078-ea1acdcb2f9d', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=323, completion_tokens=28), content=[FunctionCall(id='call_0prr3fUnG5CtisUG7QeygW0w', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_0prr3fUnG5CtisUG7QeygW0w')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=403, completion_tokens=112), content=\"Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points.\\n\\nNext, we need to find Dwyane Wade's total rebounds for the 2007-2008 and 2008-2009 seasons:\\n\\n2. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2007-2008 season.\\n3. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2008-2009 season.\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='23dd4570-2391-41e9-aeea-86598499792c', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=543, completion_tokens=73), content=[FunctionCall(id='call_fBZe80NaBfruOVGwRWbhXyRm', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_cURYibna4fGxySiL7IYt0c3s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_fBZe80NaBfruOVGwRWbhXyRm'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_cURYibna4fGxySiL7IYt0c3s')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=586, completion_tokens=70), content=\"Now that we have Dwyane Wade's total rebounds for both seasons, we can calculate the percentage change:\\n\\n4. 
**DataAnalystAgent**: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds).\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='e849d193-4ab3-4558-8560-7dbc062a0aee', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=655, completion_tokens=21), content=[FunctionCall(id='call_z3uog7t2x0z1Suzl5hACF9hY', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_z3uog7t2x0z1Suzl5hACF9hY')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=687, completion_tokens=74), content='Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season, which is a percentage change of approximately 85.98%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "user_proxy_agent = UserProxyAgent(\"UserProxyAgent\", description=\"A proxy for the user to approve or disapprove tasks.\")\n", + "\n", + "\n", + "def selector_func_with_user_proxy(messages: Sequence[BaseMessage]) -> str | None:\n", + " if messages[-1].source != planning_agent.name and messages[-1].source != user_proxy_agent.name:\n", + " # Planning agent should be the first to engage when given a new task, or check progress.\n", + " return planning_agent.name\n", + " if messages[-1].source == planning_agent.name:\n", + " if messages[-2].source == user_proxy_agent.name and \"APPROVE\" in messages[-1].content.upper(): # type: ignore\n", + " # User has approved the plan, proceed to the next agent.\n", + " return None\n", + " # Use the user proxy agent to get the user's approval to proceed.\n", + " return user_proxy_agent.name\n", + " if messages[-1].source == user_proxy_agent.name:\n", + " # If the user does not approve, return to the planning agent.\n", + " if \"APPROVE\" not in messages[-1].content.upper(): # type: ignore\n", + " return planning_agent.name\n", + " return None\n", + "\n", + "\n", + "# Reset the previous agents and run the chat again with the user proxy agent and selector function.\n", + "await team.reset()\n", + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent, user_proxy_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination,\n", + " selector_prompt=selector_prompt,\n", + " selector_func=selector_func_with_user_proxy,\n", + " allow_repeated_speaker=True,\n", + ")\n", + "\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, the user's feedback is incorporated into the conversation flow,\n", + "and the user can approve or reject the planning agent's decisions." 
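+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Because the selector function is plain Python, we can sanity-check its routing\n",
+    "logic directly, without a model client or a live run. The cell below is a\n",
+    "minimal sketch that exercises the main branches of `selector_func_with_user_proxy`\n",
+    "using hand-written `TextMessage` objects; the message contents are made up\n",
+    "purely for illustration."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from autogen_agentchat.messages import TextMessage\n",
+    "\n",
+    "# Hand-written messages that stand in for a real conversation.\n",
+    "task_msg = TextMessage(source=\"user\", content=\"An example task.\")\n",
+    "plan_msg = TextMessage(source=planning_agent.name, content=\"1. WebSearchAgent: ...\")\n",
+    "approve_msg = TextMessage(source=user_proxy_agent.name, content=\"approve\")\n",
+    "revise_msg = TextMessage(source=user_proxy_agent.name, content=\"revise the plan\")\n",
+    "\n",
+    "# After the planning agent speaks, the user proxy is selected for approval.\n",
+    "assert selector_func_with_user_proxy([task_msg, plan_msg]) == user_proxy_agent.name\n",
+    "# Once the user approves, fall back to the default model-based selection.\n",
+    "assert selector_func_with_user_proxy([task_msg, plan_msg, approve_msg]) is None\n",
+    "# Any other user response sends the conversation back to the planning agent.\n",
+    "assert selector_func_with_user_proxy([task_msg, plan_msg, revise_msg]) == planning_agent.name"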
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Reasoning Models\n", + "\n", + "So far in the examples, we have used a `gpt-4o` model. Models like `gpt-4o`\n", + "and `gemini-1.5-flash` are great at following instructions, so you can\n", + "have relatively detailed instructions in the selector prompt for the team and the \n", + "system messages for each agent to guide their behavior.\n", + "\n", + "However, if you are using a reasoning model like `o3-mini`, you will need to\n", + "keep the selector prompt and system messages as simple and to the point as possible.\n", + "This is because the reasoning models are already good at coming up with their own \n", + "instructions given the context provided to them.\n", + "\n", + "This also means that we don't need a planning agent to break down the task\n", + "anymore, since the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` that\n", + "uses a reasoning model can do that on its own.\n", + "\n", + "In the following example, we will use `o3-mini` as the model for the\n", + "agents and the team, and we will not use a planning agent.\n", + "Also, we are keeping the selector prompt and system messages as simple as possible." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "model_client = OpenAIChatCompletionClient(model=\"o3-mini\")\n", + "\n", + "web_search_agent = AssistantAgent(\n", + " \"WebSearchAgent\",\n", + " description=\"An agent for searching information on the web.\",\n", + " tools=[search_web_tool],\n", + " model_client=model_client,\n", + " system_message=\"\"\"Use web search tool to find information.\"\"\",\n", + ")\n", + "\n", + "data_analyst_agent = AssistantAgent(\n", + " \"DataAnalystAgent\",\n", + " description=\"An agent for performing calculations.\",\n", + " model_client=model_client,\n", + " tools=[percentage_change_tool],\n", + " system_message=\"\"\"Use tool to perform calculation. 
If you have not seen the data, ask for it.\"\"\",\n", + ")\n", + "\n", + "user_proxy_agent = UserProxyAgent(\n", + " \"UserProxyAgent\",\n", + " description=\"A user to approve or disapprove tasks.\",\n", + ")\n", + "\n", + "selector_prompt = \"\"\"Select an agent to perform task.\n", + "\n", + "{roles}\n", + "\n", + "Current conversation context:\n", + "{history}\n", + "\n", + "Read the above conversation, then select an agent from {participants} to perform the next task.\n", + "When the task is complete, let the user approve or disapprove the task.\n", + "\"\"\"\n", + "\n", + "team = SelectorGroupChat(\n", + " [web_search_agent, data_analyst_agent, user_proxy_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination, # Use the same termination condition as before.\n", + " selector_prompt=selector_prompt,\n", + " allow_repeated_speaker=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', arguments='{\"query\": \"Who was the Miami Heat player with the highest points in the 2006-2007 season Miami Heat statistics Dwyane Wade rebounds percentage change 2007-2008 2008-2009 seasons\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- DataAnalystAgent ----------\n", + "I found that in the 2006–2007 season the player with the highest points was Dwyane Wade (with 1,397 points). 
Could you please provide Dwyane Wade’s total rebounds for the 2007–2008 and the 2008–2009 seasons so I can calculate the percentage change?\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_lppGTILXDvO9waPwKO66ehK6', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 and 2008-2009 seasons for Miami Heat\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_lppGTILXDvO9waPwKO66ehK6', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "---------- DataAnalystAgent ----------\n", + "Could you please provide Dwyane Wade’s total rebounds in the 2008-2009 season?\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_r8DBcbJtQfdtugLtyTrqOvoK', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season Miami Heat\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_r8DBcbJtQfdtugLtyTrqOvoK', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_4jejv1wM7V1osbBCxJze8aQM', arguments='{\"start\": 214, \"end\": 398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_4jejv1wM7V1osbBCxJze8aQM', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- DataAnalystAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest total points (1,397) during the 2006-2007 season. His total rebounds increased by approximately 86% from 214 in the 2007-2008 season to 398 in the 2008-2009 season.\n", + "---------- UserProxyAgent ----------\n", + "Approve. 
TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=103, completion_tokens=384), content=[FunctionCall(id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', arguments='{\"query\": \"Who was the Miami Heat player with the highest points in the 2006-2007 season Miami Heat statistics Dwyane Wade rebounds percentage change 2007-2008 2008-2009 seasons\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=183, completion_tokens=1038), content='I found that in the 2006–2007 season the player with the highest points was Dwyane Wade (with 1,397 points). Could you please provide Dwyane Wade’s total rebounds for the 2007–2008 and the 2008–2009 seasons so I can calculate the percentage change?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=299, completion_tokens=109), content=[FunctionCall(id='call_lppGTILXDvO9waPwKO66ehK6', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 and 2008-2009 seasons for Miami Heat\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_lppGTILXDvO9waPwKO66ehK6', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=291, completion_tokens=224), content='Could you please provide Dwyane Wade’s total rebounds in the 2008-2009 season?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=401, completion_tokens=37), content=[FunctionCall(id='call_r8DBcbJtQfdtugLtyTrqOvoK', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season Miami Heat\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_r8DBcbJtQfdtugLtyTrqOvoK', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of 
total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=353, completion_tokens=158), content=[FunctionCall(id='call_4jejv1wM7V1osbBCxJze8aQM', arguments='{\"start\": 214, \"end\": 398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_4jejv1wM7V1osbBCxJze8aQM', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=394, completion_tokens=138), content='Dwyane Wade was the Miami Heat player with the highest total points (1,397) during the 2006-2007 season. His total rebounds increased by approximately 86% from 214 in the 2007-2008 season to 398 in the 2008-2009 season.', type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='b3b05408-73fc-47d4-b832-16c9f447cd6e', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='Approve. TERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{tip}\n", + "For more guidance on how to prompt reasoning models, see the\n", + "Azure AI Services Blog on [Prompt Engineering for OpenAI's O1 and O3-mini Reasoning Models](https://techcommunity.microsoft.com/blog/azure-ai-services-blog/prompt-engineering-for-openai%E2%80%99s-o1-and-o3-mini-reasoning-models/4374010)\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=220), metadata={}, content=\"To complete this task, we need to perform the following subtasks:\\n\\n1. Find out which Miami Heat player had the highest points in the 2006-2007 season.\\n2. Gather data on this player's total rebounds for the 2007-2008 season.\\n3. Gather data on this player's total rebounds for the 2008-2009 season.\\n4. Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nI'll assign these tasks accordingly:\\n\\n1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Find the total rebounds for this player in the 2007-2008 NBA season.\\n3. 
WebSearchAgent: Find the total rebounds for this player in the 2008-2009 NBA season.\\n4. DataAnalystAgent: Calculate the percentage change in total rebounds from the 2007-2008 season to the 2008-2009 season for this player.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=368, completion_tokens=27), metadata={}, content=[FunctionCall(id='call_89tUNHaAM0kKQYPJLleGUKK7', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_89tUNHaAM0kKQYPJLleGUKK7', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), ThoughtEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=\"The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\\n\\nNext, I will search for Dwyane Wade's total rebounds for the 2007-2008 season.\", type='ThoughtEvent'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=460, completion_tokens=83), metadata={}, content=[FunctionCall(id='call_RC55TkSjG3JXRuVOTPrcE1RL', arguments='{\"query\":\"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_RC55TkSjG3JXRuVOTPrcE1RL', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=585, completion_tokens=28), metadata={}, content=[FunctionCall(id='call_pBXoABrErDow0rZjw3tjOZol', arguments='{\"query\":\"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pBXoABrErDow0rZjw3tjOZol', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=496, completion_tokens=21), metadata={}, content=[FunctionCall(id='call_qMxxXtcJsiK8KFSSCx3zm0is', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], 
type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_qMxxXtcJsiK8KFSSCx3zm0is', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=528, completion_tokens=80), metadata={}, content=\"The player with the highest points for the Miami Heat in the 2006-2007 NBA season was Dwyane Wade, who scored 1,397 points. The percentage change in Dwyane Wade's total rebounds from 214 in the 2007-2008 season to 398 in the 2008-2009 season is approximately 85.98%.\\n\\nTERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")"
-      ]
-     },
-     "execution_count": 8,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "# Use asyncio.run(...) if you are running this in a script.\n",
-    "await Console(team.run_stream(task=task))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "As we can see, after the Web Search Agent conducts the necessary searches and the Data Analyst Agent completes the necessary calculations, we find that Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, and that the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons is 85.98%!"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Custom Selector Function"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Oftentimes we want better control over the selection process.\n",
-    "To this end, we can set the `selector_func` argument to a custom selector function that overrides the default model-based selection.\n",
-    "This allows us to implement more complex selection logic and state-based transitions.\n",
-    "\n",
-    "For instance, we want the Planning Agent to speak immediately after any specialized agent to check the progress.\n",
-    "\n",
-    "```{note}\n",
-    "Returning `None` from the custom selector function will use the default model-based selection.\n",
-    "```"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "---------- user ----------\n",
-      "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n",
-      "---------- PlanningAgent ----------\n",
-      "To answer this question, we need to follow these steps: \n",
-      "\n",
-      "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n",
-      "2. Retrieve the total rebounds of that player for the 2007-2008 and 2008-2009 seasons.\n",
-      "3. Calculate the percentage change in his total rebounds between the two seasons.\n",
-      "\n",
-      "Let's delegate these tasks:\n",
-      "\n",
-      "1. WebSearchAgent: Find the Miami Heat player with the highest points in the 2006-2007 NBA season.\n",
-      "2. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2007-2008 NBA season.\n",
-      "3. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2008-2009 NBA season.\n",
-      "4. 
DataAnalystAgent: Calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for the player found.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_Pz82ndNLSV4cH0Sg6g7ArP4L', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_Pz82ndNLSV4cH0Sg6g7ArP4L')]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- PlanningAgent ----------\n", - "Great! Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season. Now, let's continue with the next tasks:\n", - "\n", - "2. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2007-2008 NBA season.\n", - "3. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2008-2009 NBA season.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_3qv9so2DXFZIHtzqDIfXoFID', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Vh7zzzWUeiUAvaYjP0If0k1k', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_3qv9so2DXFZIHtzqDIfXoFID'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Vh7zzzWUeiUAvaYjP0If0k1k')]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- PlanningAgent ----------\n", - "Now let's calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for Dwyane Wade.\n", - "\n", - "4. DataAnalystAgent: Calculate the percentage change in total rebounds for Dwyane Wade between the 2007-2008 and 2008-2009 seasons.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_FXnPSr6JVGfAWs3StIizbt2V', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', call_id='call_FXnPSr6JVGfAWs3StIizbt2V')]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1397 points. 
The percentage change in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) is approximately 86.0%.\n", - "\n", - "TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=192), content=\"To answer this question, we need to follow these steps: \\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Retrieve the total rebounds of that player for the 2007-2008 and 2008-2009 seasons.\\n3. Calculate the percentage change in his total rebounds between the two seasons.\\n\\nLet's delegate these tasks:\\n\\n1. WebSearchAgent: Find the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2007-2008 NBA season.\\n3. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2008-2009 NBA season.\\n4. DataAnalystAgent: Calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for the player found.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=340, completion_tokens=27), content=[FunctionCall(id='call_Pz82ndNLSV4cH0Sg6g7ArP4L', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_Pz82ndNLSV4cH0Sg6g7ArP4L')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=420, completion_tokens=87), content=\"Great! Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season. Now, let's continue with the next tasks:\\n\\n2. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2007-2008 NBA season.\\n3. 
WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2008-2009 NBA season.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=525, completion_tokens=71), content=[FunctionCall(id='call_3qv9so2DXFZIHtzqDIfXoFID', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Vh7zzzWUeiUAvaYjP0If0k1k', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_3qv9so2DXFZIHtzqDIfXoFID'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Vh7zzzWUeiUAvaYjP0If0k1k')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=569, completion_tokens=68), content=\"Now let's calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for Dwyane Wade.\\n\\n4. DataAnalystAgent: Calculate the percentage change in total rebounds for Dwyane Wade between the 2007-2008 and 2008-2009 seasons.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=627, completion_tokens=21), content=[FunctionCall(id='call_FXnPSr6JVGfAWs3StIizbt2V', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_FXnPSr6JVGfAWs3StIizbt2V')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=659, completion_tokens=76), content='Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1397 points. 
The percentage change in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) is approximately 86.0%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")"
-      ]
-     },
-     "execution_count": 10,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:\n",
-    "    if messages[-1].source != planning_agent.name:\n",
-    "        return planning_agent.name\n",
-    "    return None\n",
-    "\n",
-    "\n",
-    "# Reset the previous team and run the chat again with the selector function.\n",
-    "await team.reset()\n",
-    "team = SelectorGroupChat(\n",
-    "    [planning_agent, web_search_agent, data_analyst_agent],\n",
-    "    model_client=model_client,\n",
-    "    termination_condition=termination,\n",
-    "    selector_prompt=selector_prompt,\n",
-    "    allow_repeated_speaker=True,\n",
-    "    selector_func=selector_func,\n",
-    ")\n",
-    "\n",
-    "await Console(team.run_stream(task=task))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "You can see from the conversation log that the Planning Agent always speaks immediately after the specialized agents.\n",
-    "\n",
-    "```{tip}\n",
-    "Each participant agent only takes one step (executing tools, generating a response, etc.)\n",
-    "on each turn.\n",
-    "If you want an {py:class}`~autogen_agentchat.agents.AssistantAgent` to keep\n",
-    "taking turns until it stops returning a {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage`,\n",
-    "which indicates that it has finished running all the tools it needs to run,\n",
-    "you can do so by checking the last message and returning the agent if the\n",
-    "last message is a {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage`.\n",
-    "```"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Custom Candidate Function"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Another possible requirement is to automatically select the next speaker from a filtered list of agents.\n",
-    "For this, we can set the `candidate_func` parameter to a custom candidate function that filters the list of candidate agents for speaker selection on each turn of the group chat.\n",
-    "\n",
-    "This allows us to restrict speaker selection to a specific set of agents after a given agent has spoken.\n",
-    "\n",
-    "```{note}\n",
-    "The `candidate_func` is only valid if `selector_func` is not set.\n",
-    "Returning `None` or an empty list `[]` from the custom candidate function will raise a `ValueError`.\n",
-    "```"
WebSearchAgent: Find the total rebound statistics for that identified player for both the 2007-2008 and 2008-2009 NBA seasons.\n", - "3. DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons once the data is retrieved.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_WtR5KTfEIxs3jIO25gjAw7dF', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_WtR5KTfEIxs3jIO25gjAw7dF', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_9HA3DEacUl4WuG2G2PtRkXAO', arguments='{\"start\": 432, \"end\": 527}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='21.99074074074074', name='percentage_change_tool', call_id='call_9HA3DEacUl4WuG2G2PtRkXAO', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "21.99074074074074\n", - "---------- PlanningAgent ----------\n", - "It seems we've missed some context there, so let's assign the subtasks again for clarity:\n", - "\n", - "Based on the search results, Dwyane Wade had the highest points for the Miami Heat in the 2006-2007 season with 1397 points.\n", - "\n", - "Now, let's find the necessary rebound statistics:\n", - "\n", - "2. WebSearchAgent: Find Dwyane Wade's total rebound statistics for both the 2007-2008 and 2008-2009 NBA seasons.\n", - "3. DataAnalystAgent: Once the data is retrieved, calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_3i1wTDSjkGg6Ev8YKYWkZK55', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_NRAs6jHxXRi8zsvpW5WlHAaU', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_3i1wTDSjkGg6Ev8YKYWkZK55', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_NRAs6jHxXRi8zsvpW5WlHAaU', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- PlanningAgent ----------\n", - "The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\n", - "\n", - "Now, let's calculate the percentage change.\n", - "\n", - "3. 
DataAnalystAgent: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_XECA7ezz7VIKbf8IbZYSCSpI', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_XECA7ezz7VIKbf8IbZYSCSpI', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1397 points. The percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\n", - "\n", - "TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=169), metadata={}, content=\"To answer this question, we'll break it down into two main subtasks:\\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Calculate the percentage change in that player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's assign these tasks:\\n\\n1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Find the total rebound statistics for that identified player for both the 2007-2008 and 2008-2009 NBA seasons.\\n3. 
DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons once the data is retrieved.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=324, completion_tokens=28), metadata={}, content=[FunctionCall(id='call_WtR5KTfEIxs3jIO25gjAw7dF', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_WtR5KTfEIxs3jIO25gjAw7dF', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=390, completion_tokens=37), metadata={}, content=[FunctionCall(id='call_9HA3DEacUl4WuG2G2PtRkXAO', arguments='{\"start\": 432, \"end\": 527}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='21.99074074074074', name='percentage_change_tool', call_id='call_9HA3DEacUl4WuG2G2PtRkXAO', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='21.99074074074074', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=413, completion_tokens=137), metadata={}, content=\"It seems we've missed some context there, so let's assign the subtasks again for clarity:\\n\\nBased on the search results, Dwyane Wade had the highest points for the Miami Heat in the 2006-2007 season with 1397 points.\\n\\nNow, let's find the necessary rebound statistics:\\n\\n2. WebSearchAgent: Find Dwyane Wade's total rebound statistics for both the 2007-2008 and 2008-2009 NBA seasons.\\n3. 
DataAnalystAgent: Once the data is retrieved, calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=576, completion_tokens=73), metadata={}, content=[FunctionCall(id='call_3i1wTDSjkGg6Ev8YKYWkZK55', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_NRAs6jHxXRi8zsvpW5WlHAaU', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_3i1wTDSjkGg6Ev8YKYWkZK55', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_NRAs6jHxXRi8zsvpW5WlHAaU', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=612, completion_tokens=84), metadata={}, content=\"The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\\n\\nNow, let's calculate the percentage change.\\n\\n3. DataAnalystAgent: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=720, completion_tokens=21), metadata={}, content=[FunctionCall(id='call_XECA7ezz7VIKbf8IbZYSCSpI', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_XECA7ezz7VIKbf8IbZYSCSpI', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=718, completion_tokens=63), metadata={}, content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1397 points. 
The percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "def candidate_func(messages: Sequence[AgentEvent | ChatMessage]) -> List[str]:\n",
-    "    # Keep the planning agent first so that it can plan out the tasks.\n",
-    "    if messages[-1].source == \"user\":\n",
-    "        return [planning_agent.name]\n",
-    "\n",
-    "    # If the previous speaker is the planning agent and it explicitly asks for the web_search_agent\n",
-    "    # or the data_analyst_agent or both (in case of re-planning or re-assignment of tasks),\n",
-    "    # then return those specific agents.\n",
-    "    last_message = messages[-1]\n",
-    "    if last_message.source == planning_agent.name:\n",
-    "        participants = []\n",
-    "        if web_search_agent.name in last_message.content:\n",
-    "            participants.append(web_search_agent.name)\n",
-    "        if data_analyst_agent.name in last_message.content:\n",
-    "            participants.append(data_analyst_agent.name)\n",
-    "        if participants:\n",
-    "            return participants  # SelectorGroupChat will select from these candidates.\n",
-    "\n",
-    "    # We can assume that the task is finished once the web_search_agent\n",
-    "    # and the data_analyst_agent have taken their turns, so we send\n",
-    "    # in the planning_agent to terminate the chat.\n",
-    "    previous_set_of_agents = set(message.source for message in messages)\n",
-    "    if web_search_agent.name in previous_set_of_agents and data_analyst_agent.name in previous_set_of_agents:\n",
-    "        return [planning_agent.name]\n",
-    "\n",
-    "    # If no condition is met, return all the agents.\n",
-    "    return [planning_agent.name, web_search_agent.name, data_analyst_agent.name]\n",
-    "\n",
-    "\n",
-    "# Reset the previous team and run the chat again with the candidate function.\n",
-    "await team.reset()\n",
-    "team = SelectorGroupChat(\n",
-    "    [planning_agent, web_search_agent, data_analyst_agent],\n",
-    "    model_client=model_client,\n",
-    "    termination_condition=termination,\n",
-    "    candidate_func=candidate_func,\n",
-    ")\n",
-    "\n",
-    "await Console(team.run_stream(task=task))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "You can see from the conversation log that the Planning Agent returns to the conversation once the Web Search Agent and the Data Analyst Agent have taken their turns. It finds that the task was not finished as expected, so it calls the WebSearchAgent again to get the rebound values and then calls the DataAnalystAgent to get the percentage change."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## User Feedback\n",
-    "\n",
-    "We can add {py:class}`~autogen_agentchat.agents.UserProxyAgent` to the team to\n",
-    "provide user feedback during a run.\n",
-    "See [Human-in-the-Loop](./tutorial/human-in-the-loop.ipynb) for more details\n",
-    "about {py:class}`~autogen_agentchat.agents.UserProxyAgent`.\n",
-    "\n",
-    "To use the {py:class}`~autogen_agentchat.agents.UserProxyAgent` in the\n",
-    "web search example, we simply add it to the team and update the selector function\n",
-    "to always check for user feedback after the planning agent speaks.\n",
-    "If the user responds with `\"APPROVE\"`, the conversation continues; otherwise,\n",
-    "the planning agent tries again until the user approves."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- PlanningAgent ----------\n", - "To address the user's query, we will need to perform the following tasks:\n", - "\n", - "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", - "2. Find the total rebounds for that player in the 2007-2008 season.\n", - "3. Find the total rebounds for that player in the 2008-2009 season.\n", - "4. Calculate the percentage change in the total rebounds between the 2007-2008 and 2008-2009 seasons.\n", - "\n", - "Let's assign these tasks:\n", - "\n", - "1. **WebSearchAgent**: Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", - " \n", - "(Task 2 and 3 depend on the result of Task 1. We'll proceed with Tasks 2 and 3 once Task 1 is complete.)\n", - "---------- UserProxyAgent ----------\n", - "approve\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_0prr3fUnG5CtisUG7QeygW0w', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_0prr3fUnG5CtisUG7QeygW0w')]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- PlanningAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points.\n", - "\n", - "Next, we need to find Dwyane Wade's total rebounds for the 2007-2008 and 2008-2009 seasons:\n", - "\n", - "2. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2007-2008 season.\n", - "3. 
**WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2008-2009 season.\n", - "---------- UserProxyAgent ----------\n", - "approve\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_fBZe80NaBfruOVGwRWbhXyRm', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_cURYibna4fGxySiL7IYt0c3s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_fBZe80NaBfruOVGwRWbhXyRm'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_cURYibna4fGxySiL7IYt0c3s')]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- PlanningAgent ----------\n", - "Now that we have Dwyane Wade's total rebounds for both seasons, we can calculate the percentage change:\n", - "\n", - "4. **DataAnalystAgent**: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds).\n", - "---------- UserProxyAgent ----------\n", - "approve\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_z3uog7t2x0z1Suzl5hACF9hY', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', call_id='call_z3uog7t2x0z1Suzl5hACF9hY')]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season, which is a percentage change of approximately 85.98%.\n", - "\n", - "TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=166), content=\"To address the user's query, we will need to perform the following tasks:\\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Find the total rebounds for that player in the 2007-2008 season.\\n3. Find the total rebounds for that player in the 2008-2009 season.\\n4. Calculate the percentage change in the total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's assign these tasks:\\n\\n1. **WebSearchAgent**: Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n \\n(Task 2 and 3 depend on the result of Task 1. 
We'll proceed with Tasks 2 and 3 once Task 1 is complete.)\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='2a433f88-f886-4b39-a078-ea1acdcb2f9d', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=323, completion_tokens=28), content=[FunctionCall(id='call_0prr3fUnG5CtisUG7QeygW0w', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_0prr3fUnG5CtisUG7QeygW0w')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=403, completion_tokens=112), content=\"Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points.\\n\\nNext, we need to find Dwyane Wade's total rebounds for the 2007-2008 and 2008-2009 seasons:\\n\\n2. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2007-2008 season.\\n3. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2008-2009 season.\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='23dd4570-2391-41e9-aeea-86598499792c', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=543, completion_tokens=73), content=[FunctionCall(id='call_fBZe80NaBfruOVGwRWbhXyRm', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_cURYibna4fGxySiL7IYt0c3s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_fBZe80NaBfruOVGwRWbhXyRm'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_cURYibna4fGxySiL7IYt0c3s')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=586, completion_tokens=70), content=\"Now that we have Dwyane Wade's total rebounds for both seasons, we can calculate the percentage change:\\n\\n4. 
**DataAnalystAgent**: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds).\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='e849d193-4ab3-4558-8560-7dbc062a0aee', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=655, completion_tokens=21), content=[FunctionCall(id='call_z3uog7t2x0z1Suzl5hACF9hY', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_z3uog7t2x0z1Suzl5hACF9hY')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=687, completion_tokens=74), content='Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season, which is a percentage change of approximately 85.98%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "user_proxy_agent = UserProxyAgent(\"UserProxyAgent\", description=\"A proxy for the user to approve or disapprove tasks.\")\n", - "\n", - "\n", - "def selector_func_with_user_proxy(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:\n", - " if messages[-1].source != planning_agent.name and messages[-1].source != user_proxy_agent.name:\n", - " # Planning agent should be the first to engage when given a new task, or check progress.\n", - " return planning_agent.name\n", - " if messages[-1].source == planning_agent.name:\n", - " if messages[-2].source == user_proxy_agent.name and \"APPROVE\" in messages[-1].content.upper(): # type: ignore\n", - " # User has approved the plan, proceed to the next agent.\n", - " return None\n", - " # Use the user proxy agent to get the user's approval to proceed.\n", - " return user_proxy_agent.name\n", - " if messages[-1].source == user_proxy_agent.name:\n", - " # If the user does not approve, return to the planning agent.\n", - " if \"APPROVE\" not in messages[-1].content.upper(): # type: ignore\n", - " return planning_agent.name\n", - " return None\n", - "\n", - "\n", - "# Reset the previous agents and run the chat again with the user proxy agent and selector function.\n", - "await team.reset()\n", - "team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent, user_proxy_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination,\n", - " selector_prompt=selector_prompt,\n", - " selector_func=selector_func_with_user_proxy,\n", - " allow_repeated_speaker=True,\n", - ")\n", - "\n", - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, the user's feedback is incorporated into the conversation flow,\n", - "and the user can approve or reject the planning agent's decisions." 
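An editorial aside on the selector function above (a sketch, not part of this patch): the approval checks call `.content.upper()` on the last message and suppress the type checker with `# type: ignore`, which silently assumes the message carries string content. A small helper that narrows the type first keeps the same behavior without the ignore comments; `is_approval` is a hypothetical name introduced here purely for illustration:

```python
from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage


def is_approval(message: AgentEvent | ChatMessage) -> bool:
    """Return True only for text messages that mention 'APPROVE'."""
    # Only TextMessage is guaranteed to carry plain string content,
    # so narrow the type before calling .upper().
    return isinstance(message, TextMessage) and "APPROVE" in message.content.upper()
```

Inside `selector_func_with_user_proxy`, `is_approval(messages[-1])` could then replace the two `# type: ignore` checks.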
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using Reasoning Models\n", - "\n", - "So far in the examples, we have used a `gpt-4o` model. Models like `gpt-4o`\n", - "and `gemini-1.5-flash` are great at following instructions, so you can\n", - "have relatively detailed instructions in the selector prompt for the team and the \n", - "system messages for each agent to guide their behavior.\n", - "\n", - "However, if you are using a reasoning model like `o3-mini`, you will need to\n", - "keep the selector prompt and system messages as simple and to the point as possible.\n", - "This is because the reasoning models are already good at coming up with their own \n", - "instructions given the context provided to them.\n", - "\n", - "This also means that we don't need a planning agent to break down the task\n", - "anymore, since the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` that\n", - "uses a reasoning model can do that on its own.\n", - "\n", - "In the following example, we will use `o3-mini` as the model for the\n", - "agents and the team, and we will not use a planning agent.\n", - "Also, we are keeping the selector prompt and system messages as simple as possible." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "model_client = OpenAIChatCompletionClient(model=\"o3-mini\")\n", - "\n", - "web_search_agent = AssistantAgent(\n", - " \"WebSearchAgent\",\n", - " description=\"An agent for searching information on the web.\",\n", - " tools=[search_web_tool],\n", - " model_client=model_client,\n", - " system_message=\"\"\"Use web search tool to find information.\"\"\",\n", - ")\n", - "\n", - "data_analyst_agent = AssistantAgent(\n", - " \"DataAnalystAgent\",\n", - " description=\"An agent for performing calculations.\",\n", - " model_client=model_client,\n", - " tools=[percentage_change_tool],\n", - " system_message=\"\"\"Use tool to perform calculation. 
If you have not seen the data, ask for it.\"\"\",\n", - ")\n", - "\n", - "user_proxy_agent = UserProxyAgent(\n", - " \"UserProxyAgent\",\n", - " description=\"A user to approve or disapprove tasks.\",\n", - ")\n", - "\n", - "selector_prompt = \"\"\"Select an agent to perform task.\n", - "\n", - "{roles}\n", - "\n", - "Current conversation context:\n", - "{history}\n", - "\n", - "Read the above conversation, then select an agent from {participants} to perform the next task.\n", - "When the task is complete, let the user approve or disapprove the task.\n", - "\"\"\"\n", - "\n", - "team = SelectorGroupChat(\n", - " [web_search_agent, data_analyst_agent, user_proxy_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination, # Use the same termination condition as before.\n", - " selector_prompt=selector_prompt,\n", - " allow_repeated_speaker=True,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', arguments='{\"query\": \"Who was the Miami Heat player with the highest points in the 2006-2007 season Miami Heat statistics Dwyane Wade rebounds percentage change 2007-2008 2008-2009 seasons\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- DataAnalystAgent ----------\n", - "I found that in the 2006–2007 season the player with the highest points was Dwyane Wade (with 1,397 points). 
Could you please provide Dwyane Wade’s total rebounds for the 2007–2008 and the 2008–2009 seasons so I can calculate the percentage change?\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_lppGTILXDvO9waPwKO66ehK6', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 and 2008-2009 seasons for Miami Heat\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_lppGTILXDvO9waPwKO66ehK6', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "---------- DataAnalystAgent ----------\n", - "Could you please provide Dwyane Wade’s total rebounds in the 2008-2009 season?\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_r8DBcbJtQfdtugLtyTrqOvoK', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season Miami Heat\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_r8DBcbJtQfdtugLtyTrqOvoK', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_4jejv1wM7V1osbBCxJze8aQM', arguments='{\"start\": 214, \"end\": 398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', call_id='call_4jejv1wM7V1osbBCxJze8aQM', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- DataAnalystAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest total points (1,397) during the 2006-2007 season. His total rebounds increased by approximately 86% from 214 in the 2007-2008 season to 398 in the 2008-2009 season.\n", - "---------- UserProxyAgent ----------\n", - "Approve. 
TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=103, completion_tokens=384), content=[FunctionCall(id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', arguments='{\"query\": \"Who was the Miami Heat player with the highest points in the 2006-2007 season Miami Heat statistics Dwyane Wade rebounds percentage change 2007-2008 2008-2009 seasons\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=183, completion_tokens=1038), content='I found that in the 2006–2007 season the player with the highest points was Dwyane Wade (with 1,397 points). Could you please provide Dwyane Wade’s total rebounds for the 2007–2008 and the 2008–2009 seasons so I can calculate the percentage change?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=299, completion_tokens=109), content=[FunctionCall(id='call_lppGTILXDvO9waPwKO66ehK6', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 and 2008-2009 seasons for Miami Heat\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_lppGTILXDvO9waPwKO66ehK6', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=291, completion_tokens=224), content='Could you please provide Dwyane Wade’s total rebounds in the 2008-2009 season?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=401, completion_tokens=37), content=[FunctionCall(id='call_r8DBcbJtQfdtugLtyTrqOvoK', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season Miami Heat\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_r8DBcbJtQfdtugLtyTrqOvoK', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of 
total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=353, completion_tokens=158), content=[FunctionCall(id='call_4jejv1wM7V1osbBCxJze8aQM', arguments='{\"start\": 214, \"end\": 398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_4jejv1wM7V1osbBCxJze8aQM', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=394, completion_tokens=138), content='Dwyane Wade was the Miami Heat player with the highest total points (1,397) during the 2006-2007 season. His total rebounds increased by approximately 86% from 214 in the 2007-2008 season to 398 in the 2008-2009 season.', type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='b3b05408-73fc-47d4-b832-16c9f447cd6e', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='Approve. TERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{tip}\n", - "For more guidance on how to prompt reasoning models, see the\n", - "Azure AI Services Blog on [Prompt Engineering for OpenAI's O1 and O3-mini Reasoning Models](https://techcommunity.microsoft.com/blog/azure-ai-services-blog/prompt-engineering-for-openai%E2%80%99s-o1-and-o3-mini-reasoning-models/4374010)\n", - "```" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb index 8c9701550dad..653e2eb9b16a 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb @@ -1,517 +1,517 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Termination \n", - "\n", - "In the previous section, we explored how to define agents, and organize them into teams that can solve tasks. However, a run can go on forever, and in many cases, we need to know _when_ to stop them. 
This is the role of the termination condition.\n", - "\n", - "AgentChat supports several termination condition by providing a base {py:class}`~autogen_agentchat.base.TerminationCondition` class and several implementations that inherit from it.\n", - "\n", - "A termination condition is a callable that takes a sequence of {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n", - "Once a termination condition has been reached, it must be reset by calling {py:meth}`~autogen_agentchat.base.TerminationCondition.reset` before it can be used again.\n", - "\n", - "Some important things to note about termination conditions: \n", - "- They are stateful but reset automatically after each run ({py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`) is finished.\n", - "- They can be combined using the AND and OR operators.\n", - "\n", - "```{note}\n", - "For group chat teams (i.e., {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat`, and {py:class}`~autogen_agentchat.teams.Swarm`),\n", - "the termination condition is called after each agent responds.\n", - "While a response may contain multiple inner messages, the team calls its termination condition just once for all the messages from a single response.\n", - "So the condition is called with the \"delta sequence\" of messages since the last time it was called.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Built-In Termination Conditions: \n", - "1. {py:class}`~autogen_agentchat.conditions.MaxMessageTermination`: Stops after a specified number of messages have been produced, including both agent and task messages.\n", - "2. {py:class}`~autogen_agentchat.conditions.TextMentionTermination`: Stops when specific text or string is mentioned in a message (e.g., \"TERMINATE\").\n", - "3. {py:class}`~autogen_agentchat.conditions.TokenUsageTermination`: Stops when a certain number of prompt or completion tokens are used. This requires the agents to report token usage in their messages.\n", - "4. {py:class}`~autogen_agentchat.conditions.TimeoutTermination`: Stops after a specified duration in seconds.\n", - "5. {py:class}`~autogen_agentchat.conditions.HandoffTermination`: Stops when a handoff to a specific target is requested. Handoff messages can be used to build patterns such as {py:class}`~autogen_agentchat.teams.Swarm`. This is useful when you want to pause the run and allow application or user to provide input when an agent hands off to them.\n", - "6. {py:class}`~autogen_agentchat.conditions.SourceMatchTermination`: Stops after a specific agent responds.\n", - "7. {py:class}`~autogen_agentchat.conditions.ExternalTermination`: Enables programmatic control of termination from outside the run. This is useful for UI integration (e.g., \"Stop\" buttons in chat interfaces).\n", - "8. {py:class}`~autogen_agentchat.conditions.StopMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.StopMessage` is produced by an agent.\n", - "9. {py:class}`~autogen_agentchat.conditions.TextMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.TextMessage` is produced by an agent.\n", - "10. 
{py:class}`~autogen_agentchat.conditions.FunctionCallTermination`: Stops when a {py:class}`~autogen_agentchat.messages.ToolCallExecutionEvent` containing a {py:class}`~autogen_core.models.FunctionExecutionResult` with a matching name is produced by an agent." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Basic Usage\n", - "\n", - "To demonstrate the characteristics of termination conditions, we'll create a team consisting of two agents: a primary agent responsible for text generation and a critic agent that reviews and provides feedback on the generated text." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " temperature=1,\n", - " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", - ")\n", - "\n", - "# Create the primary agent.\n", - "primary_agent = AssistantAgent(\n", - " \"primary\",\n", - " model_client=model_client,\n", - " system_message=\"You are a helpful AI assistant.\",\n", - ")\n", - "\n", - "# Create the critic agent.\n", - "critic_agent = AssistantAgent(\n", - " \"critic\",\n", - " model_client=model_client,\n", - " system_message=\"Provide constructive feedback for every message. Respond with 'APPROVE' to when your feedbacks are addressed.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's explore how termination conditions automatically reset after each `run` or `run_stream` call, allowing the team to resume its conversation from where it left off." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a unique, Haiku about the weather in Paris\n", - "---------- primary ----------\n", - "Gentle rain whispers, \n", - "Cobblestones glisten softly— \n", - "Paris dreams in gray.\n", - "[Prompt tokens: 30, Completion tokens: 19]\n", - "---------- critic ----------\n", - "The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. 
Consider revising the first line to fit the structure.\n", - "\n", - "For example:\n", - "Soft rain whispers down, \n", - "Cobblestones glisten softly — \n", - "Paris dreams in gray.\n", - "\n", - "This revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\n", - "[Prompt tokens: 70, Completion tokens: 120]\n", - "---------- Summary ----------\n", - "Number of messages: 3\n", - "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", - "Total prompt tokens: 100\n", - "Total completion tokens: 139\n", - "Duration: 3.34 seconds\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Termination \n", + "\n", + "In the previous section, we explored how to define agents and organize them into teams that can solve tasks. However, a run can go on forever, and in many cases, we need to know _when_ to stop them. This is the role of the termination condition.\n", + "\n", + "AgentChat supports several termination conditions by providing a base {py:class}`~autogen_agentchat.base.TerminationCondition` class and several implementations that inherit from it.\n", + "\n", + "A termination condition is a callable that takes a sequence of {py:class}`~autogen_agentchat.messages.BaseMessage` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n", + "Once a termination condition has been reached, it must be reset by calling {py:meth}`~autogen_agentchat.base.TerminationCondition.reset` before it can be used again.\n", + "\n", + "Some important things to note about termination conditions: \n", + "- They are stateful but reset automatically after each run ({py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`) is finished.\n", + "- They can be combined using the AND and OR operators.\n", + "\n", + "```{note}\n", + "For group chat teams (i.e., {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat`, and {py:class}`~autogen_agentchat.teams.Swarm`),\n", + "the termination condition is called after each agent responds.\n", + "While a response may contain multiple inner messages, the team calls its termination condition just once for all the messages from a single response.\n", + "So the condition is called with the \"delta sequence\" of messages since the last time it was called.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Built-In Termination Conditions: \n", + "1. {py:class}`~autogen_agentchat.conditions.MaxMessageTermination`: Stops after a specified number of messages have been produced, including both agent and task messages.\n", + "2. {py:class}`~autogen_agentchat.conditions.TextMentionTermination`: Stops when specific text or string is mentioned in a message (e.g., \"TERMINATE\").\n", + "3. {py:class}`~autogen_agentchat.conditions.TokenUsageTermination`: Stops when a certain number of prompt or completion tokens are used. This requires the agents to report token usage in their messages.\n", + "4. {py:class}`~autogen_agentchat.conditions.TimeoutTermination`: Stops after a specified duration in seconds.\n", + "5. {py:class}`~autogen_agentchat.conditions.HandoffTermination`: Stops when a handoff to a specific target is requested.
Handoff messages can be used to build patterns such as {py:class}`~autogen_agentchat.teams.Swarm`. This is useful when you want to pause the run and allow the application or user to provide input when an agent hands off to them.\n", + "6. {py:class}`~autogen_agentchat.conditions.SourceMatchTermination`: Stops after a specific agent responds.\n", + "7. {py:class}`~autogen_agentchat.conditions.ExternalTermination`: Enables programmatic control of termination from outside the run. This is useful for UI integration (e.g., \"Stop\" buttons in chat interfaces).\n", + "8. {py:class}`~autogen_agentchat.conditions.StopMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.StopMessage` is produced by an agent.\n", + "9. {py:class}`~autogen_agentchat.conditions.TextMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.TextMessage` is produced by an agent.\n", + "10. {py:class}`~autogen_agentchat.conditions.FunctionCallTermination`: Stops when a {py:class}`~autogen_agentchat.messages.ToolCallExecutionEvent` containing a {py:class}`~autogen_core.models.FunctionExecutionResult` with a matching name is produced by an agent." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Basic Usage\n", + "\n", + "To demonstrate the characteristics of termination conditions, we'll create a team consisting of two agents: a primary agent responsible for text generation and a critic agent that reviews and provides feedback on the generated text." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", + "from autogen_agentchat.teams import RoundRobinGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " temperature=1,\n", + " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", + ")\n", + "\n", + "# Create the primary agent.\n", + "primary_agent = AssistantAgent(\n", + " \"primary\",\n", + " model_client=model_client,\n", + " system_message=\"You are a helpful AI assistant.\",\n", + ")\n", + "\n", + "# Create the critic agent.\n", + "critic_agent = AssistantAgent(\n", + " \"critic\",\n", + " model_client=model_client,\n", + " system_message=\"Provide constructive feedback for every message. Respond with 'APPROVE' when your feedback is addressed.\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's explore how termination conditions automatically reset after each `run` or `run_stream` call, allowing the team to resume its conversation from where it left off." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write a unique, Haiku about the weather in Paris\n", + "---------- primary ----------\n", + "Gentle rain whispers, \n", + "Cobblestones glisten softly— \n", + "Paris dreams in gray.\n", + "[Prompt tokens: 30, Completion tokens: 19]\n", + "---------- critic ----------\n", + "The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus.
Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. Consider revising the first line to fit the structure.\n", + "\n", + "For example:\n", + "Soft rain whispers down, \n", + "Cobblestones glisten softly — \n", + "Paris dreams in gray.\n", + "\n", + "This revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\n", + "[Prompt tokens: 70, Completion tokens: 120]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", + "Total prompt tokens: 100\n", + "Total completion tokens: 139\n", + "Duration: 3.34 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=19), content='Gentle rain whispers, \\nCobblestones glisten softly— \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=70, completion_tokens=120), content=\"The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. Consider revising the first line to fit the structure.\\n\\nFor example:\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.\\n\\nThis revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "max_msg_termination = MaxMessageTermination(max_messages=3)\n", + "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=max_msg_termination)\n", + "\n", + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The conversation stopped after reaching the maximum message limit. Since the primary agent didn't get to respond to the feedback, let's continue the conversation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- primary ----------\n", + "Thank you for your feedback. Here is the revised Haiku:\n", + "\n", + "Soft rain whispers down, \n", + "Cobblestones glisten softly — \n", + "Paris dreams in gray.\n", + "[Prompt tokens: 181, Completion tokens: 32]\n", + "---------- critic ----------\n", + "The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \n", + "\n", + "APPROVE\n", + "[Prompt tokens: 234, Completion tokens: 54]\n", + "---------- primary ----------\n", + "Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. 
If you have any more requests or need further assistance, feel free to ask!\n", + "[Prompt tokens: 279, Completion tokens: 39]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", + "Total prompt tokens: 694\n", + "Total completion tokens: 125\n", + "Duration: 6.43 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=181, completion_tokens=32), content='Thank you for your feedback. Here is the revised Haiku:\\n\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=234, completion_tokens=54), content='The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \\n\\nAPPROVE'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=279, completion_tokens=39), content=\"Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The team continued from where it left off, allowing the primary agent to respond to the feedback." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Combining Termination Conditions\n", + "\n", + "Let's show how termination conditions can be combined using the AND (`&`) and OR (`|`) operators to create more complex termination logic. For example, we'll create a team that stops either after 10 messages are generated or when the critic agent approves a message.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write a unique, Haiku about the weather in Paris\n", + "---------- primary ----------\n", + "Spring breeze gently hums, \n", + "Cherry blossoms in full bloom— \n", + "Paris wakes to life.\n", + "[Prompt tokens: 467, Completion tokens: 19]\n", + "---------- critic ----------\n", + "The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. 
Excellent work!\n", + "\n", + "APPROVE\n", + "[Prompt tokens: 746, Completion tokens: 93]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Text 'APPROVE' mentioned\n", + "Total prompt tokens: 1213\n", + "Total completion tokens: 112\n", + "Duration: 2.75 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=467, completion_tokens=19), content='Spring breeze gently hums, \\nCherry blossoms in full bloom— \\nParis wakes to life.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=746, completion_tokens=93), content='The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\\n\\nAPPROVE')], stop_reason=\"Text 'APPROVE' mentioned\")" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "max_msg_termination = MaxMessageTermination(max_messages=10)\n", + "text_termination = TextMentionTermination(\"APPROVE\")\n", + "combined_termination = max_msg_termination | text_termination\n", + "\n", + "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=combined_termination)\n", + "\n", + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The conversation stopped after the critic agent approved the message, although it could have also stopped if 10 messages were generated.\n", + "\n", + "Alternatively, if we want to stop the run only when both conditions are met, we can use the AND (`&`) operator." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "combined_termination = max_msg_termination & text_termination" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom Termination Condition\n", + "\n", + "The built-in termination conditions are sufficient for most use cases.\n", + "However, there may be cases where you need to implement a custom termination condition that doesn't fit into the existing ones.\n", + "You can do this by subclassing the {py:class}`~autogen_agentchat.base.TerminationCondition` class.\n", + "\n", + "In this example, we create a custom termination condition that stops the conversation when\n", + "a specific function call is made." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import Sequence\n", + "\n", + "from autogen_agentchat.base import TerminatedException, TerminationCondition\n", + "from autogen_agentchat.messages import BaseMessage, StopMessage, ToolCallExecutionEvent\n", + "from autogen_core import Component\n", + "from pydantic import BaseModel\n", + "from typing_extensions import Self\n", + "\n", + "\n", + "class FunctionCallTerminationConfig(BaseModel):\n", + " \"\"\"Configuration for the termination condition to allow for serialization\n", + " and deserialization of the component.\n", + " \"\"\"\n", + "\n", + " function_name: str\n", + "\n", + "\n", + "class FunctionCallTermination(TerminationCondition, Component[FunctionCallTerminationConfig]):\n", + " \"\"\"Terminate the conversation if a FunctionExecutionResult with a specific name is received.\"\"\"\n", + "\n", + " component_config_schema = FunctionCallTerminationConfig\n", + " \"\"\"The schema for the component configuration.\"\"\"\n", + "\n", + " def __init__(self, function_name: str) -> None:\n", + " self._terminated = False\n", + " self._function_name = function_name\n", + "\n", + " @property\n", + " def terminated(self) -> bool:\n", + " return self._terminated\n", + "\n", + " async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None:\n", + " if self._terminated:\n", + " raise TerminatedException(\"Termination condition has already been reached\")\n", + " for message in messages:\n", + " if isinstance(message, ToolCallExecutionEvent):\n", + " for execution in message.content:\n", + " if execution.name == self._function_name:\n", + " self._terminated = True\n", + " return StopMessage(\n", + " content=f\"Function '{self._function_name}' was executed.\",\n", + " source=\"FunctionCallTermination\",\n", + " )\n", + " return None\n", + "\n", + " async def reset(self) -> None:\n", + " self._terminated = False\n", + "\n", + " def _to_config(self) -> FunctionCallTerminationConfig:\n", + " return FunctionCallTerminationConfig(\n", + " function_name=self._function_name,\n", + " )\n", + "\n", + " @classmethod\n", + " def _from_config(cls, config: FunctionCallTerminationConfig) -> Self:\n", + " return cls(\n", + " function_name=config.function_name,\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's use this new termination condition to stop the conversation when the critic agent approves a message\n", + "using the `approve` function call.\n", + "\n", + "First, we create a simple function that will be called when the critic agent approves a message." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def approve() -> None:\n", + " \"\"\"Approve the message when all feedback has been addressed.\"\"\"\n", + " pass" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then we create the agents. The critic agent is equipped with the `approve` tool."
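As a possible follow-up (a sketch, not part of this patch): because termination conditions compose with the `|` and `&` operators described earlier in this notebook, the custom condition can be paired with a built-in safety net so a run still ends even if the critic never calls `approve`. The 20-message cap below is an arbitrary illustrative choice:

```python
from autogen_agentchat.conditions import MaxMessageTermination

# Stop when the `approve` function is executed, or after 20 messages, whichever comes first.
termination = FunctionCallTermination(function_name="approve") | MaxMessageTermination(max_messages=20)
```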
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.teams import RoundRobinGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " temperature=1,\n", + " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", + ")\n", + "\n", + "# Create the primary agent.\n", + "primary_agent = AssistantAgent(\n", + " \"primary\",\n", + " model_client=model_client,\n", + " system_message=\"You are a helpful AI assistant.\",\n", + ")\n", + "\n", + "# Create the critic agent with the approve function as a tool.\n", + "critic_agent = AssistantAgent(\n", + " \"critic\",\n", + " model_client=model_client,\n", + " tools=[approve], # Register the approve function as a tool.\n", + " system_message=\"Provide constructive feedback. Use the approve tool to approve when all feedback is addressed.\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, we create the termination condition and the team.\n", + "We run the team with the poem-writing task." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write a unique, Haiku about the weather in Paris\n", + "---------- primary ----------\n", + "Raindrops gently fall, \n", + "Cobblestones shine in dim light— \n", + "Paris dreams in grey. \n", + "---------- critic ----------\n", + "This Haiku beautifully captures a melancholic yet romantic image of Paris in the rain. The use of sensory imagery like \"Raindrops gently fall\" and \"Cobblestones shine\" effectively paints a vivid picture. It could be interesting to experiment with more distinct seasonal elements of Paris, such as incorporating the Seine River or iconic landmarks in the context of the weather. Overall, it successfully conveys the atmosphere of Paris in subtle, poetic imagery.\n", + "---------- primary ----------\n", + "Thank you for your feedback! I’m glad you enjoyed the imagery. Here’s another Haiku that incorporates iconic Parisian elements:\n", + "\n", + "Eiffel stands in mist, \n", + "Seine's ripple mirrors the sky— \n", + "Spring whispers anew. \n", + "---------- critic ----------\n", + "[FunctionCall(id='call_QEWJZ873EG4UIEpsQHi1HsAu', arguments='{}', name='approve')]\n", + "---------- critic ----------\n", + "[FunctionExecutionResult(content='None', name='approve', call_id='call_QEWJZ873EG4UIEpsQHi1HsAu', is_error=False)]\n", + "---------- critic ----------\n", + "None\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Write a unique, Haiku about the weather in Paris', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=23), metadata={}, content='Raindrops gently fall, \\nCobblestones shine in dim light— \\nParis dreams in grey. ', type='TextMessage'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=99, completion_tokens=90), metadata={}, content='This Haiku beautifully captures a melancholic yet romantic image of Paris in the rain.
The use of sensory imagery like \"Raindrops gently fall\" and \"Cobblestones shine\" effectively paints a vivid picture. It could be interesting to experiment with more distinct seasonal elements of Paris, such as incorporating the Seine River or iconic landmarks in the context of the weather. Overall, it successfully conveys the atmosphere of Paris in subtle, poetic imagery.', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=152, completion_tokens=48), metadata={}, content=\"Thank you for your feedback! I’m glad you enjoyed the imagery. Here’s another Haiku that incorporates iconic Parisian elements:\\n\\nEiffel stands in mist, \\nSeine's ripple mirrors the sky— \\nSpring whispers anew. \", type='TextMessage'), ToolCallRequestEvent(source='critic', models_usage=RequestUsage(prompt_tokens=246, completion_tokens=11), metadata={}, content=[FunctionCall(id='call_QEWJZ873EG4UIEpsQHi1HsAu', arguments='{}', name='approve')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='critic', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='None', name='approve', call_id='call_QEWJZ873EG4UIEpsQHi1HsAu', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='critic', models_usage=None, metadata={}, content='None', type='ToolCallSummaryMessage')], stop_reason=\"Function 'approve' was executed.\")" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "function_call_termination = FunctionCallTermination(function_name=\"approve\")\n", + "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=function_call_termination)\n", + "\n", + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))\n", + "await model_client.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see that the conversation stopped when the critic agent approved the message using the `approve` function call." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=19), content='Gentle rain whispers, \\nCobblestones glisten softly— \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=70, completion_tokens=120), content=\"The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. 
Consider revising the first line to fit the structure.\\n\\nFor example:\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.\\n\\nThis revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "max_msg_termination = MaxMessageTermination(max_messages=3)\n", - "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=max_msg_termination)\n", - "\n", - "# Use asyncio.run(...) if you are running this script as a standalone script.\n", - "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The conversation stopped after reaching the maximum message limit. Since the primary agent didn't get to respond to the feedback, let's continue the conversation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- primary ----------\n", - "Thank you for your feedback. Here is the revised Haiku:\n", - "\n", - "Soft rain whispers down, \n", - "Cobblestones glisten softly — \n", - "Paris dreams in gray.\n", - "[Prompt tokens: 181, Completion tokens: 32]\n", - "---------- critic ----------\n", - "The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \n", - "\n", - "APPROVE\n", - "[Prompt tokens: 234, Completion tokens: 54]\n", - "---------- primary ----------\n", - "Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\n", - "[Prompt tokens: 279, Completion tokens: 39]\n", - "---------- Summary ----------\n", - "Number of messages: 3\n", - "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", - "Total prompt tokens: 694\n", - "Total completion tokens: 125\n", - "Duration: 6.43 seconds\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=181, completion_tokens=32), content='Thank you for your feedback. Here is the revised Haiku:\\n\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=234, completion_tokens=54), content='The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \\n\\nAPPROVE'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=279, completion_tokens=39), content=\"Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. 
If you have any more requests or need further assistance, feel free to ask!\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Use asyncio.run(...) if you are running this script as a standalone script.\n", - "await Console(round_robin_team.run_stream())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The team continued from where it left off, allowing the primary agent to respond to the feedback." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Combining Termination Conditions\n", - "\n", - "Let's show how termination conditions can be combined using the AND (`&`) and OR (`|`) operators to create more complex termination logic. For example, we'll create a team that stops either after 10 messages are generated or when the critic agent approves a message.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a unique, Haiku about the weather in Paris\n", - "---------- primary ----------\n", - "Spring breeze gently hums, \n", - "Cherry blossoms in full bloom— \n", - "Paris wakes to life.\n", - "[Prompt tokens: 467, Completion tokens: 19]\n", - "---------- critic ----------\n", - "The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\n", - "\n", - "APPROVE\n", - "[Prompt tokens: 746, Completion tokens: 93]\n", - "---------- Summary ----------\n", - "Number of messages: 3\n", - "Finish reason: Text 'APPROVE' mentioned\n", - "Total prompt tokens: 1213\n", - "Total completion tokens: 112\n", - "Duration: 2.75 seconds\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=467, completion_tokens=19), content='Spring breeze gently hums, \\nCherry blossoms in full bloom— \\nParis wakes to life.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=746, completion_tokens=93), content='The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. 
Excellent work!\\n\\nAPPROVE')], stop_reason=\"Text 'APPROVE' mentioned\")" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "max_msg_termination = MaxMessageTermination(max_messages=10)\n", - "text_termination = TextMentionTermination(\"APPROVE\")\n", - "combined_termination = max_msg_termination | text_termination\n", - "\n", - "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=combined_termination)\n", - "\n", - "# Use asyncio.run(...) if you are running this script as a standalone script.\n", - "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The conversation stopped after the critic agent approved the message, although it could have also stopped if 10 messages were generated.\n", - "\n", - "Alternatively, if we want to stop the run only when both conditions are met, we can use the AND (`&`) operator." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "combined_termination = max_msg_termination & text_termination" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom Termination Condition\n", - "\n", - "The built-in termination conditions are sufficient for most use cases.\n", - "However, there may be cases where you need to implement a custom termination condition that doesn't fit into the existing ones.\n", - "You can do this by subclassing the {py:class}`~autogen_agentchat.base.TerminationCondition` class.\n", - "\n", - "In this example, we create a custom termination condition that stops the conversation when\n", - "a specific function call is made." 
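First, a quick aside on the composed conditions above: termination conditions are directly awaitable with a sequence of messages (see the `__call__` signature in the custom condition below), so the OR/AND semantics can be checked without running a team. A minimal sketch; the message content is illustrative.

import asyncio

from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination
from autogen_agentchat.messages import TextMessage


async def check_composed_conditions() -> None:
    # OR (`|`): stops as soon as either condition fires.
    either = MaxMessageTermination(max_messages=10) | TextMentionTermination("APPROVE")
    print(await either([TextMessage(content="Looks good. APPROVE", source="critic")]))
    # -> StopMessage: "APPROVE" was mentioned before the message cap was reached.

    # AND (`&`): both conditions must fire before the run stops.
    both = MaxMessageTermination(max_messages=2) & TextMentionTermination("APPROVE")
    print(await both([TextMessage(content="APPROVE", source="critic")]))
    # -> None: the text matched, but the message budget is not yet exhausted.


asyncio.run(check_composed_conditions())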
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Sequence\n", - "\n", - "from autogen_agentchat.base import TerminatedException, TerminationCondition\n", - "from autogen_agentchat.messages import AgentEvent, ChatMessage, StopMessage, ToolCallExecutionEvent\n", - "from autogen_core import Component\n", - "from pydantic import BaseModel\n", - "from typing_extensions import Self\n", - "\n", - "\n", - "class FunctionCallTerminationConfig(BaseModel):\n", - " \"\"\"Configuration for the termination condition to allow for serialization\n", - " and deserialization of the component.\n", - " \"\"\"\n", - "\n", - " function_name: str\n", - "\n", - "\n", - "class FunctionCallTermination(TerminationCondition, Component[FunctionCallTerminationConfig]):\n", - " \"\"\"Terminate the conversation if a FunctionExecutionResult with a specific name is received.\"\"\"\n", - "\n", - " component_config_schema = FunctionCallTerminationConfig\n", - " \"\"\"The schema for the component configuration.\"\"\"\n", - "\n", - " def __init__(self, function_name: str) -> None:\n", - " self._terminated = False\n", - " self._function_name = function_name\n", - "\n", - " @property\n", - " def terminated(self) -> bool:\n", - " return self._terminated\n", - "\n", - " async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:\n", - " if self._terminated:\n", - " raise TerminatedException(\"Termination condition has already been reached\")\n", - " for message in messages:\n", - " if isinstance(message, ToolCallExecutionEvent):\n", - " for execution in message.content:\n", - " if execution.name == self._function_name:\n", - " self._terminated = True\n", - " return StopMessage(\n", - " content=f\"Function '{self._function_name}' was executed.\",\n", - " source=\"FunctionCallTermination\",\n", - " )\n", - " return None\n", - "\n", - " async def reset(self) -> None:\n", - " self._terminated = False\n", - "\n", - " def _to_config(self) -> FunctionCallTerminationConfig:\n", - " return FunctionCallTerminationConfig(\n", - " function_name=self._function_name,\n", - " )\n", - "\n", - " @classmethod\n", - " def _from_config(cls, config: FunctionCallTerminationConfig) -> Self:\n", - " return cls(\n", - " function_name=config.function_name,\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's use this new termination condition to stop the conversation when the critic agent approves a message\n", - "using the `approve` function call.\n", - "\n", - "First we create a simple function that will be called when the critic agent approves a message." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "def approve() -> None:\n", - " \"\"\"Approve the message when all feedbacks have been addressed.\"\"\"\n", - " pass" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then we create the agents. The critic agent is equipped with the `approve` tool." 
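Before wiring the condition into a team, here is a quick standalone check of the FunctionCallTermination class defined above, driven by a hand-built ToolCallExecutionEvent rather than a live model call; the call_id is a placeholder.

import asyncio

from autogen_agentchat.messages import ToolCallExecutionEvent
from autogen_core.models import FunctionExecutionResult


async def check_function_call_termination() -> None:
    termination = FunctionCallTermination(function_name="approve")
    event = ToolCallExecutionEvent(
        source="critic",
        content=[FunctionExecutionResult(content="None", name="approve", call_id="call_1", is_error=False)],
    )
    print(await termination([event]))  # StopMessage: Function 'approve' was executed.
    await termination.reset()  # The condition is reusable after a reset.


asyncio.run(check_function_call_termination())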
- ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " temperature=1,\n", - " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", - ")\n", - "\n", - "# Create the primary agent.\n", - "primary_agent = AssistantAgent(\n", - " \"primary\",\n", - " model_client=model_client,\n", - " system_message=\"You are a helpful AI assistant.\",\n", - ")\n", - "\n", - "# Create the critic agent with the approve function as a tool.\n", - "critic_agent = AssistantAgent(\n", - " \"critic\",\n", - " model_client=model_client,\n", - " tools=[approve], # Register the approve function as a tool.\n", - " system_message=\"Provide constructive feedback. Use the approve tool to approve when all feedbacks are addressed.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, we create the termination condition and the team.\n", - "We run the team with the poem-writing task." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a unique, Haiku about the weather in Paris\n", - "---------- primary ----------\n", - "Raindrops gently fall, \n", - "Cobblestones shine in dim light— \n", - "Paris dreams in grey. \n", - "---------- critic ----------\n", - "This Haiku beautifully captures a melancholic yet romantic image of Paris in the rain. The use of sensory imagery like \"Raindrops gently fall\" and \"Cobblestones shine\" effectively paints a vivid picture. It could be interesting to experiment with more distinct seasonal elements of Paris, such as incorporating the Seine River or iconic landmarks in the context of the weather. Overall, it successfully conveys the atmosphere of Paris in subtle, poetic imagery.\n", - "---------- primary ----------\n", - "Thank you for your feedback! I’m glad you enjoyed the imagery. Here’s another Haiku that incorporates iconic Parisian elements:\n", - "\n", - "Eiffel stands in mist, \n", - "Seine's ripple mirrors the sky— \n", - "Spring whispers anew. \n", - "---------- critic ----------\n", - "[FunctionCall(id='call_QEWJZ873EG4UIEpsQHi1HsAu', arguments='{}', name='approve')]\n", - "---------- critic ----------\n", - "[FunctionExecutionResult(content='None', name='approve', call_id='call_QEWJZ873EG4UIEpsQHi1HsAu', is_error=False)]\n", - "---------- critic ----------\n", - "None\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Write a unique, Haiku about the weather in Paris', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=23), metadata={}, content='Raindrops gently fall, \\nCobblestones shine in dim light— \\nParis dreams in grey. ', type='TextMessage'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=99, completion_tokens=90), metadata={}, content='This Haiku beautifully captures a melancholic yet romantic image of Paris in the rain. 
The use of sensory imagery like \"Raindrops gently fall\" and \"Cobblestones shine\" effectively paints a vivid picture. It could be interesting to experiment with more distinct seasonal elements of Paris, such as incorporating the Seine River or iconic landmarks in the context of the weather. Overall, it successfully conveys the atmosphere of Paris in subtle, poetic imagery.', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=152, completion_tokens=48), metadata={}, content=\"Thank you for your feedback! I’m glad you enjoyed the imagery. Here’s another Haiku that incorporates iconic Parisian elements:\\n\\nEiffel stands in mist, \\nSeine's ripple mirrors the sky— \\nSpring whispers anew. \", type='TextMessage'), ToolCallRequestEvent(source='critic', models_usage=RequestUsage(prompt_tokens=246, completion_tokens=11), metadata={}, content=[FunctionCall(id='call_QEWJZ873EG4UIEpsQHi1HsAu', arguments='{}', name='approve')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='critic', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='None', name='approve', call_id='call_QEWJZ873EG4UIEpsQHi1HsAu', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='critic', models_usage=None, metadata={}, content='None', type='ToolCallSummaryMessage')], stop_reason=\"Function 'approve' was executed.\")" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "function_call_termination = FunctionCallTermination(function_name=\"approve\")\n", - "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=function_call_termination)\n", - "\n", - "# Use asyncio.run(...) if you are running this script as a standalone script.\n", - "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see that the conversation stopped when the critic agent approved the message using the `approve` function call." 
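One optional hardening of this setup: OR-combine the custom condition with a message cap, so the run still stops even if the critic never calls the approve tool. This sketch reuses the agents and the FunctionCallTermination class defined in the cells above.

from autogen_agentchat.conditions import MaxMessageTermination
from autogen_agentchat.teams import RoundRobinGroupChat

termination = FunctionCallTermination(function_name="approve") | MaxMessageTermination(max_messages=20)
round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=termination)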
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.7" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py index a4a9cee1b7b6..b7289bae66de 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py @@ -24,8 +24,8 @@ from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response from autogen_agentchat.messages import ( - AgentEvent, - ChatMessage, + BaseChatMessage, + BaseMessage, TextMessage, ToolCallExecutionEvent, ToolCallRequestEvent, @@ -354,7 +354,7 @@ async def _retrieve_initial_state(self) -> None: self._initial_message_ids = initial_message_ids @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: """The types of messages that the assistant agent produces.""" return (TextMessage,) @@ -393,7 +393,7 @@ async def _execute_tool_call(self, tool_call: FunctionCall, cancellation_token: result = await tool.run_json(arguments, cancellation_token) return tool.return_value_as_string(result) - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: """Handle incoming messages and return a response.""" async for message in self.on_messages_stream(messages, cancellation_token): @@ -402,8 +402,8 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: raise AssertionError("The stream should have returned the final result.") async def on_messages_stream( - self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: + self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[BaseMessage | Response, None]: """Handle incoming messages and return a response.""" await self._ensure_initialized() @@ -412,7 +412,7 @@ async def on_messages_stream( await self.handle_incoming_message(message, cancellation_token) # Inner messages for tool calls - inner_messages: List[AgentEvent | ChatMessage] = [] + inner_messages: List[BaseMessage] = [] # Create and start a run run: Run = await cancellation_token.link_future( @@ -519,7 +519,7 @@ async def on_messages_stream( chat_message = TextMessage(source=self.name, content=text_content[0].text.value) yield Response(chat_message=chat_message, inner_messages=inner_messages) - async def handle_incoming_message(self, message: ChatMessage, cancellation_token: CancellationToken) -> None: + async def handle_incoming_message(self, message: BaseChatMessage, cancellation_token: CancellationToken) -> None: """Handle regular text messages by adding them to the thread.""" content: str | List[MessageContentPartParam] if isinstance(message.content, str): diff --git 
a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py index 4fe5dd919eda..cc9268e54192 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py @@ -24,7 +24,7 @@ import PIL.Image from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response -from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage, StructuredMessage, TextMessage +from autogen_agentchat.messages import BaseChatMessage, BaseMessage, MultiModalMessage, StructuredMessage, TextMessage from autogen_agentchat.utils import content_to_str, remove_images from autogen_core import EVENT_LOGGER_NAME, CancellationToken, Component, ComponentModel, FunctionCall from autogen_core import Image as AGImage @@ -385,7 +385,7 @@ async def _set_debug_dir(self, debug_dir: str | None) -> None: ) @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: return (MultiModalMessage,) async def on_reset(self, cancellation_token: CancellationToken) -> None: @@ -422,15 +422,15 @@ async def on_reset(self, cancellation_token: CancellationToken) -> None: ) ) - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: async for message in self.on_messages_stream(messages, cancellation_token): if isinstance(message, Response): return message raise AssertionError("The stream should have returned the final result.") async def on_messages_stream( - self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: + self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[BaseMessage | Response, None]: for chat_message in messages: if isinstance(chat_message, StructuredMessage): serialized_content = chat_message.content.model_dump_json() @@ -448,7 +448,7 @@ async def on_messages_stream( ) ) - self.inner_messages: List[AgentEvent | ChatMessage] = [] + self.inner_messages: List[BaseMessage] = [] self.model_usage: List[RequestUsage] = [] try: content = await self._generate_reply(cancellation_token=cancellation_token) diff --git a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py index a8104c0ebc44..4ff738ffbe92 100644 --- a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py +++ b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py @@ -4,7 +4,7 @@ from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.base import TaskResult -from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage +from autogen_agentchat.messages import BaseMessage, TextMessage from autogen_core.models import ( ChatCompletionClient, LLMMessage, @@ -190,8 +190,8 @@ async def _assign_task_to_assistant_agent(self, task: str) -> Tuple[Any, Any]: # Get the agent's response to the task. 
task_result: TaskResult = await assistant_agent.run(task=TextMessage(content=task, source="User")) - messages: Sequence[AgentEvent | ChatMessage] = task_result.messages - message: AgentEvent | ChatMessage = messages[-1] + messages: Sequence[BaseMessage] = task_result.messages + message: BaseMessage = messages[-1] response_str = message.content # Log the model call diff --git a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py index 92964dfbec12..3a69cb0e1018 100644 --- a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py +++ b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py @@ -5,7 +5,7 @@ from typing import Any, Dict, List, Mapping, Optional, Sequence, TypedDict from autogen_agentchat.base import TaskResult -from autogen_agentchat.messages import AgentEvent, ChatMessage +from autogen_agentchat.messages import BaseMessage from autogen_core import Image from autogen_core.models import ( AssistantMessage, @@ -343,7 +343,7 @@ def log_model_task( if self.level > self.levels["INFO"]: return None - messages: Sequence[AgentEvent | ChatMessage] = task_result.messages + messages: Sequence[BaseMessage] = task_result.messages message = messages[-1] response_str = message.content if not isinstance(response_str, str): diff --git a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py index 1951205e8ed5..169325e70c3b 100644 --- a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py +++ b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py @@ -14,8 +14,7 @@ from autogen_agentchat.base import Response, TaskResult from autogen_agentchat.messages import ( - AgentEvent, - ChatMessage, + BaseMessage, ModelClientStreamingChunkEvent, MultiModalMessage, UserInputRequestedEvent, @@ -56,7 +55,7 @@ def aprint(output: str, end: str = "\n") -> Awaitable[None]: return asyncio.to_thread(print, output, end=end) -def _extract_message_content(message: AgentEvent | ChatMessage) -> Tuple[List[str], List[Image]]: +def _extract_message_content(message: BaseMessage) -> Tuple[List[str], List[Image]]: if isinstance(message, MultiModalMessage): text_parts = [item for item in message.content if isinstance(item, str)] image_parts = [item for item in message.content if isinstance(item, Image)] @@ -100,7 +99,7 @@ async def _aprint_message_content( async def RichConsole( - stream: AsyncGenerator[AgentEvent | ChatMessage | T, None], + stream: AsyncGenerator[BaseMessage | T, None], *, no_inline_images: bool = False, output_stats: bool = False, @@ -117,7 +116,7 @@ async def RichConsole( It will be improved in future releases. Args: - stream (AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None] | AsyncGenerator[AgentEvent | ChatMessage | Response, None]): Message stream to render. + stream (AsyncGenerator[BaseMessage | TaskResult, None] | AsyncGenerator[BaseMessage | Response, None]): Message stream to render. This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. no_inline_images (bool, optional): If terminal is iTerm2 will render images inline. Use this to disable this behavior. Defaults to False. 
output_stats (bool, optional): (Experimental) If True, will output a summary of the messages and inline token usage info. Defaults to False. @@ -191,7 +190,7 @@ async def RichConsole( pass else: # Cast required for mypy to be happy - message = cast(AgentEvent | ChatMessage, message) # type: ignore + message = cast(BaseMessage, message) # type: ignore text_parts, image_parts = _extract_message_content(message) # Add usage stats if needed diff --git a/python/packages/autogen-studio/autogenstudio/teammanager/teammanager.py b/python/packages/autogen-studio/autogenstudio/teammanager/teammanager.py index 96dbaec1098c..b51d32d68e9a 100644 --- a/python/packages/autogen-studio/autogenstudio/teammanager/teammanager.py +++ b/python/packages/autogen-studio/autogenstudio/teammanager/teammanager.py @@ -9,10 +9,10 @@ import aiofiles import yaml from autogen_agentchat.agents import UserProxyAgent -from autogen_agentchat.base import TaskResult, Team -from autogen_agentchat.messages import AgentEvent, ChatMessage +from autogen_agentchat.base import TaskResult +from autogen_agentchat.messages import BaseMessage from autogen_agentchat.teams import BaseGroupChat -from autogen_core import EVENT_LOGGER_NAME, CancellationToken, Component, ComponentModel +from autogen_core import EVENT_LOGGER_NAME, CancellationToken, ComponentModel from autogen_core.logging import LLMCallEvent from ..datamodel.types import EnvironmentVariable, LLMCallEventMessage, TeamResult @@ -102,7 +102,7 @@ async def run_stream( input_func: Optional[Callable] = None, cancellation_token: Optional[CancellationToken] = None, env_vars: Optional[List[EnvironmentVariable]] = None, - ) -> AsyncGenerator[Union[AgentEvent | ChatMessage | LLMCallEvent, ChatMessage, TeamResult], None]: + ) -> AsyncGenerator[Union[BaseMessage | LLMCallEvent, TeamResult], None]: """Stream team execution results""" start_time = time.time() team = None diff --git a/python/packages/autogen-studio/autogenstudio/web/managers/connection.py b/python/packages/autogen-studio/autogenstudio/web/managers/connection.py index 458620464dbb..4351b634f528 100644 --- a/python/packages/autogen-studio/autogenstudio/web/managers/connection.py +++ b/python/packages/autogen-studio/autogenstudio/web/managers/connection.py @@ -2,12 +2,11 @@ import logging import traceback from datetime import datetime, timezone -from typing import Any, Callable, Dict, Optional, Union +from typing import Any, Callable, Dict, Optional from autogen_agentchat.base._task import TaskResult from autogen_agentchat.messages import ( - AgentEvent, - ChatMessage, + BaseMessage, HandoffMessage, ModelClientStreamingChunkEvent, MultiModalMessage, @@ -17,7 +16,6 @@ ToolCallRequestEvent, ) from autogen_core import CancellationToken -from autogen_core import Image as AGImage from fastapi import WebSocket, WebSocketDisconnect from ...database import DatabaseManager @@ -160,7 +158,7 @@ async def start_stream(self, run_id: int, task: str, team_config: dict) -> None: finally: self._cancellation_tokens.pop(run_id, None) - async def _save_message(self, run_id: int, message: Union[AgentEvent | ChatMessage, ChatMessage]) -> None: + async def _save_message(self, run_id: int, message: BaseMessage) -> None: """Save a message to the database""" run = await self._get_run(run_id) From 5b8174b93934cc6d7e25a24b715e095141e214b9 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sun, 23 Mar 2025 17:07:27 -0700 Subject: [PATCH 05/26] wip: agentchat done --- .../src/autogen_agentchat/messages.py | 13 +- .../teams/_group_chat/_base_group_chat.py | 21 ++- 
.../_group_chat/_chat_agent_container.py | 35 +++-- .../tests/test_group_chat.py | 120 +++++++++++++++--- .../autogen-agentchat/tests/test_messages.py | 8 +- 5 files changed, 154 insertions(+), 43 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index d992bed96d6c..9e00cb9cca41 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -270,7 +270,7 @@ class MessageFactory: This is useful for deserializing messages from JSON data. """ - def __init__(self): + def __init__(self) -> None: self._message_types: Dict[str, type[BaseMessage]] = {} # Register all message types. self._message_types[TextMessage.__name__] = TextMessage @@ -285,15 +285,22 @@ def __init__(self): self._message_types[ModelClientStreamingChunkEvent.__name__] = ModelClientStreamingChunkEvent self._message_types[ThoughtEvent.__name__] = ThoughtEvent + def is_registered(self, message_type: type[BaseMessage]) -> bool: + """Check if a message type is registered with the factory.""" + # Get the class name of the message type. + class_name = message_type.__name__ + # Check if the class name is already registered. + return class_name in self._message_types + def register(self, message_type: type[BaseMessage]) -> None: """Register a new message type with the factory.""" + if self.is_registered(message_type): + raise ValueError(f"Message type {message_type} is already registered.") if not issubclass(message_type, BaseMessage): raise ValueError(f"Message type {message_type} must be a subclass of BaseMessage.") # Get the class name of the class_name = message_type.__name__ # Check if the class name is already registered. - if class_name in self._message_types: - raise ValueError(f"Message type {class_name} is already registered.") # Register the message type. self._message_types[class_name] = message_type diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py index a33d5897c20f..264dee15f089 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py @@ -203,7 +203,7 @@ async def run( Once the team is stopped, the termination condition is reset. Args: - task (str | ChatMessage | Sequence[ChatMessage] | None): The task to run the team with. Can be a string, a single :class:`ChatMessage` , or a list of :class:`ChatMessage`. + task (str | BaseChatMessage | Sequence[BaseChatMessage] | None): The task to run the team with. Can be a string, a single :class:`BaseChatMessage` , or a list of :class:`BaseChatMessage`. cancellation_token (CancellationToken | None): The cancellation token to kill the task immediately. Setting the cancellation token potentially put the team in an inconsistent state, and it may not reset the termination condition. @@ -311,14 +311,14 @@ async def run_stream( :attr:`~autogen_agentchat.base.TaskResult.messages`. Args: - task (str | ChatMessage | Sequence[ChatMessage] | None): The task to run the team with. Can be a string, a single :class:`ChatMessage` , or a list of :class:`ChatMessage`. + task (str | BaseChatMessage | Sequence[BaseChatMessage] | None): The task to run the team with. 
Can be a string, a single :class:`BaseChatMessage` , or a list of :class:`BaseChatMessage`. cancellation_token (CancellationToken | None): The cancellation token to kill the task immediately. Setting the cancellation token potentially put the team in an inconsistent state, and it may not reset the termination condition. To gracefully stop the team, use :class:`~autogen_agentchat.conditions.ExternalTermination` instead. Returns: - stream: an :class:`~collections.abc.AsyncGenerator` that yields :class:`~autogen_agentchat.messages.AgentEvent`, :class:`~autogen_agentchat.messages.ChatMessage`, and the final result :class:`~autogen_agentchat.base.TaskResult` as the last item in the stream. + stream: an :class:`~collections.abc.AsyncGenerator` that yields :class:`~autogen_agentchat.messages.AgentEvent`, :class:`~autogen_agentchat.messages.BaseChatMessage`, and the final result :class:`~autogen_agentchat.base.TaskResult` as the last item in the stream. Example using the :class:`~autogen_agentchat.teams.RoundRobinGroupChat` team: @@ -405,14 +405,25 @@ async def main() -> None: messages = [TextMessage(content=task, source="user")] elif isinstance(task, BaseChatMessage): messages = [task] - else: + elif isinstance(task, list): if not task: raise ValueError("Task list cannot be empty.") messages = [] for msg in task: if not isinstance(msg, BaseChatMessage): - raise ValueError("All messages in task list must be valid ChatMessage types") + raise ValueError("All messages in task list must be valid BaseChatMessage types") messages.append(msg) + else: + raise ValueError("Task must be a string, a BaseChatMessage, or a list of BaseChatMessage.") + # Check if the messages types are registered with the message factory. + if messages is not None: + for msg in messages: + if not self._message_factory.is_registered(msg.__class__): + raise ValueError( + f"Message type {msg.__class__} is not registered with the message factory. " + "Please register it with the message factory by adding it to the " + "custom_message_types list when creating the team." + ) if self._is_running: raise ValueError("The team is already running, it cannot run again until it is stopped.") diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py index 7abf0c6a7b3f..b2a22ca4cdc2 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py @@ -2,7 +2,7 @@ from autogen_core import DefaultTopicId, MessageContext, event, rpc -from autogen_agentchat.messages import BaseChatMessage, MessageFactory +from autogen_agentchat.messages import BaseChatMessage, BaseMessage, MessageFactory from ...base import ChatAgent, Response from ...state import ChatAgentContainerState @@ -27,6 +27,8 @@ class ChatAgentContainer(SequentialRoutedAgent): parent_topic_type (str): The topic type of the parent orchestrator. output_topic_type (str): The topic type for the output. agent (ChatAgent): The agent to delegate message handling to. + message_factory (MessageFactory): The message factory to use for + creating messages from JSON data. 
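A minimal sketch of the round trip this factory enables, using the register, is_registered, and create methods added in this patch. TicketContent is an illustrative content type, and MessageFactory is treated as internal later in the series, so this shows the mechanism rather than a public recipe.

from pydantic import BaseModel

from autogen_agentchat.messages import MessageFactory, StructuredMessage


class TicketContent(BaseModel):  # Hypothetical content type for illustration.
    title: str
    priority: int


factory = MessageFactory()
if not factory.is_registered(StructuredMessage[TicketContent]):
    factory.register(StructuredMessage[TicketContent])

original = StructuredMessage[TicketContent](
    content=TicketContent(title="Fix login", priority=1), source="user"
)
restored = factory.create(original.dump())  # Rebuild the message from its JSON-serializable dict.
assert isinstance(restored, StructuredMessage)
assert restored.content == original.content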
""" def __init__( @@ -51,12 +53,13 @@ def __init__( async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> None: """Handle a start event by appending the content to the buffer.""" if message.messages is not None: - self._message_buffer.extend(message.messages) + for msg in message.messages: + self._buffer_message(msg) @event async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: MessageContext) -> None: """Handle an agent response event by appending the content to the buffer.""" - self._message_buffer.append(message.agent_response.chat_message) + self._buffer_message(message.agent_response.chat_message) @rpc async def handle_reset(self, message: GroupChatReset, ctx: MessageContext) -> None: @@ -72,17 +75,10 @@ async def handle_request(self, message: GroupChatRequestPublish, ctx: MessageCon response: Response | None = None async for msg in self._agent.on_messages_stream(self._message_buffer, ctx.cancellation_token): if isinstance(msg, Response): - # Log the response. - await self.publish_message( - GroupChatMessage(message=msg.chat_message), - topic_id=DefaultTopicId(type=self._output_topic_type), - ) + await self._log_message(msg.chat_message) response = msg else: - # Log the message. - await self.publish_message( - GroupChatMessage(message=msg), topic_id=DefaultTopicId(type=self._output_topic_type) - ) + await self._log_message(msg) if response is None: raise ValueError("The agent did not produce a final response. Check the agent's on_messages_stream method.") @@ -94,6 +90,21 @@ async def handle_request(self, message: GroupChatRequestPublish, ctx: MessageCon cancellation_token=ctx.cancellation_token, ) + def _buffer_message(self, message: BaseChatMessage) -> None: + if not self._message_factory.is_registered(message.__class__): + raise ValueError(f"Message type {message.__class__} is not registered.") + # Buffer the message. + self._message_buffer.append(message) + + async def _log_message(self, message: BaseMessage) -> None: + if not self._message_factory.is_registered(message.__class__): + raise ValueError(f"Message type {message.__class__} is not registered.") + # Log the message. 
+ await self.publish_message( + GroupChatMessage(message=message), + topic_id=DefaultTopicId(type=self._output_topic_type), + ) + @rpc async def handle_pause(self, message: GroupChatPause, ctx: MessageContext) -> None: """Handle a pause event by pausing the agent.""" diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index 5707d9a2f33b..310ac071a35f 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -103,6 +103,32 @@ async def on_reset(self, cancellation_token: CancellationToken) -> None: self._last_message = None +class _UnknownMessageType(BaseChatMessage): + def to_llm_messages(self) -> List[LLMMessage]: + raise NotImplementedError("This message type is not supported.") + + def content_to_str(self) -> str: + raise NotImplementedError("This message type is not supported.") + + def content_to_render(self) -> str: + raise NotImplementedError("This message type is not supported.") + + +class _UnknownMessageTypeAgent(BaseChatAgent): + def __init__(self, name: str, description: str) -> None: + super().__init__(name, description) + + @property + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + return (_UnknownMessageType,) + + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + return Response(chat_message=_UnknownMessageType(content="Unknown message type", source=self.name)) + + async def on_reset(self, cancellation_token: CancellationToken) -> None: + pass + + class _StopAgent(_EchoAgent): def __init__(self, name: str, description: str, *, stop_at: int = 1) -> None: super().__init__(name, description) @@ -124,12 +150,12 @@ def _pass_function(input: str) -> str: return "pass" -class InputTask1(BaseModel): +class _InputTask1(BaseModel): task: str data: List[str] -class InputTask2(BaseModel): +class _InputTask2(BaseModel): task: str data: str @@ -223,6 +249,38 @@ async def test_round_robin_group_chat(runtime: AgentRuntime | None) -> None: assert result.messages[1:] == result_2.messages[1:] +@pytest.mark.asyncio +async def test_round_robin_group_chat_unknown_task_message_type(runtime: AgentRuntime | None) -> None: + model_client = ReplayChatCompletionClient([]) + agent1 = AssistantAgent("agent1", model_client=model_client) + agent2 = AssistantAgent("agent2", model_client=model_client) + termination = TextMentionTermination("TERMINATE") + team1 = RoundRobinGroupChat( + participants=[agent1, agent2], + termination_condition=termination, + runtime=runtime, + custom_message_types=[StructuredMessage[_InputTask2]], + ) + with pytest.raises(ValueError, match=r"Message type .*StructuredMessage\[_InputTask1\].* is not registered"): + await team1.run( + task=StructuredMessage[_InputTask1]( + content=_InputTask1(task="Write a program that prints 'Hello, world!'", data=["a", "b", "c"]), + source="user", + ) + ) + + +@pytest.mark.asyncio +async def test_round_robin_group_chat_unknown_agent_message_type() -> None: + model_client = ReplayChatCompletionClient(["Hello"]) + agent1 = AssistantAgent("agent1", model_client=model_client) + agent2 = _UnknownMessageTypeAgent("agent2", "I am an unknown message type agent") + termination = TextMentionTermination("TERMINATE") + team1 = RoundRobinGroupChat(participants=[agent1, agent2], termination_condition=termination) + with pytest.raises(ValueError, match="Message type .*UnknownMessageType.* not registered"): + 
await team1.run(task=TextMessage(content="Write a program that prints 'Hello, world!'", source="user")) + + @pytest.mark.asyncio @pytest.mark.parametrize( "task", @@ -231,12 +289,12 @@ async def test_round_robin_group_chat(runtime: AgentRuntime | None) -> None: [TextMessage(content="Write a program that prints 'Hello, world!'", source="user")], [MultiModalMessage(content=["Write a program that prints 'Hello, world!'"], source="user")], [ - StructuredMessage[InputTask1]( - content=InputTask1(task="Write a program that prints 'Hello, world!'", data=["a", "b", "c"]), + StructuredMessage[_InputTask1]( + content=_InputTask1(task="Write a program that prints 'Hello, world!'", data=["a", "b", "c"]), source="user", ), - StructuredMessage[InputTask2]( - content=InputTask2(task="Write a program that prints 'Hello, world!'", data="a"), source="user" + StructuredMessage[_InputTask2]( + content=_InputTask2(task="Write a program that prints 'Hello, world!'", data="a"), source="user" ), ], ], @@ -249,13 +307,23 @@ async def test_round_robin_group_chat_state(task: TaskType, runtime: AgentRuntim agent1 = AssistantAgent("agent1", model_client=model_client) agent2 = AssistantAgent("agent2", model_client=model_client) termination = TextMentionTermination("TERMINATE") - team1 = RoundRobinGroupChat(participants=[agent1, agent2], termination_condition=termination, runtime=runtime) + team1 = RoundRobinGroupChat( + participants=[agent1, agent2], + termination_condition=termination, + runtime=runtime, + custom_message_types=[StructuredMessage[_InputTask1], StructuredMessage[_InputTask2]], + ) await team1.run(task=task) state = await team1.save_state() agent3 = AssistantAgent("agent1", model_client=model_client) agent4 = AssistantAgent("agent2", model_client=model_client) - team2 = RoundRobinGroupChat(participants=[agent3, agent4], termination_condition=termination, runtime=runtime) + team2 = RoundRobinGroupChat( + participants=[agent3, agent4], + termination_condition=termination, + runtime=runtime, + custom_message_types=[StructuredMessage[_InputTask1], StructuredMessage[_InputTask2]], + ) await team2.load_state(state) state2 = await team2.save_state() assert state == state2 @@ -524,12 +592,12 @@ async def test_selector_group_chat(runtime: AgentRuntime | None) -> None: [TextMessage(content="Write a program that prints 'Hello, world!'", source="user")], [MultiModalMessage(content=["Write a program that prints 'Hello, world!'"], source="user")], [ - StructuredMessage[InputTask1]( - content=InputTask1(task="Write a program that prints 'Hello, world!'", data=["a", "b", "c"]), + StructuredMessage[_InputTask1]( + content=_InputTask1(task="Write a program that prints 'Hello, world!'", data=["a", "b", "c"]), source="user", ), - StructuredMessage[InputTask2]( - content=InputTask2(task="Write a program that prints 'Hello, world!'", data="a"), source="user" + StructuredMessage[_InputTask2]( + content=_InputTask2(task="Write a program that prints 'Hello, world!'", data="a"), source="user" ), ], ], @@ -547,6 +615,7 @@ async def test_selector_group_chat_state(task: TaskType, runtime: AgentRuntime | termination_condition=termination, model_client=model_client, runtime=runtime, + custom_message_types=[StructuredMessage[_InputTask1], StructuredMessage[_InputTask2]], ) await team1.run(task=task) state = await team1.save_state() @@ -554,7 +623,10 @@ async def test_selector_group_chat_state(task: TaskType, runtime: AgentRuntime | agent3 = AssistantAgent("agent1", model_client=model_client) agent4 = AssistantAgent("agent2", 
model_client=model_client) team2 = SelectorGroupChat( - participants=[agent3, agent4], termination_condition=termination, model_client=model_client + participants=[agent3, agent4], + termination_condition=termination, + model_client=model_client, + custom_message_types=[StructuredMessage[_InputTask1], StructuredMessage[_InputTask2]], ) await team2.load_state(state) state2 = await team2.save_state() @@ -897,12 +969,12 @@ async def test_swarm_handoff(runtime: AgentRuntime | None) -> None: [TextMessage(content="Write a program that prints 'Hello, world!'", source="user")], [MultiModalMessage(content=["Write a program that prints 'Hello, world!'"], source="user")], [ - StructuredMessage[InputTask1]( - content=InputTask1(task="Write a program that prints 'Hello, world!'", data=["a", "b", "c"]), + StructuredMessage[_InputTask1]( + content=_InputTask1(task="Write a program that prints 'Hello, world!'", data=["a", "b", "c"]), source="user", ), - StructuredMessage[InputTask2]( - content=InputTask2(task="Write a program that prints 'Hello, world!'", data="a"), source="user" + StructuredMessage[_InputTask2]( + content=_InputTask2(task="Write a program that prints 'Hello, world!'", data="a"), source="user" ), ], ], @@ -914,14 +986,24 @@ async def test_swarm_handoff_state(task: TaskType, runtime: AgentRuntime | None) third_agent = _HandOffAgent("third_agent", description="third agent", next_agent="first_agent") termination = MaxMessageTermination(6) - team1 = Swarm([second_agent, first_agent, third_agent], termination_condition=termination, runtime=runtime) + team1 = Swarm( + [second_agent, first_agent, third_agent], + termination_condition=termination, + runtime=runtime, + custom_message_types=[StructuredMessage[_InputTask1], StructuredMessage[_InputTask2]], + ) await team1.run(task=task) state = await team1.save_state() first_agent2 = _HandOffAgent("first_agent", description="first agent", next_agent="second_agent") second_agent2 = _HandOffAgent("second_agent", description="second agent", next_agent="third_agent") third_agent2 = _HandOffAgent("third_agent", description="third agent", next_agent="first_agent") - team2 = Swarm([second_agent2, first_agent2, third_agent2], termination_condition=termination, runtime=runtime) + team2 = Swarm( + [second_agent2, first_agent2, third_agent2], + termination_condition=termination, + runtime=runtime, + custom_message_types=[StructuredMessage[_InputTask1], StructuredMessage[_InputTask2]], + ) await team2.load_state(state) state2 = await team2.save_state() assert state == state2 diff --git a/python/packages/autogen-agentchat/tests/test_messages.py b/python/packages/autogen-agentchat/tests/test_messages.py index 518a875d2266..c3dd0acac836 100644 --- a/python/packages/autogen-agentchat/tests/test_messages.py +++ b/python/packages/autogen-agentchat/tests/test_messages.py @@ -18,7 +18,7 @@ def test_structured_message() -> None: ) # Check that the message type is correct - assert message.type == "StructuredMessage[TestContent]" + assert message.type == "StructuredMessage[TestContent]" # type: ignore # Check that the content is of the correct type assert isinstance(message.content, TestContent) @@ -50,7 +50,7 @@ def test_message_factory() -> None: assert isinstance(text_message, TextMessage) assert text_message.source == "test_agent" assert text_message.content == "Hello, world!" 
- assert text_message.type == "TextMessage" + assert text_message.type == "TextMessage" # type: ignore # Handoff message data handoff_data = { @@ -66,7 +66,7 @@ def test_message_factory() -> None: assert handoff_message.source == "test_agent" assert handoff_message.content == "handoff to another agent" assert handoff_message.target == "target_agent" - assert handoff_message.type == "HandoffMessage" + assert handoff_message.type == "HandoffMessage" # type: ignore # Structured message data structured_data = { @@ -90,4 +90,4 @@ def test_message_factory() -> None: assert structured_message.source == "test_agent" assert structured_message.content.field1 == "test" assert structured_message.content.field2 == 42 - assert structured_message.type == "StructuredMessage[TestContent]" + assert structured_message.type == "StructuredMessage[TestContent]" # type: ignore From a4cb79ce75c1deae969bf120354bc6115b43a483 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sun, 23 Mar 2025 19:30:37 -0700 Subject: [PATCH 06/26] improve docs --- .../agents/_assistant_agent.py | 6 +- .../agents/_society_of_mind_agent.py | 2 +- .../src/autogen_agentchat/messages.py | 141 +- .../_group_chat/_chat_agent_container.py | 2 +- .../_magentic_one_orchestrator.py | 2 +- .../_group_chat/_round_robin_group_chat.py | 2 +- .../teams/_group_chat/_selector_group_chat.py | 2 +- .../teams/_group_chat/_swarm_group_chat.py | 2 +- .../tests/test_group_chat.py | 2 +- .../agentchat-user-guide/custom-agents.ipynb | 1488 ++++++++--------- .../tutorial/agents.ipynb | 4 +- .../tutorial/messages.ipynb | 8 +- .../agents/file_surfer/_file_surfer.py | 24 +- .../agents/openai/_openai_assistant_agent.py | 18 +- .../web_surfer/_multimodal_web_surfer.py | 18 +- .../src/autogen_ext/ui/_rich_console.py | 6 +- .../tests/test_openai_assistant_agent.py | 4 +- 17 files changed, 883 insertions(+), 848 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index 6e418cb0f512..9b6d5621da03 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -812,8 +812,10 @@ async def _add_messages_to_context( Add incoming messages to the model context. 
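A sketch of what the replacement loop that follows produces, assuming the HandoffMessage.context and content_to_model_message semantics from this patch: handoff context messages are added first, then every chat message is converted to a UserMessage. The agent names are illustrative.

import asyncio

from autogen_agentchat.messages import HandoffMessage, TextMessage
from autogen_core.model_context import UnboundedChatCompletionContext
from autogen_core.models import AssistantMessage


async def build_context() -> None:
    model_context = UnboundedChatCompletionContext()
    messages = [
        HandoffMessage(
            content="Transferring to planner.",
            source="triage",
            target="planner",
            context=[AssistantMessage(content="Draft plan so far.", source="triage")],
        ),
        TextMessage(content="Continue the plan.", source="user"),
    ]
    for msg in messages:
        if isinstance(msg, HandoffMessage):
            for llm_msg in msg.context:
                await model_context.add_message(llm_msg)
        await model_context.add_message(msg.content_to_model_message())
    print(len(await model_context.get_messages()))  # 3: one carried-over message plus two user messages.


asyncio.run(build_context())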
""" for msg in messages: - for llm_msg in msg.to_llm_messages(): - await model_context.add_message(llm_msg) + if isinstance(msg, HandoffMessage): + for llm_msg in msg.context: + await model_context.add_message(llm_msg) + await model_context.add_message(msg.content_to_model_message()) @staticmethod async def _update_model_context_with_memory( diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py index a777879eaea8..2a9572e8937d 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py @@ -168,7 +168,7 @@ async def on_messages_stream( llm_messages: List[LLMMessage] = [SystemMessage(content=self._instruction)] for message in messages: if isinstance(message, BaseChatMessage): - llm_messages.extend(message.to_llm_messages()) + llm_messages.append(message.content_to_model_message()) llm_messages.append(SystemMessage(content=self._response_prompt)) completion = await self._model_client.create(messages=llm_messages, cancellation_token=cancellation_token) assert isinstance(completion.content, str) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index 9e00cb9cca41..877f3ba79156 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -11,14 +11,23 @@ class and includes specific fields relevant to the type of message being sent. from autogen_core.memory import MemoryContent from autogen_core.models import FunctionExecutionResult, LLMMessage, RequestUsage, UserMessage from pydantic import BaseModel, ConfigDict, Field, computed_field -from typing_extensions import Annotated +from typing_extensions import Annotated, Self class BaseMessage(BaseModel, ABC): - """Base class for all message types.""" + """Base class for all message types in AgentChat. + + .. warning:: + + If you want to create a new message type, do not inherit from this class. + Instead, inherit from :class:`BaseChatMessage` or :class:`BaseAgentEvent` + to clarify the purpose of the message type. + + """ content: Any - """The content of the message.""" + """The content of the message. The type is expected to be specified by the + subclass.""" source: str """The name of the agent that sent this message.""" @@ -33,48 +42,78 @@ class BaseMessage(BaseModel, ABC): @computed_field def type(self) -> str: - """Get the class name.""" + """The class name of this message.""" return self.__class__.__name__ @abstractmethod def content_to_render(self) -> str: """Convert the content of the message to a string-only representation. - This is used for rendering the message in the UI.""" + This is used for rendering the message in console or user interface.""" ... + def dump(self) -> Mapping[str, Any]: + """Convert the message to a JSON-serializable dictionary. + + The default implementation uses the Pydantic model's `model_dump` method. + + If you want to customize the serialization, override this method. + """ + return self.model_dump() + + @classmethod + def load(cls, data: Mapping[str, Any]) -> Self: + """Create a message from a dictionary of JSON-serializable data. + + The default implementation uses the Pydantic model's `model_validate` method. 
+ If you want to customize the deserialization, override this method. + """ + return cls.model_validate(data) + class BaseChatMessage(BaseMessage, ABC): """Base class for chat messages. + .. note:: + + If you want to create a new message type that is used for agent-to-agent + communication, inherit from this class, or simply use + :class:`StructuredMessage` if your content type is a subclass of + Pydantic BaseModel. + This class is used for messages that are sent between agents in a chat - conversation. Agents are expected to use process the content of the - message using either models or code and return a response as another - chat message.""" + conversation. Agents are expected to process the content of the + message using models and return a response as another :class:`BaseChatMessage`. + """ @abstractmethod def content_to_str(self) -> str: """Convert the content of the message to a string-only representation. This is used for creating a text-only content for models. - This is not used for rendering the message in the UI. For that, use - :meth:`content_to_render`. + This is not used for rendering the message in console. For that, use + :meth:`~BaseMessage.content_to_render`. - The difference between this and :meth:`to_llm_messages` is that this + The difference between this and :meth:`content_to_model_message` is that this is used to construct parts of the a message for the model client, - while :meth:`to_llm_messages` is used to create complete messages + while :meth:`content_to_model_message` is used to create a complete message for the model client. """ ... @abstractmethod - def to_llm_messages(self) -> List[LLMMessage]: - """Convert the message to a list of :class:`~autogen_core.models.LLMMessage` - for use with the model client.""" + def content_to_model_message(self) -> UserMessage: + """Convert the message content to a :class:`~autogen_core.models.UserMessage` + for use with model client, e.g., :class:`~autogen_core.models.ChatCompletionClient`.""" ... class BaseTextChatMessage(BaseChatMessage, ABC): - """Base class for all text-only chat message types.""" + """Base class for all text-only :class:`BaseChatMessage` types. + It has implementations for :meth:`content_to_str`, :meth:`content_to_render`, + :meth:`content_to_model_message` methods. + + Inherit from this class if your message content type is a string. + """ content: str """The content of the message.""" @@ -85,16 +124,24 @@ def content_to_render(self) -> str: def content_to_str(self) -> str: return self.content - def to_llm_messages(self) -> List[LLMMessage]: - return [UserMessage(content=self.content, source=self.source)] + def content_to_model_message(self) -> UserMessage: + return UserMessage(content=self.content, source=self.source) class BaseAgentEvent(BaseMessage, ABC): """Base class for agent events. + .. note:: + + If you want to create a new message type for signaling observable events + to user and application, inherit from this class. + Agent events are used to signal actions and thoughts produced by agents and teams to user and applications. They are not used for agent-to-agent communication and are not expected to be processed by other agents. + + You should override the :meth:`content_to_render` method if you want to provide + a custom rendering of the content. 
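A sketch of the subclassing pattern this docstring describes: a custom event that overrides content_to_render. The event name and field are hypothetical, not part of the patch.

from autogen_agentchat.messages import BaseAgentEvent


class RetryEvent(BaseAgentEvent):
    """Hypothetical event emitted when an agent retries a step."""

    content: int  # Number of retries so far.

    def content_to_render(self) -> str:
        return f"Retried {self.content} time(s)"


event = RetryEvent(content=2, source="worker")
print(event.content_to_render())  # Retried 2 time(s)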
""" def content_to_render(self) -> str: @@ -106,7 +153,30 @@ def content_to_render(self) -> str: class StructuredMessage(BaseChatMessage, Generic[StructuredContentType]): - """A structured message with a specific content type.""" + """A :class:`BaseChatMessage` type with an unspecified content type. + + To create a new structured message type, specify the content type + as a subclass of `Pydantic BaseModel `_. + + .. code-block:: python + + from pydantic import BaseModel + from autogen_agentchat.messages import StructuredMessage + + + class MyMessageContent(BaseModel): + text: str + number: int + + + message = StructuredMessage[MyMessageContent]( + content=MyMessageContent(text="Hello", number=42), + source="agent1", + ) + + print(message.content_to_str()) # {"text": "Hello", "number": 42} + + """ content: StructuredContentType """The content of the message. Must be a subclass of @@ -118,13 +188,11 @@ def content_to_render(self) -> str: def content_to_str(self) -> str: return self.content.model_dump_json() - def to_llm_messages(self) -> List[LLMMessage]: - return [ - UserMessage( - content=self.content.model_dump_json(), - source=self.source, - ) - ] + def content_to_model_message(self) -> UserMessage: + return UserMessage( + content=self.content.model_dump_json(), + source=self.source, + ) class TextMessage(BaseTextChatMessage): @@ -169,8 +237,8 @@ def content_to_render(self, iterm: bool = False) -> str: result.append("") return "\n".join(result) - def to_llm_messages(self) -> List[LLMMessage]: - return [UserMessage(content=self.content, source=self.source)] + def content_to_model_message(self) -> UserMessage: + return UserMessage(content=self.content, source=self.source) class StopMessage(BaseTextChatMessage): @@ -188,9 +256,6 @@ class HandoffMessage(BaseTextChatMessage): context: List[LLMMessage] = [] """The model context to be passed to the target agent.""" - def to_llm_messages(self) -> List[LLMMessage]: - return [*self.context, UserMessage(content=self.content, source=self.source)] - class ToolCallSummaryMessage(BaseTextChatMessage): """A message signaling the summary of tool call results.""" @@ -249,7 +314,7 @@ class ThoughtEvent(BaseAgentEvent): TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage, Field(discriminator="type"), ] -"""Builtin chat message types for agent-to-agent communication only.""" +"""Union of built-in :class:`BaseChatMessage`. This does not include the :class:`StructuredMessage` class.""" AgentEvent = Annotated[ @@ -261,11 +326,13 @@ class ThoughtEvent(BaseAgentEvent): | ThoughtEvent, Field(discriminator="type"), ] -"""Builtin agent events emitted by agents and teams when they work, not used for agent-to-agent communication.""" +"""Union of built-in :class:`BaseAgentEvent`.""" class MessageFactory: - """A factory for creating messages from JSON-serializable dictionaries. + """:meta private: + + A factory for creating messages from JSON-serializable dictionaries. This is useful for deserializing messages from JSON data. """ @@ -318,13 +385,17 @@ def create(self, data: Mapping[str, Any]) -> BaseMessage: # Create an instance of the message class. 
assert issubclass(message_class, BaseMessage) - return message_class.model_validate(data) + return message_class.load(data) __all__ = [ "AgentEvent", "BaseMessage", + "BaseChatMessage", + "BaseAgentEvent", + "BaseTextChatMessage", "ChatMessage", + "StructuredContentType", "StructuredMessage", "HandoffMessage", "MultiModalMessage", diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py index b2a22ca4cdc2..330057672ffe 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py @@ -121,7 +121,7 @@ async def on_unhandled_message(self, message: Any, ctx: MessageContext) -> None: async def save_state(self) -> Mapping[str, Any]: agent_state = await self._agent.save_state() state = ChatAgentContainerState( - agent_state=agent_state, message_buffer=[message.model_dump() for message in self._message_buffer] + agent_state=agent_state, message_buffer=[message.dump() for message in self._message_buffer] ) return state.model_dump() diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py index 656104651463..f6baaa8eb249 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py @@ -206,7 +206,7 @@ async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> async def save_state(self) -> Mapping[str, Any]: state = MagenticOneOrchestratorState( - message_thread=[msg.model_dump() for msg in self._message_thread], + message_thread=[msg.dump() for msg in self._message_thread], current_turn=self._current_turn, task=self._task, facts=self._facts, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py index 3aa5e37007ee..c3b962975079 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py @@ -55,7 +55,7 @@ async def reset(self) -> None: async def save_state(self) -> Mapping[str, Any]: state = RoundRobinManagerState( - message_thread=[message.model_dump() for message in self._message_thread], + message_thread=[message.dump() for message in self._message_thread], current_turn=self._current_turn, next_speaker_index=self._next_speaker_index, ) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 8ffe8da30784..347d91b86215 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -91,7 +91,7 @@ async def reset(self) -> None: async def save_state(self) -> Mapping[str, 
Any]: state = SelectorManagerState( - message_thread=[msg.model_dump() for msg in self._message_thread], + message_thread=[msg.dump() for msg in self._message_thread], current_turn=self._current_turn, previous_speaker=self._previous_speaker, ) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py index 12acdd83c1e2..0da78e6f39f1 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py @@ -92,7 +92,7 @@ async def select_speaker(self, thread: List[BaseMessage]) -> str: async def save_state(self) -> Mapping[str, Any]: state = SwarmManagerState( - message_thread=[msg.model_dump() for msg in self._message_thread], + message_thread=[msg.dump() for msg in self._message_thread], current_turn=self._current_turn, current_speaker=self._current_speaker, ) diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index 310ac071a35f..71312ae3b9a5 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -104,7 +104,7 @@ async def on_reset(self, cancellation_token: CancellationToken) -> None: class _UnknownMessageType(BaseChatMessage): - def to_llm_messages(self) -> List[LLMMessage]: + def content_to_model_message(self) -> UserMessage: raise NotImplementedError("This message type is not supported.") def content_to_str(self) -> str: diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb index 5748178db669..c8014d7dc448 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb @@ -1,747 +1,747 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Custom Agents\n", - "\n", - "You may have agents with behaviors that do not fall into a preset. \n", - "In such cases, you can build custom agents.\n", - "\n", - "All agents in AgentChat inherit from {py:class}`~autogen_agentchat.agents.BaseChatAgent` \n", - "class and implement the following abstract methods and attributes:\n", - "\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n", - "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.BaseChatMessage` message types the agent can produce in its response.\n", - "\n", - "Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. 
If this method is not implemented, the agent\n", - "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n", - "that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n", - "yields all messages in the response." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## CountDownAgent\n", - "\n", - "In this example, we create a simple agent that counts down from a given number to zero,\n", - "and produces a stream of messages with the current count." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "3...\n", - "2...\n", - "1...\n", - "Done!\n" - ] - } - ], - "source": [ - "from typing import AsyncGenerator, List, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import BaseChatMessage, BaseMessage, TextMessage\n", - "from autogen_core import CancellationToken\n", - "\n", - "\n", - "class CountDownAgent(BaseChatAgent):\n", - " def __init__(self, name: str, count: int = 3):\n", - " super().__init__(name, \"A simple agent that counts down.\")\n", - " self._count = count\n", - "\n", - " @property\n", - " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", - " return (TextMessage,)\n", - "\n", - " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " # Calls the on_messages_stream.\n", - " response: Response | None = None\n", - " async for message in self.on_messages_stream(messages, cancellation_token):\n", - " if isinstance(message, Response):\n", - " response = message\n", - " assert response is not None\n", - " return response\n", - "\n", - " async def on_messages_stream(\n", - " self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[BaseMessage | Response, None]:\n", - " inner_messages: List[BaseMessage] = []\n", - " for i in range(self._count, 0, -1):\n", - " msg = TextMessage(content=f\"{i}...\", source=self.name)\n", - " inner_messages.append(msg)\n", - " yield msg\n", - " # The response is returned at the end of the stream.\n", - " # It contains the final message and all the inner messages.\n", - " yield Response(chat_message=TextMessage(content=\"Done!\", source=self.name), inner_messages=inner_messages)\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " pass\n", - "\n", - "\n", - "async def run_countdown_agent() -> None:\n", - " # Create a countdown agent.\n", - " countdown_agent = CountDownAgent(\"countdown\")\n", - "\n", - " # Run the agent with a given task and stream the response.\n", - " async for message in countdown_agent.on_messages_stream([], CancellationToken()):\n", - " if isinstance(message, Response):\n", - " print(message.chat_message.content)\n", - " else:\n", - " print(message.content)\n", - "\n", - "\n", - "# Use asyncio.run(run_countdown_agent()) when running in a script.\n", - "await run_countdown_agent()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ArithmeticAgent\n", - "\n", - "In this example, we create an agent class that can perform simple arithmetic operations\n", - "on a given integer. 
Then, we will use different instances of this agent class\n", - "in a {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", - "to transform a given integer into another integer by applying a sequence of arithmetic operations.\n", - "\n", - "The `ArithmeticAgent` class takes an `operator_func` that takes an integer and returns an integer,\n", - "after applying an arithmetic operation to the integer.\n", - "In its `on_messages` method, it applies the `operator_func` to the integer in the input message,\n", - "and returns a response with the result." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Callable, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.conditions import MaxMessageTermination\n", - "from autogen_agentchat.messages import BaseChatMessage\n", - "from autogen_agentchat.teams import SelectorGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_core import CancellationToken\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "\n", - "class ArithmeticAgent(BaseChatAgent):\n", - " def __init__(self, name: str, description: str, operator_func: Callable[[int], int]) -> None:\n", - " super().__init__(name, description=description)\n", - " self._operator_func = operator_func\n", - " self._message_history: List[BaseChatMessage] = []\n", - "\n", - " @property\n", - " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", - " return (TextMessage,)\n", - "\n", - " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " # Update the message history.\n", - " # NOTE: it is possible the messages is an empty list, which means the agent was selected previously.\n", - " self._message_history.extend(messages)\n", - " # Parse the number in the last message.\n", - " assert isinstance(self._message_history[-1], TextMessage)\n", - " number = int(self._message_history[-1].content)\n", - " # Apply the operator function to the number.\n", - " result = self._operator_func(number)\n", - " # Create a new message with the result.\n", - " response_message = TextMessage(content=str(result), source=self.name)\n", - " # Update the message history.\n", - " self._message_history.append(response_message)\n", - " # Return the response.\n", - " return Response(chat_message=response_message)\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " pass" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "The `on_messages` method may be called with an empty list of messages, in which\n", - "case it means the agent was called previously and is now being called again,\n", - "without any new messages from the caller. 
So it is important to keep a history\n", - "of the previous messages received by the agent, and use that history to generate\n", - "the response.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with 5 instances of `ArithmeticAgent`:\n", - "\n", - "- one that adds 1 to the input integer,\n", - "- one that subtracts 1 from the input integer,\n", - "- one that multiplies the input integer by 2,\n", - "- one that divides the input integer by 2 and rounds down to the nearest integer, and\n", - "- one that returns the input integer unchanged.\n", - "\n", - "We then create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with these agents,\n", - "and set the appropriate selector settings:\n", - "\n", - "- allow the same agent to be selected consecutively to allow for repeated operations, and\n", - "- customize the selector prompt to tailor the model's response to the specific task." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Apply the operations to turn the given number into 25.\n", - "---------- user ----------\n", - "10\n", - "---------- multiply_agent ----------\n", - "20\n", - "---------- add_agent ----------\n", - "21\n", - "---------- multiply_agent ----------\n", - "42\n", - "---------- divide_agent ----------\n", - "21\n", - "---------- add_agent ----------\n", - "22\n", - "---------- add_agent ----------\n", - "23\n", - "---------- add_agent ----------\n", - "24\n", - "---------- add_agent ----------\n", - "25\n", - "---------- Summary ----------\n", - "Number of messages: 10\n", - "Finish reason: Maximum number of messages 10 reached, current message count: 10\n", - "Total prompt tokens: 0\n", - "Total completion tokens: 0\n", - "Duration: 2.40 seconds\n" - ] - } - ], - "source": [ - "async def run_number_agents() -> None:\n", - " # Create agents for number operations.\n", - " add_agent = ArithmeticAgent(\"add_agent\", \"Adds 1 to the number.\", lambda x: x + 1)\n", - " multiply_agent = ArithmeticAgent(\"multiply_agent\", \"Multiplies the number by 2.\", lambda x: x * 2)\n", - " subtract_agent = ArithmeticAgent(\"subtract_agent\", \"Subtracts 1 from the number.\", lambda x: x - 1)\n", - " divide_agent = ArithmeticAgent(\"divide_agent\", \"Divides the number by 2 and rounds down.\", lambda x: x // 2)\n", - " identity_agent = ArithmeticAgent(\"identity_agent\", \"Returns the number as is.\", lambda x: x)\n", - "\n", - " # The termination condition is to stop after 10 messages.\n", - " termination_condition = MaxMessageTermination(10)\n", - "\n", - " # Create a selector group chat.\n", - " selector_group_chat = SelectorGroupChat(\n", - " [add_agent, multiply_agent, subtract_agent, divide_agent, identity_agent],\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", - " termination_condition=termination_condition,\n", - " allow_repeated_speaker=True, # Allow the same agent to speak multiple times, necessary for this task.\n", - " selector_prompt=(\n", - " \"Available roles:\\n{roles}\\nTheir job descriptions:\\n{participants}\\n\"\n", - " \"Current conversation history:\\n{history}\\n\"\n", - " \"Please select the most appropriate role for the next message, and only return the role name.\"\n", - " ),\n", - " )\n", - "\n", - " # Run the selector group chat with a given task and stream the response.\n", - " 
task: List[BaseChatMessage] = [\n", - " TextMessage(content=\"Apply the operations to turn the given number into 25.\", source=\"user\"),\n", - " TextMessage(content=\"10\", source=\"user\"),\n", - " ]\n", - " stream = selector_group_chat.run_stream(task=task)\n", - " await Console(stream)\n", - "\n", - "\n", - "# Use asyncio.run(run_number_agents()) when running in a script.\n", - "await run_number_agents()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "From the output, we can see that the agents have successfully transformed the input integer\n", - "from 10 to 25 by choosing appropriate agents that apply the arithmetic operations in sequence." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using Custom Model Clients in Custom Agents\n", - "\n", - "One of the key features of the {py:class}`~autogen_agentchat.agents.AssistantAgent` preset in AgentChat is that it takes a `model_client` argument and can use it in responding to messages. However, in some cases, you may want your agent to use a custom model client that is not currently supported (see [supported model clients](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/components/model-clients.html)) or custom model behaviours. \n", - "\n", - "You can accomplish this with a custom agent that implements *your custom model client*.\n", - "\n", - "In the example below, we will walk through an example of a custom agent that uses the [Google Gemini SDK](https://github.com/googleapis/python-genai) directly to respond to messages.\n", - "\n", - "> **Note:** You will need to install the [Google Gemini SDK](https://github.com/googleapis/python-genai) to run this example. You can install it using the following command: \n", - "\n", - "```bash\n", - "pip install google-genai\n", - "``` " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# !pip install google-genai\n", - "import os\n", - "from typing import AsyncGenerator, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import BaseChatMessage, StructuredMessage, BaseMessage\n", - "from autogen_core import CancellationToken\n", - "from autogen_core.model_context import UnboundedChatCompletionContext\n", - "from autogen_core.models import AssistantMessage, RequestUsage, UserMessage\n", - "from google import genai\n", - "from google.genai import types\n", - "\n", - "\n", - "class GeminiAssistantAgent(BaseChatAgent):\n", - " def __init__(\n", - " self,\n", - " name: str,\n", - " description: str = \"An agent that provides assistance with ability to use tools.\",\n", - " model: str = \"gemini-1.5-flash-002\",\n", - " api_key: str = os.environ[\"GEMINI_API_KEY\"],\n", - " system_message: str\n", - " | None = \"You are a helpful assistant that can respond to messages. 
Reply with TERMINATE when the task has been completed.\",\n", - " ):\n", - " super().__init__(name=name, description=description)\n", - " self._model_context = UnboundedChatCompletionContext()\n", - " self._model_client = genai.Client(api_key=api_key)\n", - " self._system_message = system_message\n", - " self._model = model\n", - "\n", - " @property\n", - " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", - " return (TextMessage,)\n", - "\n", - " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " final_response = None\n", - " async for message in self.on_messages_stream(messages, cancellation_token):\n", - " if isinstance(message, Response):\n", - " final_response = message\n", - "\n", - " if final_response is None:\n", - " raise AssertionError(\"The stream should have returned the final result.\")\n", - "\n", - " return final_response\n", - "\n", - " async def on_messages_stream(\n", - " self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[BaseMessage | Response, None]:\n", - " # Add messages to the model context\n", - " for msg in messages:\n", - " if isinstance(msg, StructuredMessage):\n", - " serialized_content = msg.content.model_dump_json()\n", - " await self._model_context.add_message(UserMessage(content=serialized_content, source=msg.source))\n", - " else:\n", - " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", - "\n", - " # Get conversation history\n", - " history = [\n", - " (msg.source if hasattr(msg, \"source\") else \"system\")\n", - " + \": \"\n", - " + (msg.content if isinstance(msg.content, str) else \"\")\n", - " + \"\\n\"\n", - " for msg in await self._model_context.get_messages()\n", - " ]\n", - " # Generate response using Gemini\n", - " response = self._model_client.models.generate_content(\n", - " model=self._model,\n", - " contents=f\"History: {history}\\nGiven the history, please provide a response\",\n", - " config=types.GenerateContentConfig(\n", - " system_instruction=self._system_message,\n", - " temperature=0.3,\n", - " ),\n", - " )\n", - "\n", - " # Create usage metadata\n", - " usage = RequestUsage(\n", - " prompt_tokens=response.usage_metadata.prompt_token_count,\n", - " completion_tokens=response.usage_metadata.candidates_token_count,\n", - " )\n", - "\n", - " # Add response to model context\n", - " await self._model_context.add_message(AssistantMessage(content=response.text, source=self.name))\n", - "\n", - " # Yield the final response\n", - " yield Response(\n", - " chat_message=TextMessage(content=response.text, source=self.name, models_usage=usage),\n", - " inner_messages=[],\n", - " )\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " \"\"\"Reset the assistant by clearing the model context.\"\"\"\n", - " await self._model_context.clear()" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "What is the capital of New York?\n", - "---------- gemini_assistant ----------\n", - "Albany\n", - "TERMINATE\n", - "\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the capital of New York?', type='TextMessage'), TextMessage(source='gemini_assistant', models_usage=RequestUsage(prompt_tokens=46, 
completion_tokens=5), content='Albany\\nTERMINATE\\n', type='TextMessage')], stop_reason=None)" - ] - }, - "execution_count": 38, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "gemini_assistant = GeminiAssistantAgent(\"gemini_assistant\")\n", - "await Console(gemini_assistant.run_stream(task=\"What is the capital of New York?\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In the example above, we have chosen to provide `model`, `api_key` and `system_message` as arguments - you can choose to provide any other arguments that are required by the model client you are using or fits with your application design. \n", - "\n", - "Now, let us explore how to use this custom agent as part of a team in AgentChat." - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a Haiku poem with 4 lines about the fall season.\n", - "---------- primary ----------\n", - "Crimson leaves cascade, \n", - "Whispering winds sing of change, \n", - "Chill wraps the fading, \n", - "Nature's quilt, rich and warm.\n", - "---------- gemini_critic ----------\n", - "The poem is good, but it has four lines instead of three. A haiku must have three lines with a 5-7-5 syllable structure. The content is evocative of autumn, but the form is incorrect. Please revise to adhere to the haiku's syllable structure.\n", - "\n", - "---------- primary ----------\n", - "Thank you for your feedback! Here’s a revised haiku that follows the 5-7-5 syllable structure:\n", - "\n", - "Crimson leaves drift down, \n", - "Chill winds whisper through the gold, \n", - "Autumn’s breath is near.\n", - "---------- gemini_critic ----------\n", - "The revised haiku is much improved. It correctly follows the 5-7-5 syllable structure and maintains the evocative imagery of autumn. APPROVE\n", - "\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a Haiku poem with 4 lines about the fall season.', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=33, completion_tokens=31), content=\"Crimson leaves cascade, \\nWhispering winds sing of change, \\nChill wraps the fading, \\nNature's quilt, rich and warm.\", type='TextMessage'), TextMessage(source='gemini_critic', models_usage=RequestUsage(prompt_tokens=86, completion_tokens=60), content=\"The poem is good, but it has four lines instead of three. A haiku must have three lines with a 5-7-5 syllable structure. The content is evocative of autumn, but the form is incorrect. Please revise to adhere to the haiku's syllable structure.\\n\", type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=141, completion_tokens=49), content='Thank you for your feedback! Here’s a revised haiku that follows the 5-7-5 syllable structure:\\n\\nCrimson leaves drift down, \\nChill winds whisper through the gold, \\nAutumn’s breath is near.', type='TextMessage'), TextMessage(source='gemini_critic', models_usage=RequestUsage(prompt_tokens=211, completion_tokens=32), content='The revised haiku is much improved. It correctly follows the 5-7-5 syllable structure and maintains the evocative imagery of autumn. 
APPROVE\\n', type='TextMessage')], stop_reason=\"Text 'APPROVE' mentioned\")" - ] - }, - "execution_count": 39, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import TextMentionTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "\n", - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n", - "\n", - "# Create the primary agent.\n", - "primary_agent = AssistantAgent(\n", - " \"primary\",\n", - " model_client=model_client,\n", - " system_message=\"You are a helpful AI assistant.\",\n", - ")\n", - "\n", - "# Create a critic agent based on our new GeminiAssistantAgent.\n", - "gemini_critic_agent = GeminiAssistantAgent(\n", - " \"gemini_critic\",\n", - " system_message=\"Provide constructive feedback. Respond with 'APPROVE' to when your feedbacks are addressed.\",\n", - ")\n", - "\n", - "\n", - "# Define a termination condition that stops the task if the critic approves or after 10 messages.\n", - "termination = TextMentionTermination(\"APPROVE\") | MaxMessageTermination(10)\n", - "\n", - "# Create a team with the primary and critic agents.\n", - "team = RoundRobinGroupChat([primary_agent, gemini_critic_agent], termination_condition=termination)\n", - "\n", - "await Console(team.run_stream(task=\"Write a Haiku poem with 4 lines about the fall season.\"))\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In section above, we show several very important concepts:\n", - "- We have developed a custom agent that uses the Google Gemini SDK to respond to messages. \n", - "- We show that this custom agent can be used as part of the broader AgentChat ecosystem - in this case as a participant in a {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` as long as it inherits from {py:class}`~autogen_agentchat.agents.BaseChatAgent`.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Making the Custom Agent Declarative \n", - "\n", - "Autogen provides a [Component](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/component-config.html) interface for making the configuration of components serializable to a declarative format. This is useful for saving and loading configurations, and for sharing configurations with others. \n", - "\n", - "We accomplish this by inheriting from the `Component` class and implementing the `_from_config` and `_to_config` methods.\n", - "The declarative class can be serialized to a JSON format using the `dump_component` method, and deserialized from a JSON format using the `load_component` method." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "from typing import AsyncGenerator, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import BaseChatMessage, BaseMessage\n", - "from autogen_core import CancellationToken, Component\n", - "from pydantic import BaseModel\n", - "from typing_extensions import Self\n", - "\n", - "\n", - "class GeminiAssistantAgentConfig(BaseModel):\n", - " name: str\n", - " description: str = \"An agent that provides assistance with ability to use tools.\"\n", - " model: str = \"gemini-1.5-flash-002\"\n", - " system_message: str | None = None\n", - "\n", - "\n", - "class GeminiAssistantAgent(BaseChatAgent, Component[GeminiAssistantAgentConfig]): # type: ignore[no-redef]\n", - " component_config_schema = GeminiAssistantAgentConfig\n", - " # component_provider_override = \"mypackage.agents.GeminiAssistantAgent\"\n", - "\n", - " def __init__(\n", - " self,\n", - " name: str,\n", - " description: str = \"An agent that provides assistance with ability to use tools.\",\n", - " model: str = \"gemini-1.5-flash-002\",\n", - " api_key: str = os.environ[\"GEMINI_API_KEY\"],\n", - " system_message: str\n", - " | None = \"You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.\",\n", - " ):\n", - " super().__init__(name=name, description=description)\n", - " self._model_context = UnboundedChatCompletionContext()\n", - " self._model_client = genai.Client(api_key=api_key)\n", - " self._system_message = system_message\n", - " self._model = model\n", - "\n", - " @property\n", - " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", - " return (TextMessage,)\n", - "\n", - " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " final_response = None\n", - " async for message in self.on_messages_stream(messages, cancellation_token):\n", - " if isinstance(message, Response):\n", - " final_response = message\n", - "\n", - " if final_response is None:\n", - " raise AssertionError(\"The stream should have returned the final result.\")\n", - "\n", - " return final_response\n", - "\n", - " async def on_messages_stream(\n", - " self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[BaseMessage | Response, None]:\n", - " # Add messages to the model context\n", - " for msg in messages:\n", - " if isinstance(msg, StructuredMessage):\n", - " serialized_content = msg.content.model_dump_json()\n", - " await self._model_context.add_message(UserMessage(content=serialized_content, source=msg.source))\n", - " else:\n", - " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", - "\n", - " # Get conversation history\n", - " history = [\n", - " (msg.source if hasattr(msg, \"source\") else \"system\")\n", - " + \": \"\n", - " + (msg.content if isinstance(msg.content, str) else \"\")\n", - " + \"\\n\"\n", - " for msg in await self._model_context.get_messages()\n", - " ]\n", - "\n", - " # Generate response using Gemini\n", - " response = self._model_client.models.generate_content(\n", - " model=self._model,\n", - " contents=f\"History: {history}\\nGiven the history, please provide a response\",\n", - " config=types.GenerateContentConfig(\n", - " 
system_instruction=self._system_message,\n", - " temperature=0.3,\n", - " ),\n", - " )\n", - "\n", - " # Create usage metadata\n", - " usage = RequestUsage(\n", - " prompt_tokens=response.usage_metadata.prompt_token_count,\n", - " completion_tokens=response.usage_metadata.candidates_token_count,\n", - " )\n", - "\n", - " # Add response to model context\n", - " await self._model_context.add_message(AssistantMessage(content=response.text, source=self.name))\n", - "\n", - " # Yield the final response\n", - " yield Response(\n", - " chat_message=TextMessage(content=response.text, source=self.name, models_usage=usage),\n", - " inner_messages=[],\n", - " )\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " \"\"\"Reset the assistant by clearing the model context.\"\"\"\n", - " await self._model_context.clear()\n", - "\n", - " @classmethod\n", - " def _from_config(cls, config: GeminiAssistantAgentConfig) -> Self:\n", - " return cls(\n", - " name=config.name, description=config.description, model=config.model, system_message=config.system_message\n", - " )\n", - "\n", - " def _to_config(self) -> GeminiAssistantAgentConfig:\n", - " return GeminiAssistantAgentConfig(\n", - " name=self.name,\n", - " description=self.description,\n", - " model=self._model,\n", - " system_message=self._system_message,\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we have the required methods implemented, we can now load and dump the custom agent to and from a JSON format, and then load the agent from the JSON format.\n", - " \n", - " > Note: You should set the `component_provider_override` class variable to the full path of the module containing the custom agent class e.g., (`mypackage.agents.GeminiAssistantAgent`). This is used by `load_component` method to determine how to instantiate the class. \n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " \"provider\": \"__main__.GeminiAssistantAgent\",\n", - " \"component_type\": \"agent\",\n", - " \"version\": 1,\n", - " \"component_version\": 1,\n", - " \"description\": null,\n", - " \"label\": \"GeminiAssistantAgent\",\n", - " \"config\": {\n", - " \"name\": \"gemini_assistant\",\n", - " \"description\": \"An agent that provides assistance with ability to use tools.\",\n", - " \"model\": \"gemini-1.5-flash-002\",\n", - " \"system_message\": \"You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.\"\n", - " }\n", - "}\n", - "<__main__.GeminiAssistantAgent object at 0x11a5c5a90>\n" - ] - } - ], - "source": [ - "gemini_assistant = GeminiAssistantAgent(\"gemini_assistant\")\n", - "config = gemini_assistant.dump_component()\n", - "print(config.model_dump_json(indent=2))\n", - "loaded_agent = GeminiAssistantAgent.load_component(config)\n", - "print(loaded_agent)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Next Steps \n", - "\n", - "So far, we have seen how to create custom agents, add custom model clients to agents, and make custom agents declarative. There are a few ways in which this basic sample can be extended:\n", - "\n", - "- Extend the Gemini model client to handle function calling similar to the {py:class}`~autogen_agentchat.agents.AssistantAgent` class. 
https://ai.google.dev/gemini-api/docs/function-calling \n",
- "- Implement a package with a custom agent and experiment with using its declarative format in a tool like [AutoGen Studio](https://microsoft.github.io/autogen/stable/user-guide/autogenstudio-user-guide/index.html)."
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": ".venv",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.12.3"
- }
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Custom Agents\n",
+ "\n",
+ "You may have agents with behaviors that do not fall into a preset. \n",
+ "In such cases, you can build custom agents.\n",
+ "\n",
+ "All agents in AgentChat inherit from the {py:class}`~autogen_agentchat.agents.BaseChatAgent` \n",
+ "class and implement the following abstract methods and attributes:\n",
+ "\n",
+ "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n",
+ "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n",
+ "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.BaseChatMessage` message types the agent can produce in its response.\n",
+ "\n",
+ "Optionally, you can implement the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. If this method is not implemented, the agent\n",
+ "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n",
+ "that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n",
+ "yields all messages in the response."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## CountDownAgent\n",
+ "\n",
+ "In this example, we create a simple agent that counts down from a given number to zero,\n",
+ "and produces a stream of messages with the current count."
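For reference, the default streaming behavior described above can be sketched as follows. This is an illustrative simplification, not the actual base-class source: it delegates to `on_messages` and then yields the inner messages followed by the final `Response`.

```python
from typing import AsyncGenerator, Sequence

from autogen_agentchat.agents import BaseChatAgent
from autogen_agentchat.base import Response
from autogen_agentchat.messages import BaseChatMessage, BaseMessage
from autogen_core import CancellationToken


# Illustrative sketch of the default on_messages_stream behavior.
async def default_on_messages_stream(
    agent: BaseChatAgent,
    messages: Sequence[BaseChatMessage],
    cancellation_token: CancellationToken,
) -> AsyncGenerator[BaseMessage | Response, None]:
    response = await agent.on_messages(messages, cancellation_token)
    for inner_message in response.inner_messages or []:
        yield inner_message  # Surface intermediate messages first.
    yield response  # The final Response is always the last item in the stream.
```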
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "3...\n", + "2...\n", + "1...\n", + "Done!\n" + ] + } + ], + "source": [ + "from typing import AsyncGenerator, List, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.messages import BaseChatMessage, BaseMessage, TextMessage\n", + "from autogen_core import CancellationToken\n", + "\n", + "\n", + "class CountDownAgent(BaseChatAgent):\n", + " def __init__(self, name: str, count: int = 3):\n", + " super().__init__(name, \"A simple agent that counts down.\")\n", + " self._count = count\n", + "\n", + " @property\n", + " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + " return (TextMessage,)\n", + "\n", + " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " # Calls the on_messages_stream.\n", + " response: Response | None = None\n", + " async for message in self.on_messages_stream(messages, cancellation_token):\n", + " if isinstance(message, Response):\n", + " response = message\n", + " assert response is not None\n", + " return response\n", + "\n", + " async def on_messages_stream(\n", + " self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken\n", + " ) -> AsyncGenerator[BaseMessage | Response, None]:\n", + " inner_messages: List[BaseMessage] = []\n", + " for i in range(self._count, 0, -1):\n", + " msg = TextMessage(content=f\"{i}...\", source=self.name)\n", + " inner_messages.append(msg)\n", + " yield msg\n", + " # The response is returned at the end of the stream.\n", + " # It contains the final message and all the inner messages.\n", + " yield Response(chat_message=TextMessage(content=\"Done!\", source=self.name), inner_messages=inner_messages)\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " pass\n", + "\n", + "\n", + "async def run_countdown_agent() -> None:\n", + " # Create a countdown agent.\n", + " countdown_agent = CountDownAgent(\"countdown\")\n", + "\n", + " # Run the agent with a given task and stream the response.\n", + " async for message in countdown_agent.on_messages_stream([], CancellationToken()):\n", + " if isinstance(message, Response):\n", + " print(message.chat_message.content)\n", + " else:\n", + " print(message.content)\n", + "\n", + "\n", + "# Use asyncio.run(run_countdown_agent()) when running in a script.\n", + "await run_countdown_agent()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ArithmeticAgent\n", + "\n", + "In this example, we create an agent class that can perform simple arithmetic operations\n", + "on a given integer. Then, we will use different instances of this agent class\n", + "in a {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", + "to transform a given integer into another integer by applying a sequence of arithmetic operations.\n", + "\n", + "The `ArithmeticAgent` class takes an `operator_func` that takes an integer and returns an integer,\n", + "after applying an arithmetic operation to the integer.\n", + "In its `on_messages` method, it applies the `operator_func` to the integer in the input message,\n", + "and returns a response with the result." 
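To make the `on_messages` contract concrete, here is a minimal direct invocation of the `ArithmeticAgent` defined in the next cell. This is a usage sketch and assumes the class below has already been executed:

```python
import asyncio

from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken


async def demo_arithmetic_agent() -> None:
    # A single agent that doubles the number found in the last message.
    double = ArithmeticAgent("double", "Multiplies the number by 2.", lambda x: x * 2)
    response = await double.on_messages(
        [TextMessage(content="21", source="user")], CancellationToken()
    )
    print(response.chat_message.content)  # Prints: 42


# In a notebook, use `await demo_arithmetic_agent()` instead.
asyncio.run(demo_arithmetic_agent())
```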
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import Callable, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.conditions import MaxMessageTermination\n", + "from autogen_agentchat.messages import BaseChatMessage\n", + "from autogen_agentchat.teams import SelectorGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_core import CancellationToken\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "\n", + "class ArithmeticAgent(BaseChatAgent):\n", + " def __init__(self, name: str, description: str, operator_func: Callable[[int], int]) -> None:\n", + " super().__init__(name, description=description)\n", + " self._operator_func = operator_func\n", + " self._message_history: List[BaseChatMessage] = []\n", + "\n", + " @property\n", + " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + " return (TextMessage,)\n", + "\n", + " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " # Update the message history.\n", + " # NOTE: it is possible the messages is an empty list, which means the agent was selected previously.\n", + " self._message_history.extend(messages)\n", + " # Parse the number in the last message.\n", + " assert isinstance(self._message_history[-1], TextMessage)\n", + " number = int(self._message_history[-1].content)\n", + " # Apply the operator function to the number.\n", + " result = self._operator_func(number)\n", + " # Create a new message with the result.\n", + " response_message = TextMessage(content=str(result), source=self.name)\n", + " # Update the message history.\n", + " self._message_history.append(response_message)\n", + " # Return the response.\n", + " return Response(chat_message=response_message)\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " pass" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "The `on_messages` method may be called with an empty list of messages, in which\n", + "case it means the agent was called previously and is now being called again,\n", + "without any new messages from the caller. So it is important to keep a history\n", + "of the previous messages received by the agent, and use that history to generate\n", + "the response.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with 5 instances of `ArithmeticAgent`:\n", + "\n", + "- one that adds 1 to the input integer,\n", + "- one that subtracts 1 from the input integer,\n", + "- one that multiplies the input integer by 2,\n", + "- one that divides the input integer by 2 and rounds down to the nearest integer, and\n", + "- one that returns the input integer unchanged.\n", + "\n", + "We then create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with these agents,\n", + "and set the appropriate selector settings:\n", + "\n", + "- allow the same agent to be selected consecutively to allow for repeated operations, and\n", + "- customize the selector prompt to tailor the model's response to the specific task." 
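Before wiring these agents into the team, a quick plain-Python sanity check shows that 25 is reachable from 10 with these operations; the sequence below mirrors the agent selections in the transcript that follows:

```python
# One sequence of the operator functions above that turns 10 into 25.
ops = [
    lambda x: x * 2,   # multiply_agent: 10 -> 20
    lambda x: x + 1,   # add_agent:      20 -> 21
    lambda x: x * 2,   # multiply_agent: 21 -> 42
    lambda x: x // 2,  # divide_agent:   42 -> 21
    lambda x: x + 1,   # add_agent:      21 -> 22
    lambda x: x + 1,   # add_agent:      22 -> 23
    lambda x: x + 1,   # add_agent:      23 -> 24
    lambda x: x + 1,   # add_agent:      24 -> 25
]
value = 10
for op in ops:
    value = op(value)
assert value == 25
```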
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Apply the operations to turn the given number into 25.\n", + "---------- user ----------\n", + "10\n", + "---------- multiply_agent ----------\n", + "20\n", + "---------- add_agent ----------\n", + "21\n", + "---------- multiply_agent ----------\n", + "42\n", + "---------- divide_agent ----------\n", + "21\n", + "---------- add_agent ----------\n", + "22\n", + "---------- add_agent ----------\n", + "23\n", + "---------- add_agent ----------\n", + "24\n", + "---------- add_agent ----------\n", + "25\n", + "---------- Summary ----------\n", + "Number of messages: 10\n", + "Finish reason: Maximum number of messages 10 reached, current message count: 10\n", + "Total prompt tokens: 0\n", + "Total completion tokens: 0\n", + "Duration: 2.40 seconds\n" + ] + } + ], + "source": [ + "async def run_number_agents() -> None:\n", + " # Create agents for number operations.\n", + " add_agent = ArithmeticAgent(\"add_agent\", \"Adds 1 to the number.\", lambda x: x + 1)\n", + " multiply_agent = ArithmeticAgent(\"multiply_agent\", \"Multiplies the number by 2.\", lambda x: x * 2)\n", + " subtract_agent = ArithmeticAgent(\"subtract_agent\", \"Subtracts 1 from the number.\", lambda x: x - 1)\n", + " divide_agent = ArithmeticAgent(\"divide_agent\", \"Divides the number by 2 and rounds down.\", lambda x: x // 2)\n", + " identity_agent = ArithmeticAgent(\"identity_agent\", \"Returns the number as is.\", lambda x: x)\n", + "\n", + " # The termination condition is to stop after 10 messages.\n", + " termination_condition = MaxMessageTermination(10)\n", + "\n", + " # Create a selector group chat.\n", + " selector_group_chat = SelectorGroupChat(\n", + " [add_agent, multiply_agent, subtract_agent, divide_agent, identity_agent],\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", + " termination_condition=termination_condition,\n", + " allow_repeated_speaker=True, # Allow the same agent to speak multiple times, necessary for this task.\n", + " selector_prompt=(\n", + " \"Available roles:\\n{roles}\\nTheir job descriptions:\\n{participants}\\n\"\n", + " \"Current conversation history:\\n{history}\\n\"\n", + " \"Please select the most appropriate role for the next message, and only return the role name.\"\n", + " ),\n", + " )\n", + "\n", + " # Run the selector group chat with a given task and stream the response.\n", + " task: List[BaseChatMessage] = [\n", + " TextMessage(content=\"Apply the operations to turn the given number into 25.\", source=\"user\"),\n", + " TextMessage(content=\"10\", source=\"user\"),\n", + " ]\n", + " stream = selector_group_chat.run_stream(task=task)\n", + " await Console(stream)\n", + "\n", + "\n", + "# Use asyncio.run(run_number_agents()) when running in a script.\n", + "await run_number_agents()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "From the output, we can see that the agents have successfully transformed the input integer\n", + "from 10 to 25 by choosing appropriate agents that apply the arithmetic operations in sequence." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Custom Model Clients in Custom Agents\n", + "\n", + "One of the key features of the {py:class}`~autogen_agentchat.agents.AssistantAgent` preset in AgentChat is that it takes a `model_client` argument and can use it in responding to messages. 
However, in some cases, you may want your agent to use a custom model client that is not currently supported (see [supported model clients](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/components/model-clients.html)) or to implement custom model behaviours. \n",
+ "\n",
+ "You can accomplish this with a custom agent that implements *your custom model client*.\n",
+ "\n",
+ "In the example below, we walk through a custom agent that uses the [Google Gemini SDK](https://github.com/googleapis/python-genai) directly to respond to messages.\n",
+ "\n",
+ "> **Note:** You will need to install the [Google Gemini SDK](https://github.com/googleapis/python-genai) to run this example. You can install it using the following command: \n",
+ "\n",
+ "```bash\n",
+ "pip install google-genai\n",
+ "``` "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# !pip install google-genai\n",
+ "import os\n",
+ "from typing import AsyncGenerator, Sequence\n",
+ "\n",
+ "from autogen_agentchat.agents import BaseChatAgent\n",
+ "from autogen_agentchat.base import Response\n",
+ "from autogen_agentchat.messages import BaseChatMessage, BaseMessage, StructuredMessage\n",
+ "from autogen_core import CancellationToken\n",
+ "from autogen_core.model_context import UnboundedChatCompletionContext\n",
+ "from autogen_core.models import AssistantMessage, RequestUsage, UserMessage\n",
+ "from google import genai\n",
+ "from google.genai import types\n",
+ "\n",
+ "\n",
+ "class GeminiAssistantAgent(BaseChatAgent):\n",
+ "    def __init__(\n",
+ "        self,\n",
+ "        name: str,\n",
+ "        description: str = \"An agent that provides assistance with ability to use tools.\",\n",
+ "        model: str = \"gemini-1.5-flash-002\",\n",
+ "        api_key: str = os.environ[\"GEMINI_API_KEY\"],\n",
+ "        system_message: str\n",
+ "        | None = \"You are a helpful assistant that can respond to messages. 
Reply with TERMINATE when the task has been completed.\",\n", + " ):\n", + " super().__init__(name=name, description=description)\n", + " self._model_context = UnboundedChatCompletionContext()\n", + " self._model_client = genai.Client(api_key=api_key)\n", + " self._system_message = system_message\n", + " self._model = model\n", + "\n", + " @property\n", + " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + " return (TextMessage,)\n", + "\n", + " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " final_response = None\n", + " async for message in self.on_messages_stream(messages, cancellation_token):\n", + " if isinstance(message, Response):\n", + " final_response = message\n", + "\n", + " if final_response is None:\n", + " raise AssertionError(\"The stream should have returned the final result.\")\n", + "\n", + " return final_response\n", + "\n", + " async def on_messages_stream(\n", + " self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken\n", + " ) -> AsyncGenerator[BaseMessage | Response, None]:\n", + " # Add messages to the model context\n", + " for msg in messages:\n", + " if isinstance(msg, StructuredMessage):\n", + " serialized_content = msg.content.model_dump_json()\n", + " await self._model_context.add_message(UserMessage(content=serialized_content, source=msg.source))\n", + " else:\n", + " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", + "\n", + " # Get conversation history\n", + " history = [\n", + " (msg.source if hasattr(msg, \"source\") else \"system\")\n", + " + \": \"\n", + " + (msg.content if isinstance(msg.content, str) else \"\")\n", + " + \"\\n\"\n", + " for msg in await self._model_context.get_messages()\n", + " ]\n", + " # Generate response using Gemini\n", + " response = self._model_client.models.generate_content(\n", + " model=self._model,\n", + " contents=f\"History: {history}\\nGiven the history, please provide a response\",\n", + " config=types.GenerateContentConfig(\n", + " system_instruction=self._system_message,\n", + " temperature=0.3,\n", + " ),\n", + " )\n", + "\n", + " # Create usage metadata\n", + " usage = RequestUsage(\n", + " prompt_tokens=response.usage_metadata.prompt_token_count,\n", + " completion_tokens=response.usage_metadata.candidates_token_count,\n", + " )\n", + "\n", + " # Add response to model context\n", + " await self._model_context.add_message(AssistantMessage(content=response.text, source=self.name))\n", + "\n", + " # Yield the final response\n", + " yield Response(\n", + " chat_message=TextMessage(content=response.text, source=self.name, models_usage=usage),\n", + " inner_messages=[],\n", + " )\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " \"\"\"Reset the assistant by clearing the model context.\"\"\"\n", + " await self._model_context.clear()" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "What is the capital of New York?\n", + "---------- gemini_assistant ----------\n", + "Albany\n", + "TERMINATE\n", + "\n" + ] }, - "nbformat": 4, - "nbformat_minor": 2 + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the capital of New York?', type='TextMessage'), TextMessage(source='gemini_assistant', 
models_usage=RequestUsage(prompt_tokens=46, completion_tokens=5), content='Albany\\nTERMINATE\\n', type='TextMessage')], stop_reason=None)"
+ ]
+ },
+ "execution_count": 38,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "gemini_assistant = GeminiAssistantAgent(\"gemini_assistant\")\n",
+ "await Console(gemini_assistant.run_stream(task=\"What is the capital of New York?\"))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the example above, we have chosen to provide `model`, `api_key` and `system_message` as arguments - you can choose to provide any other arguments that are required by the model client you are using or that fit your application design. \n",
+ "\n",
+ "Now, let us explore how to use this custom agent as part of a team in AgentChat."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "---------- user ----------\n",
+ "Write a Haiku poem with 4 lines about the fall season.\n",
+ "---------- primary ----------\n",
+ "Crimson leaves cascade, \n",
+ "Whispering winds sing of change, \n",
+ "Chill wraps the fading, \n",
+ "Nature's quilt, rich and warm.\n",
+ "---------- gemini_critic ----------\n",
+ "The poem is good, but it has four lines instead of three. A haiku must have three lines with a 5-7-5 syllable structure. The content is evocative of autumn, but the form is incorrect. Please revise to adhere to the haiku's syllable structure.\n",
+ "\n",
+ "---------- primary ----------\n",
+ "Thank you for your feedback! Here’s a revised haiku that follows the 5-7-5 syllable structure:\n",
+ "\n",
+ "Crimson leaves drift down, \n",
+ "Chill winds whisper through the gold, \n",
+ "Autumn’s breath is near.\n",
+ "---------- gemini_critic ----------\n",
+ "The revised haiku is much improved. It correctly follows the 5-7-5 syllable structure and maintains the evocative imagery of autumn. APPROVE\n",
+ "\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a Haiku poem with 4 lines about the fall season.', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=33, completion_tokens=31), content=\"Crimson leaves cascade, \\nWhispering winds sing of change, \\nChill wraps the fading, \\nNature's quilt, rich and warm.\", type='TextMessage'), TextMessage(source='gemini_critic', models_usage=RequestUsage(prompt_tokens=86, completion_tokens=60), content=\"The poem is good, but it has four lines instead of three. A haiku must have three lines with a 5-7-5 syllable structure. The content is evocative of autumn, but the form is incorrect. Please revise to adhere to the haiku's syllable structure.\\n\", type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=141, completion_tokens=49), content='Thank you for your feedback! Here’s a revised haiku that follows the 5-7-5 syllable structure:\\n\\nCrimson leaves drift down, \\nChill winds whisper through the gold, \\nAutumn’s breath is near.', type='TextMessage'), TextMessage(source='gemini_critic', models_usage=RequestUsage(prompt_tokens=211, completion_tokens=32), content='The revised haiku is much improved. It correctly follows the 5-7-5 syllable structure and maintains the evocative imagery of autumn. 
APPROVE\\n', type='TextMessage')], stop_reason=\"Text 'APPROVE' mentioned\")"
+ ]
+ },
+ "execution_count": 39,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from autogen_agentchat.agents import AssistantAgent\n",
+ "from autogen_agentchat.conditions import TextMentionTermination\n",
+ "from autogen_agentchat.teams import RoundRobinGroupChat\n",
+ "from autogen_agentchat.ui import Console\n",
+ "\n",
+ "model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n",
+ "\n",
+ "# Create the primary agent.\n",
+ "primary_agent = AssistantAgent(\n",
+ "    \"primary\",\n",
+ "    model_client=model_client,\n",
+ "    system_message=\"You are a helpful AI assistant.\",\n",
+ ")\n",
+ "\n",
+ "# Create a critic agent based on our new GeminiAssistantAgent.\n",
+ "gemini_critic_agent = GeminiAssistantAgent(\n",
+ "    \"gemini_critic\",\n",
+ "    system_message=\"Provide constructive feedback. Respond with 'APPROVE' when your feedback is addressed.\",\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Define a termination condition that stops the task if the critic approves or after 10 messages.\n",
+ "termination = TextMentionTermination(\"APPROVE\") | MaxMessageTermination(10)\n",
+ "\n",
+ "# Create a team with the primary and critic agents.\n",
+ "team = RoundRobinGroupChat([primary_agent, gemini_critic_agent], termination_condition=termination)\n",
+ "\n",
+ "await Console(team.run_stream(task=\"Write a Haiku poem with 4 lines about the fall season.\"))\n",
+ "await model_client.close()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the section above, we demonstrated several important concepts:\n",
+ "- We developed a custom agent that uses the Google Gemini SDK to respond to messages. \n",
+ "- We showed that this custom agent can be used as part of the broader AgentChat ecosystem - in this case, as a participant in a {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`, as long as it inherits from {py:class}`~autogen_agentchat.agents.BaseChatAgent`.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Making the Custom Agent Declarative \n",
+ "\n",
+ "AutoGen provides a [Component](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/component-config.html) interface for making the configuration of components serializable to a declarative format. This is useful for saving and loading configurations, and for sharing configurations with others. \n",
+ "\n",
+ "We accomplish this by inheriting from the `Component` class and implementing the `_from_config` and `_to_config` methods.\n",
+ "The declarative class can be serialized to a JSON format using the `dump_component` method, and deserialized from a JSON format using the `load_component` method."
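The `_from_config`/`_to_config` pattern in miniature, before applying it to the Gemini agent below. This is a schematic sketch: `EchoConfig` and `EchoAgent` are illustrative names, not part of AutoGen, and the required `BaseChatAgent` abstract methods are omitted for brevity.

```python
from autogen_agentchat.agents import BaseChatAgent
from autogen_core import Component
from pydantic import BaseModel
from typing_extensions import Self


class EchoConfig(BaseModel):
    name: str
    description: str = "Echoes the last message."


class EchoAgent(BaseChatAgent, Component[EchoConfig]):
    component_config_schema = EchoConfig
    # on_messages, on_reset, and produced_message_types omitted for brevity.

    @classmethod
    def _from_config(cls, config: EchoConfig) -> Self:
        # Rebuild the agent from its serialized configuration.
        return cls(name=config.name, description=config.description)

    def _to_config(self) -> EchoConfig:
        # Capture everything needed to reconstruct this agent.
        return EchoConfig(name=self.name, description=self.description)
```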
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from typing import AsyncGenerator, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.messages import BaseChatMessage, BaseMessage\n", + "from autogen_core import CancellationToken, Component\n", + "from pydantic import BaseModel\n", + "from typing_extensions import Self\n", + "\n", + "\n", + "class GeminiAssistantAgentConfig(BaseModel):\n", + " name: str\n", + " description: str = \"An agent that provides assistance with ability to use tools.\"\n", + " model: str = \"gemini-1.5-flash-002\"\n", + " system_message: str | None = None\n", + "\n", + "\n", + "class GeminiAssistantAgent(BaseChatAgent, Component[GeminiAssistantAgentConfig]): # type: ignore[no-redef]\n", + " component_config_schema = GeminiAssistantAgentConfig\n", + " # component_provider_override = \"mypackage.agents.GeminiAssistantAgent\"\n", + "\n", + " def __init__(\n", + " self,\n", + " name: str,\n", + " description: str = \"An agent that provides assistance with ability to use tools.\",\n", + " model: str = \"gemini-1.5-flash-002\",\n", + " api_key: str = os.environ[\"GEMINI_API_KEY\"],\n", + " system_message: str\n", + " | None = \"You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.\",\n", + " ):\n", + " super().__init__(name=name, description=description)\n", + " self._model_context = UnboundedChatCompletionContext()\n", + " self._model_client = genai.Client(api_key=api_key)\n", + " self._system_message = system_message\n", + " self._model = model\n", + "\n", + " @property\n", + " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + " return (TextMessage,)\n", + "\n", + " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " final_response = None\n", + " async for message in self.on_messages_stream(messages, cancellation_token):\n", + " if isinstance(message, Response):\n", + " final_response = message\n", + "\n", + " if final_response is None:\n", + " raise AssertionError(\"The stream should have returned the final result.\")\n", + "\n", + " return final_response\n", + "\n", + " async def on_messages_stream(\n", + " self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken\n", + " ) -> AsyncGenerator[BaseMessage | Response, None]:\n", + " # Add messages to the model context\n", + " for msg in messages:\n", + " if isinstance(msg, StructuredMessage):\n", + " serialized_content = msg.content.model_dump_json()\n", + " await self._model_context.add_message(UserMessage(content=serialized_content, source=msg.source))\n", + " else:\n", + " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", + "\n", + " # Get conversation history\n", + " history = [\n", + " (msg.source if hasattr(msg, \"source\") else \"system\")\n", + " + \": \"\n", + " + (msg.content if isinstance(msg.content, str) else \"\")\n", + " + \"\\n\"\n", + " for msg in await self._model_context.get_messages()\n", + " ]\n", + "\n", + " # Generate response using Gemini\n", + " response = self._model_client.models.generate_content(\n", + " model=self._model,\n", + " contents=f\"History: {history}\\nGiven the history, please provide a response\",\n", + " config=types.GenerateContentConfig(\n", + " 
system_instruction=self._system_message,\n", + "            temperature=0.3,\n", + "        ),\n", + "    )\n", + "\n", + "        # Create usage metadata\n", + "        usage = RequestUsage(\n", + "            prompt_tokens=response.usage_metadata.prompt_token_count,\n", + "            completion_tokens=response.usage_metadata.candidates_token_count,\n", + "        )\n", + "\n", + "        # Add response to model context\n", + "        await self._model_context.add_message(AssistantMessage(content=response.text, source=self.name))\n", + "\n", + "        # Yield the final response\n", + "        yield Response(\n", + "            chat_message=TextMessage(content=response.text, source=self.name, models_usage=usage),\n", + "            inner_messages=[],\n", + "        )\n", + "\n", + "    async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + "        \"\"\"Reset the assistant by clearing the model context.\"\"\"\n", + "        await self._model_context.clear()\n", + "\n", + "    @classmethod\n", + "    def _from_config(cls, config: GeminiAssistantAgentConfig) -> Self:\n", + "        return cls(\n", + "            name=config.name, description=config.description, model=config.model, system_message=config.system_message\n", + "        )\n", + "\n", + "    def _to_config(self) -> GeminiAssistantAgentConfig:\n", + "        return GeminiAssistantAgentConfig(\n", + "            name=self.name,\n", + "            description=self.description,\n", + "            model=self._model,\n", + "            system_message=self._system_message,\n", + "        )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we have implemented the required methods, we can dump the custom agent to a JSON format and load the agent back from that JSON configuration.\n", + " \n", + " > Note: You should set the `component_provider_override` class variable to the full path of the module containing the custom agent class, e.g., `mypackage.agents.GeminiAssistantAgent`. This is used by the `load_component` method to determine how to instantiate the class. \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + "  \"provider\": \"__main__.GeminiAssistantAgent\",\n", + "  \"component_type\": \"agent\",\n", + "  \"version\": 1,\n", + "  \"component_version\": 1,\n", + "  \"description\": null,\n", + "  \"label\": \"GeminiAssistantAgent\",\n", + "  \"config\": {\n", + "    \"name\": \"gemini_assistant\",\n", + "    \"description\": \"An agent that provides assistance with ability to use tools.\",\n", + "    \"model\": \"gemini-1.5-flash-002\",\n", + "    \"system_message\": \"You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.\"\n", + "  }\n", + "}\n", + "<__main__.GeminiAssistantAgent object at 0x11a5c5a90>\n" + ] + } + ], + "source": [ + "gemini_assistant = GeminiAssistantAgent(\"gemini_assistant\")\n", + "config = gemini_assistant.dump_component()\n", + "print(config.model_dump_json(indent=2))\n", + "loaded_agent = GeminiAssistantAgent.load_component(config)\n", + "print(loaded_agent)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Steps \n", + "\n", + "So far, we have seen how to create custom agents, add custom model clients to agents, and make custom agents declarative. There are a few ways in which this basic sample can be extended:\n", + "\n", + "- Extend the Gemini model client to handle function calling, similarly to the {py:class}`~autogen_agentchat.agents.AssistantAgent` class. See 
https://ai.google.dev/gemini-api/docs/function-calling \n", + "- Implement a package with a custom agent and experiment with using its declarative format in a tool like [AutoGen Studio](https://microsoft.github.io/autogen/stable/user-guide/autogenstudio-user-guide/index.html)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index 35c9052dee75..c14d008d17c4 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -11,8 +11,8 @@ "\n", "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.name`: The unique name of the agent.\n", "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.description`: The description of the agent in text.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: Send the agent a sequence of {py:class}`~autogen_agentchat.messages.ChatMessage` get a {py:class}`~autogen_agentchat.base.Response`. **It is important to note that agents are expected to be stateful and this method is expected to be called with new messages, not the complete history**.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: Send the agent a sequence of messages that subclass {py:class}`~autogen_agentchat.messages.BaseChatMessage` and get a {py:class}`~autogen_agentchat.base.Response`. 
**It is important to note that agents are expected to be stateful and this method is expected to be called with new messages, not the complete history**.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of messages that subclass {py:class}`~autogen_agentchat.messages.BaseMessage` followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n", "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: Reset the agent to its initial state.\n", "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`: convenience methods that call {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` respectively but offer the same interface as [Teams](./teams.ipynb).\n", "\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb index a3f5a25519dd..ff5fc25542c4 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb @@ -23,7 +23,7 @@ "At a high level, messages in AgentChat can be categorized into two types: agent-agent messages and an agent's internal events and messages.\n", "\n", "### Agent-Agent Messages\n", - "AgentChat supports many message types for agent-to-agent communication. They belong to the union type {py:class}`~autogen_agentchat.messages.ChatMessage`. This message type allows both text and multimodal communication and subsumes other message types, such as {py:class}`~autogen_agentchat.messages.TextMessage` or {py:class}`~autogen_agentchat.messages.MultiModalMessage`.\n", + "AgentChat supports many message types for agent-to-agent communication. They are subclasses of {py:class}`~autogen_agentchat.messages.BaseChatMessage`. This message type allows both text and multimodal communication and subsumes other message types, such as {py:class}`~autogen_agentchat.messages.TextMessage` or {py:class}`~autogen_agentchat.messages.MultiModalMessage`.\n", "\n", "For example, the following code snippet demonstrates how to create a text message, which accepts a string content and a string source:" ] @@ -93,7 +93,7 @@ "source": [ "### Internal Events\n", "\n", - "AgentChat also supports the concept of `events` - messages that are internal to an agent. These messages are used to communicate events and information on actions _within_ the agent itself, and belong to the union type {py:class}`~autogen_agentchat.messages.AgentEvent`.\n", + "AgentChat also supports the concept of `events` - messages that are internal to an agent. 
These messages are used to communicate events and information on actions _within_ the agent itself, and are subclasses of {py:class}`~autogen_agentchat.messages.BaseAgentEvent`.\n", "\n", "Examples of these include {py:class}`~autogen_agentchat.messages.ToolCallRequestEvent`, which indicates that a request was made to call a tool, and {py:class}`~autogen_agentchat.messages.ToolCallExecutionEvent`, which contains the results of tool calls.\n", "\n", @@ -107,7 +107,7 @@ ], "metadata": { "kernelspec": { - "display_name": "agnext", + "display_name": ".venv", "language": "python", "name": "python3" }, @@ -121,7 +121,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.12.7" } }, "nbformat": 4, diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py index af55a742cc83..61c768cb20ba 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py @@ -6,8 +6,7 @@ from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response from autogen_agentchat.messages import ( - ChatMessage, - StructuredMessage, + BaseChatMessage, TextMessage, ) from autogen_agentchat.utils import remove_images @@ -85,27 +84,12 @@ def __init__( self._browser = MarkdownFileBrowser(viewport_size=1024 * 5, base_path=base_path) @property - def produced_message_types(self) -> Sequence[type[ChatMessage]]: + def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: return (TextMessage,) - async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: for chat_message in messages: - if isinstance(chat_message, StructuredMessage): - serialized_content = chat_message.content.model_dump_json() - self._chat_history.append( - UserMessage( - content=serialized_content, - source=chat_message.source, - ) - ) - else: - self._chat_history.append( - UserMessage( - content=chat_message.content, - source=chat_message.source, - ) - ) - + self._chat_history.append(chat_message.content_to_model_message()) try: _, content = await self._generate_reply(cancellation_token=cancellation_token) self._chat_history.append(AssistantMessage(content=content, source=self.name)) diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py index b7289bae66de..896e64e3af3d 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py @@ -31,8 +31,7 @@ ToolCallRequestEvent, ) from autogen_core import CancellationToken, FunctionCall, Image -from autogen_core.models._model_client import ChatCompletionClient -from autogen_core.models._types import FunctionExecutionResult +from autogen_core.models import ChatCompletionClient, FunctionExecutionResult from autogen_core.tools import FunctionTool, Tool from pydantic import BaseModel, Field @@ -521,22 +520,19 @@ async def on_messages_stream( async def handle_incoming_message(self, message: BaseChatMessage, cancellation_token: CancellationToken) -> None: """Handle 
regular text messages by adding them to the thread.""" - content: str | List[MessageContentPartParam] - if isinstance(message.content, str): - content = message.content - elif isinstance(message.content, BaseModel): - content = message.content.model_dump_json() - elif isinstance(message.content, list): + content: str | List[MessageContentPartParam] | None = None + llm_message = message.content_to_model_message() + if isinstance(llm_message.content, str): + content = llm_message.content + else: content = [] - for c in message.content: + for c in llm_message.content: if isinstance(c, str): content.append(TextContentBlockParam(text=c, type="text")) elif isinstance(c, Image): content.append(ImageURLContentBlockParam(image_url=ImageURLParam(url=c.data_uri), type="image_url")) else: raise ValueError(f"Unsupported content type: {type(c)} in {message.content}") - else: - raise ValueError(f"Unsupported content type: {type(message.content)} in {message}") await cancellation_token.link_future( asyncio.ensure_future( self._client.beta.threads.messages.create( diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py index cc9268e54192..c4a12adae24a 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py @@ -24,7 +24,7 @@ import PIL.Image from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response -from autogen_agentchat.messages import BaseChatMessage, BaseMessage, MultiModalMessage, StructuredMessage, TextMessage +from autogen_agentchat.messages import BaseChatMessage, BaseMessage, MultiModalMessage, TextMessage from autogen_agentchat.utils import content_to_str, remove_images from autogen_core import EVENT_LOGGER_NAME, CancellationToken, Component, ComponentModel, FunctionCall from autogen_core import Image as AGImage @@ -432,21 +432,7 @@ async def on_messages_stream( self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken ) -> AsyncGenerator[BaseMessage | Response, None]: for chat_message in messages: - if isinstance(chat_message, StructuredMessage): - serialized_content = chat_message.content.model_dump_json() - self._chat_history.append( - UserMessage( - content=serialized_content, - source=chat_message.source, - ) - ) - else: - self._chat_history.append( - UserMessage( - content=chat_message.content, - source=chat_message.source, - ) - ) + self._chat_history.append(chat_message.content_to_model_message()) self.inner_messages: List[BaseMessage] = [] self.model_usage: List[RequestUsage] = [] diff --git a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py index 169325e70c3b..3d680ec4526b 100644 --- a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py +++ b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py @@ -9,7 +9,6 @@ Optional, Tuple, TypeVar, - cast, ) from autogen_agentchat.base import Response, TaskResult @@ -60,7 +59,7 @@ def _extract_message_content(message: BaseMessage) -> Tuple[List[str], List[Imag text_parts = [item for item in message.content if isinstance(item, str)] image_parts = [item for item in message.content if isinstance(item, Image)] else: - text_parts = [str(message.content)] + text_parts = [message.content_to_render()] image_parts = [] return 
text_parts, image_parts @@ -189,9 +188,6 @@ async def RichConsole( # TODO: Handle model client streaming chunk events. pass else: - # Cast required for mypy to be happy - message = cast(BaseMessage, message) # type: ignore - text_parts, image_parts = _extract_message_content(message) # Add usage stats if needed if message.models_usage: diff --git a/python/packages/autogen-ext/tests/test_openai_assistant_agent.py b/python/packages/autogen-ext/tests/test_openai_assistant_agent.py index 43bd3447737a..c557e19e8b8f 100644 --- a/python/packages/autogen-ext/tests/test_openai_assistant_agent.py +++ b/python/packages/autogen-ext/tests/test_openai_assistant_agent.py @@ -8,7 +8,7 @@ import aiofiles import pytest -from autogen_agentchat.messages import ChatMessage, TextMessage +from autogen_agentchat.messages import BaseChatMessage, TextMessage from autogen_core import CancellationToken from autogen_core.tools._base import BaseTool, Tool from autogen_ext.agents.openai import OpenAIAssistantAgent @@ -81,7 +81,7 @@ def __init__(self, id: str, text: str): class FakeCursorPage: - def __init__(self, data: List[ChatMessage | FakeMessage]) -> None: + def __init__(self, data: List[BaseChatMessage | FakeMessage]) -> None: self.data = data def has_next_page(self) -> bool: From 4660f1de7a35d6ed72aa58acba389177b4ff1da5 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sun, 23 Mar 2025 19:34:02 -0700 Subject: [PATCH 07/26] fix quote --- python/packages/agbench/src/agbench/linter/__init__.py | 2 +- python/packages/agbench/src/agbench/linter/_base.py | 5 +++-- python/packages/agbench/src/agbench/linter/cli.py | 8 +++++--- .../agbench/src/agbench/linter/coders/oai_coder.py | 8 +++----- .../teams/_group_chat/_selector_group_chat.py | 2 +- 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/python/packages/agbench/src/agbench/linter/__init__.py b/python/packages/agbench/src/agbench/linter/__init__.py index 797b7f272a5b..a104962445f6 100644 --- a/python/packages/agbench/src/agbench/linter/__init__.py +++ b/python/packages/agbench/src/agbench/linter/__init__.py @@ -1,4 +1,4 @@ # __init__.py -from ._base import Code, Document, CodedDocument, BaseQualitativeCoder +from ._base import BaseQualitativeCoder, Code, CodedDocument, Document __all__ = ["Code", "Document", "CodedDocument", "BaseQualitativeCoder"] diff --git a/python/packages/agbench/src/agbench/linter/_base.py b/python/packages/agbench/src/agbench/linter/_base.py index 4f6209b7809c..c59e826d201b 100644 --- a/python/packages/agbench/src/agbench/linter/_base.py +++ b/python/packages/agbench/src/agbench/linter/_base.py @@ -1,7 +1,8 @@ -import json import hashlib +import json import re -from typing import Protocol, List, Set, Optional +from typing import List, Optional, Protocol, Set + from pydantic import BaseModel, Field diff --git a/python/packages/agbench/src/agbench/linter/cli.py b/python/packages/agbench/src/agbench/linter/cli.py index 426890258b69..14f428929b17 100644 --- a/python/packages/agbench/src/agbench/linter/cli.py +++ b/python/packages/agbench/src/agbench/linter/cli.py @@ -1,8 +1,10 @@ -import os import argparse -from typing import List, Sequence, Optional +import os +from typing import List, Optional, Sequence + from openai import OpenAI -from ._base import Document, CodedDocument + +from ._base import CodedDocument, Document from .coders.oai_coder import OAIQualitativeCoder diff --git a/python/packages/agbench/src/agbench/linter/coders/oai_coder.py b/python/packages/agbench/src/agbench/linter/coders/oai_coder.py index 
374093d3d81b..01322e0c5ccc 100644 --- a/python/packages/agbench/src/agbench/linter/coders/oai_coder.py +++ b/python/packages/agbench/src/agbench/linter/coders/oai_coder.py @@ -1,13 +1,11 @@ import os import re - -from typing import List, Set, Optional -from pydantic import BaseModel +from typing import List, Optional, Set from openai import OpenAI +from pydantic import BaseModel -from .._base import CodedDocument, Document, Code -from .._base import BaseQualitativeCoder +from .._base import BaseQualitativeCoder, Code, CodedDocument, Document class CodeList(BaseModel): diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 347d91b86215..81185b26fed6 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -159,7 +159,7 @@ async def select_speaker(self, thread: List[BaseMessage]) -> str: continue message = f"{msg.source}:" if isinstance(msg, MultiModalMessage): - message += f" {msg.content_to_str(image_placeholder="[image]")}" + message += f" {msg.content_to_str(image_placeholder='[image]')}" else: message += f" {msg.content_to_str()}" history_messages.append( From 2af8c13f15189ef09b50dacf04b97eb5c5258e27 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Mon, 24 Mar 2025 02:32:46 -0700 Subject: [PATCH 08/26] fix --- .../autogen_agentchat/teams/_group_chat/_selector_group_chat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 81185b26fed6..48d835c173e0 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -383,7 +383,7 @@ async def book_trip() -> str: from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.conditions import TextMentionTermination from autogen_agentchat.ui import Console - from autogen_agentchat.messages import AgentEvent, ChatMessage + from autogen_agentchat.messages import BaseMessage async def main() -> None: From 17370d96d4495548893f560f59a0ec18a7f3ddec Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 09:24:35 -0700 Subject: [PATCH 09/26] address comments --- .../conditions/_terminations.py | 14 +---- .../src/autogen_agentchat/messages.py | 44 ++++++++----- .../_magentic_one_orchestrator.py | 4 +- .../teams/_group_chat/_selector_group_chat.py | 7 +-- .../tests/test_assistant_agent.py | 4 ++ .../tests/test_group_chat.py | 63 ++++++++++++++++--- .../tests/test_magentic_one_group_chat.py | 8 +-- 7 files changed, 98 insertions(+), 46 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py index 443a172549e0..4d0d2ccafa31 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py @@ -10,7 +10,6 @@ BaseChatMessage, BaseMessage, HandoffMessage, - MultiModalMessage, StopMessage, TextMessage, 
ToolCallExecutionEvent, @@ -109,8 +108,7 @@ class TextMentionTerminationConfig(BaseModel): class TextMentionTermination(TerminationCondition, Component[TextMentionTerminationConfig]): - """Terminate the conversation if a specific text is mentioned. - + """Terminate the conversation if a specific text is mentioned in any :class:`~autogen_agentchat.messages.BaseChatMessage`. Args: text: The text to look for in the messages. @@ -136,18 +134,12 @@ async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self._sources is not None and message.source not in self._sources: continue - if isinstance(message.content, str) and self._termination_text in message.content: + content = message.content_to_render() + if self._termination_text in content: self._terminated = True return StopMessage( content=f"Text '{self._termination_text}' mentioned", source="TextMentionTermination" ) - elif isinstance(message, MultiModalMessage): - for item in message.content: - if isinstance(item, str) and self._termination_text in item: - self._terminated = True - return StopMessage( - content=f"Text '{self._termination_text}' mentioned", source="TextMentionTermination" - ) return None async def reset(self) -> None: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index 877f3ba79156..ca21825c1ae2 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -25,10 +25,6 @@ class BaseMessage(BaseModel, ABC): """ - content: Any - """The content of the message. The type is expected to be specified by the - subclass.""" - source: str """The name of the agent that sent this message.""" @@ -48,7 +44,10 @@ def type(self) -> str: @abstractmethod def content_to_render(self) -> str: """Convert the content of the message to a string-only representation. - This is used for rendering the message in console or user interface.""" + This is used for rendering the message in console or user interface. + + This is not used for creating text-only content for models. + For :class:`BaseChatMessage` types, use :meth:`content_to_model_text` instead.""" ... def dump(self) -> Mapping[str, Any]: @@ -86,9 +85,9 @@ class BaseChatMessage(BaseMessage, ABC): """ @abstractmethod - def content_to_str(self) -> str: - """Convert the content of the message to a string-only representation. - This is used for creating a text-only content for models. + def content_to_model_text(self) -> str: + """Convert the content of the message to text-only representation. + This is used for creating text-only content for models. This is not used for rendering the message in console. For that, use :meth:`~BaseMessage.content_to_render`. @@ -121,7 +120,7 @@ class BaseTextChatMessage(BaseChatMessage, ABC): def content_to_render(self) -> str: return self.content - def content_to_str(self) -> str: + def content_to_model_text(self) -> str: return self.content def content_to_model_message(self) -> UserMessage: @@ -144,8 +143,7 @@ class BaseAgentEvent(BaseMessage, ABC): a custom rendering of the content. """ - def content_to_render(self) -> str: - return str(self.content) + ... 
StructuredContentType = TypeVar("StructuredContentType", bound=BaseModel, covariant=True) @@ -185,7 +183,7 @@ class MyMessageContent(BaseModel): def content_to_render(self) -> str: return self.content.model_dump_json(indent=2) - def content_to_str(self) -> str: + def content_to_model_text(self) -> str: return self.content.model_dump_json() def content_to_model_message(self) -> UserMessage: @@ -207,10 +205,10 @@ class MultiModalMessage(BaseChatMessage): content: List[str | Image] """The content of the message.""" - def content_to_str(self, image_placeholder: str | None = None) -> str: + def content_to_model_text(self, image_placeholder: str | None = "[image]") -> str: """Convert the content of the message to a string-only representation. If an image is present, it will be replaced with the image placeholder - if provided, otherwise it will be a base64 string. + by default, otherwise it will be a base64 string when set to None. """ text = "" for c in self.content: @@ -269,6 +267,9 @@ class ToolCallRequestEvent(BaseAgentEvent): content: List[FunctionCall] """The tool calls.""" + def content_to_render(self) -> str: + return str(self.content) + class ToolCallExecutionEvent(BaseAgentEvent): """An event signaling the execution of tool calls.""" @@ -276,6 +277,9 @@ class ToolCallExecutionEvent(BaseAgentEvent): content: List[FunctionExecutionResult] """The tool call results.""" + def content_to_render(self) -> str: + return str(self.content) + class UserInputRequestedEvent(BaseAgentEvent): """An event signaling a that the user proxy has requested user input. Published prior to invoking the input callback.""" @@ -286,6 +290,9 @@ class UserInputRequestedEvent(BaseAgentEvent): content: Literal[""] = "" """Empty content for compat with consumers expecting a content field.""" + def content_to_render(self) -> str: + return str(self.content) + class MemoryQueryEvent(BaseAgentEvent): """An event signaling the results of memory queries.""" @@ -293,6 +300,9 @@ class MemoryQueryEvent(BaseAgentEvent): content: List[MemoryContent] """The memory query results.""" + def content_to_render(self) -> str: + return str(self.content) + class ModelClientStreamingChunkEvent(BaseAgentEvent): """An event signaling a text output chunk from a model client in streaming mode.""" @@ -300,6 +310,9 @@ class ModelClientStreamingChunkEvent(BaseAgentEvent): content: str """A string chunk from the model client.""" + def content_to_render(self) -> str: + return self.content + class ThoughtEvent(BaseAgentEvent): """An event signaling the thought process of a model. 
@@ -309,6 +322,9 @@ class ThoughtEvent(BaseAgentEvent): content: str """The thought process of the model.""" + def content_to_render(self) -> str: + return self.content + ChatMessage = Annotated[ TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py index f6baaa8eb249..cbce79baed9f 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py @@ -27,7 +27,7 @@ ToolCallSummaryMessage, ) from ....state import MagenticOneOrchestratorState -from ....utils import content_to_str, remove_images +from ....utils import remove_images from .._base_group_chat_manager import BaseGroupChatManager from .._events import ( GroupChatAgentResponse, @@ -150,7 +150,7 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No # Create the initial task ledger ################################# # Combine all message contents for task - self._task = " ".join([content_to_str(msg.content) for msg in message.messages]) + self._task = " ".join([msg.content_to_model_text() for msg in message.messages]) planning_conversation: List[LLMMessage] = [] # 1. GATHER FACTS diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 48d835c173e0..a566bf6e0078 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -16,7 +16,6 @@ BaseChatMessage, BaseMessage, MessageFactory, - MultiModalMessage, ) from ...state import SelectorManagerState from ._base_group_chat import BaseGroupChat @@ -157,11 +156,7 @@ async def select_speaker(self, thread: List[BaseMessage]) -> str: if not isinstance(msg, BaseChatMessage): # Only process chat messages. 
continue - message = f"{msg.source}:" - if isinstance(msg, MultiModalMessage): - message += f" {msg.content_to_str(image_placeholder='[image]')}" - else: - message += f" {msg.content_to_str()}" + message = f"{msg.source}: {msg.content_to_model_text()}" history_messages.append( message.rstrip() + "\n\n" ) # Create some consistency for how messages are separated in the transcript diff --git a/python/packages/autogen-agentchat/tests/test_assistant_agent.py b/python/packages/autogen-agentchat/tests/test_assistant_agent.py index 40bb440667df..932a9943fc16 100644 --- a/python/packages/autogen-agentchat/tests/test_assistant_agent.py +++ b/python/packages/autogen-agentchat/tests/test_assistant_agent.py @@ -762,6 +762,7 @@ async def test_model_client_stream() -> None: chunks: List[str] = [] async for message in agent.run_stream(task="task"): if isinstance(message, TaskResult): + assert isinstance(message.messages[-1], TextMessage) assert message.messages[-1].content == "Response to message 3" elif isinstance(message, ModelClientStreamingChunkEvent): chunks.append(message.content) @@ -795,11 +796,14 @@ async def test_model_client_stream_with_tool_calls() -> None: chunks: List[str] = [] async for message in agent.run_stream(task="task"): if isinstance(message, TaskResult): + assert isinstance(message.messages[-1], TextMessage) + assert isinstance(message.messages[1], ToolCallRequestEvent) assert message.messages[-1].content == "Example response 2 to task" assert message.messages[1].content == [ FunctionCall(id="1", name="_pass_function", arguments=r'{"input": "task"}'), FunctionCall(id="3", name="_echo_function", arguments=r'{"input": "task"}'), ] + assert isinstance(message.messages[2], ToolCallExecutionEvent) assert message.messages[2].content == [ FunctionExecutionResult(call_id="1", content="pass", is_error=False, name="_pass_function"), FunctionExecutionResult(call_id="3", content="task", is_error=False, name="_echo_function"), diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index 71312ae3b9a5..f15161de0af6 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -104,10 +104,12 @@ async def on_reset(self, cancellation_token: CancellationToken) -> None: class _UnknownMessageType(BaseChatMessage): + content: str + def content_to_model_message(self) -> UserMessage: raise NotImplementedError("This message type is not supported.") - def content_to_str(self) -> str: + def content_to_model_text(self) -> str: raise NotImplementedError("This message type is not supported.") def content_to_render(self) -> str: @@ -205,14 +207,11 @@ async def test_round_robin_group_chat(runtime: AgentRuntime | None) -> None: "Hello, world!", "TERMINATE", ] - # Normalize the messages to remove \r\n and any leading/trailing whitespace. 
- normalized_messages = [ - msg.content.replace("\r\n", "\n").rstrip("\n") if isinstance(msg.content, str) else msg.content - for msg in result.messages - ] - - # Assert that all expected messages are in the collected messages - assert normalized_messages == expected_messages + for i in range(len(expected_messages)): + message = result.messages[i] + assert isinstance(message, TextMessage) + content = message.content.replace("\r\n", "\n").rstrip("\n") + assert content == expected_messages[i] assert result.stop_reason is not None and result.stop_reason == "Text 'TERMINATE' mentioned" @@ -245,6 +244,8 @@ async def test_round_robin_group_chat(runtime: AgentRuntime | None) -> None: await team.reset() task = MultiModalMessage(content=["Write a program that prints 'Hello, world!'"], source="user") result_2 = await team.run(task=task) + assert isinstance(result.messages[0], TextMessage) + assert isinstance(result_2.messages[0], MultiModalMessage) assert result.messages[0].content == task.content[0] assert result.messages[1:] == result_2.messages[1:] @@ -553,6 +554,7 @@ async def test_selector_group_chat(runtime: AgentRuntime | None) -> None: task="Write a program that prints 'Hello, world!'", ) assert len(result.messages) == 6 + assert isinstance(result.messages[0], TextMessage) assert result.messages[0].content == "Write a program that prints 'Hello, world!'" assert result.messages[1].source == "agent3" assert result.messages[2].source == "agent2" @@ -667,6 +669,7 @@ async def test_selector_group_chat_two_speakers(runtime: AgentRuntime | None) -> task="Write a program that prints 'Hello, world!'", ) assert len(result.messages) == 5 + assert isinstance(result.messages[0], TextMessage) assert result.messages[0].content == "Write a program that prints 'Hello, world!'" assert result.messages[1].source == "agent2" assert result.messages[2].source == "agent1" @@ -716,6 +719,7 @@ async def test_selector_group_chat_two_speakers_allow_repeated(runtime: AgentRun ) result = await team.run(task="Write a program that prints 'Hello, world!'") assert len(result.messages) == 4 + assert isinstance(result.messages[0], TextMessage) assert result.messages[0].content == "Write a program that prints 'Hello, world!'" assert result.messages[1].source == "agent2" assert result.messages[2].source == "agent2" @@ -757,6 +761,7 @@ async def test_selector_group_chat_succcess_after_2_attempts(runtime: AgentRunti ) result = await team.run(task="Write a program that prints 'Hello, world!'") assert len(result.messages) == 2 + assert isinstance(result.messages[0], TextMessage) assert result.messages[0].content == "Write a program that prints 'Hello, world!'" assert result.messages[1].source == "agent2" @@ -781,6 +786,7 @@ async def test_selector_group_chat_fall_back_to_first_after_3_attempts(runtime: ) result = await team.run(task="Write a program that prints 'Hello, world!'") assert len(result.messages) == 2 + assert isinstance(result.messages[0], TextMessage) assert result.messages[0].content == "Write a program that prints 'Hello, world!'" assert result.messages[1].source == "agent1" @@ -801,6 +807,7 @@ async def test_selector_group_chat_fall_back_to_previous_after_3_attempts(runtim ) result = await team.run(task="Write a program that prints 'Hello, world!'") assert len(result.messages) == 3 + assert isinstance(result.messages[0], TextMessage) assert result.messages[0].content == "Write a program that prints 'Hello, world!'" assert result.messages[1].source == "agent2" assert result.messages[2].source == "agent2" @@ -918,6 
+925,12 @@ async def test_swarm_handoff(runtime: AgentRuntime | None) -> None: team = Swarm([second_agent, first_agent, third_agent], termination_condition=termination, runtime=runtime) result = await team.run(task="task") assert len(result.messages) == 6 + assert isinstance(result.messages[0], TextMessage) + assert isinstance(result.messages[1], HandoffMessage) + assert isinstance(result.messages[2], HandoffMessage) + assert isinstance(result.messages[3], HandoffMessage) + assert isinstance(result.messages[4], HandoffMessage) + assert isinstance(result.messages[5], HandoffMessage) assert result.messages[0].content == "task" assert result.messages[1].content == "Transferred to third_agent." assert result.messages[2].content == "Transferred to first_agent." @@ -1051,9 +1064,14 @@ async def test_swarm_handoff_using_tool_calls(runtime: AgentRuntime | None) -> N team = Swarm([agent1, agent2], termination_condition=termination, runtime=runtime) result = await team.run(task="task") assert len(result.messages) == 7 + assert isinstance(result.messages[0], TextMessage) assert result.messages[0].content == "task" assert isinstance(result.messages[1], ToolCallRequestEvent) assert isinstance(result.messages[2], ToolCallExecutionEvent) + assert isinstance(result.messages[3], HandoffMessage) + assert isinstance(result.messages[4], HandoffMessage) + assert isinstance(result.messages[5], TextMessage) + assert isinstance(result.messages[6], TextMessage) assert result.messages[3].content == "handoff to agent2" assert result.messages[4].content == "Transferred to agent1." assert result.messages[5].content == "Hello" @@ -1091,18 +1109,23 @@ async def test_swarm_pause_and_resume(runtime: AgentRuntime | None) -> None: team = Swarm([second_agent, first_agent, third_agent], max_turns=1, runtime=runtime) result = await team.run(task="task") assert len(result.messages) == 2 + assert isinstance(result.messages[0], TextMessage) + assert isinstance(result.messages[1], HandoffMessage) assert result.messages[0].content == "task" assert result.messages[1].content == "Transferred to third_agent." # Resume with a new task. result = await team.run(task="new task") assert len(result.messages) == 2 + assert isinstance(result.messages[0], TextMessage) + assert isinstance(result.messages[1], HandoffMessage) assert result.messages[0].content == "new task" assert result.messages[1].content == "Transferred to first_agent." # Resume with the same task. result = await team.run() assert len(result.messages) == 1 + assert isinstance(result.messages[0], HandoffMessage) assert result.messages[0].content == "Transferred to second_agent." @@ -1177,8 +1200,10 @@ def tool2() -> str: source="agent1", context=expected_handoff_context, ) + assert isinstance(result.messages[4], TextMessage) assert result.messages[4].content == "Hello" assert result.messages[4].source == "agent2" + assert isinstance(result.messages[5], TextMessage) assert result.messages[5].content == "TERMINATE" assert result.messages[5].source == "agent2" @@ -1201,17 +1226,26 @@ async def test_swarm_with_handoff_termination(runtime: AgentRuntime | None) -> N # Start result = await team.run(task="task") assert len(result.messages) == 2 + assert isinstance(result.messages[0], TextMessage) + assert isinstance(result.messages[1], HandoffMessage) assert result.messages[0].content == "task" assert result.messages[1].content == "Transferred to third_agent." # Resume existing. 
result = await team.run() assert len(result.messages) == 3 + assert isinstance(result.messages[0], HandoffMessage) + assert isinstance(result.messages[1], HandoffMessage) + assert isinstance(result.messages[2], HandoffMessage) assert result.messages[0].content == "Transferred to first_agent." assert result.messages[1].content == "Transferred to second_agent." assert result.messages[2].content == "Transferred to third_agent." # Resume new task. result = await team.run(task="new task") assert len(result.messages) == 4 + assert isinstance(result.messages[0], TextMessage) + assert isinstance(result.messages[1], HandoffMessage) + assert isinstance(result.messages[2], HandoffMessage) + assert isinstance(result.messages[3], HandoffMessage) assert result.messages[0].content == "new task" assert result.messages[1].content == "Transferred to first_agent." assert result.messages[2].content == "Transferred to second_agent." @@ -1224,6 +1258,9 @@ async def test_swarm_with_handoff_termination(runtime: AgentRuntime | None) -> N # Start result = await team.run(task="task") assert len(result.messages) == 3 + assert isinstance(result.messages[0], TextMessage) + assert isinstance(result.messages[1], HandoffMessage) + assert isinstance(result.messages[2], HandoffMessage) assert result.messages[0].content == "task" assert result.messages[1].content == "Transferred to third_agent." assert result.messages[2].content == "Transferred to non_existing_agent." @@ -1236,6 +1273,10 @@ async def test_swarm_with_handoff_termination(runtime: AgentRuntime | None) -> N # Resume with a HandoffMessage result = await team.run(task=HandoffMessage(content="Handoff to first_agent.", target="first_agent", source="user")) assert len(result.messages) == 4 + assert isinstance(result.messages[0], HandoffMessage) + assert isinstance(result.messages[1], HandoffMessage) + assert isinstance(result.messages[2], HandoffMessage) + assert isinstance(result.messages[3], HandoffMessage) assert result.messages[0].content == "Handoff to first_agent." assert result.messages[1].content == "Transferred to second_agent." assert result.messages[2].content == "Transferred to third_agent." 
@@ -1262,6 +1303,10 @@ async def test_round_robin_group_chat_with_message_list(runtime: AgentRuntime | # Verify the messages were processed in order assert len(result.messages) == 4 # Initial messages + echo until termination + assert isinstance(result.messages[0], TextMessage) + assert isinstance(result.messages[1], TextMessage) + assert isinstance(result.messages[2], TextMessage) + assert isinstance(result.messages[3], TextMessage) assert result.messages[0].content == "Message 1" # First message assert result.messages[1].content == "Message 2" # Second message assert result.messages[2].content == "Message 3" # Third message diff --git a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py index 51efa761de86..8bc26de670c6 100644 --- a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py @@ -134,8 +134,8 @@ async def test_magentic_one_group_chat_basic(runtime: AgentRuntime | None) -> No ) result = await team.run(task="Write a program that prints 'Hello, world!'") assert len(result.messages) == 5 - assert result.messages[2].content == "Continue task" - assert result.messages[4].content == "print('Hello, world!')" + assert result.messages[2].content_to_render() == "Continue task" + assert result.messages[4].content_to_render() == "print('Hello, world!')" assert result.stop_reason is not None and result.stop_reason == "Because" # Test save and load. @@ -214,8 +214,8 @@ async def test_magentic_one_group_chat_with_stalls(runtime: AgentRuntime | None) ) result = await team.run(task="Write a program that prints 'Hello, world!'") assert len(result.messages) == 6 - assert isinstance(result.messages[1].content, str) + assert isinstance(result.messages[1], TextMessage) assert result.messages[1].content.startswith("\nWe are working to address the following user request:") - assert isinstance(result.messages[4].content, str) + assert isinstance(result.messages[4], TextMessage) assert result.messages[4].content.startswith("\nWe are working to address the following user request:") assert result.stop_reason is not None and result.stop_reason == "test" From b0c2ed161b228c4eb0b0c207350874bca2b43701 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 09:47:16 -0700 Subject: [PATCH 10/26] use content methods instead of content itself --- .../agents/_user_proxy_agent.py | 4 +-- .../conditions/_terminations.py | 2 +- .../src/autogen_agentchat/messages.py | 36 ++++++++++--------- .../src/autogen_agentchat/ui/_console.py | 10 +++--- .../tests/test_group_chat.py | 8 ++--- .../tests/test_magentic_one_group_chat.py | 4 +-- .../agentchat-user-guide/migration-guide.md | 4 +-- .../src/autogen_ext/tools/http/_http_tool.py | 2 +- .../src/autogen_ext/tools/mcp/_factory.py | 2 +- .../src/autogen_ext/ui/_rich_console.py | 2 +- python/samples/agentchat_chess_game/main.py | 3 +- python/samples/agentchat_streamlit/agent.py | 4 +-- 12 files changed, 42 insertions(+), 39 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py index 9b5c8a449b6b..785f0b08f1d3 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py @@ -82,7 +82,7 @@ async def simple_user_agent(): 
cancellation_token=CancellationToken(), ) ) - print(f"Your name is {response.chat_message.content}") + print(f"Your name is {response.chat_message.content_to_text()}") Example: Cancellable usage case:: @@ -117,7 +117,7 @@ async def cancellable_user_agent(): ) ) response = await agent_task - print(f"Your name is {response.chat_message.content}") + print(f"Your name is {response.chat_message.content_to_text()}") except Exception as e: print(f"Exception: {e}") except BaseException as e: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py index 4d0d2ccafa31..db6203345707 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py @@ -134,7 +134,7 @@ async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: if self._sources is not None and message.source not in self._sources: continue - content = message.content_to_render() + content = message.content_to_text() if self._termination_text in content: self._terminated = True return StopMessage( diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index ca21825c1ae2..6ffdfce4c242 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -25,6 +25,10 @@ class BaseMessage(BaseModel, ABC): """ + content: Any + """The content of the message. This can be any type, but it is expected to be + specified in the derived class.""" + source: str """The name of the agent that sent this message.""" @@ -42,9 +46,9 @@ def type(self) -> str: return self.__class__.__name__ @abstractmethod - def content_to_render(self) -> str: - """Convert the content of the message to a string-only representation. - This is used for rendering the message in console or user interface. + def content_to_text(self) -> str: + """Convert the content of the message to a string-only representation + that can be rendered in the console and inspected by the user. This is not used for creating text-only content for models. For :class:`BaseChatMessage` types, use :meth:`content_to_model_text` instead.""" @@ -90,7 +94,7 @@ def content_to_model_text(self) -> str: This is used for creating text-only content for models. This is not used for rendering the message in console. For that, use - :meth:`~BaseMessage.content_to_render`. + :meth:`~BaseMessage.content_to_text`. The difference between this and :meth:`content_to_model_message` is that this is used to construct parts of the a message for the model client, @@ -108,8 +112,8 @@ def content_to_model_message(self) -> UserMessage: class BaseTextChatMessage(BaseChatMessage, ABC): """Base class for all text-only :class:`BaseChatMessage` types. - It has implementations for :meth:`content_to_str`, :meth:`content_to_render`, - :meth:`content_to_model_message` methods. + It has implementations for :meth:`content_to_text`, :meth:`content_to_model_text`, + and :meth:`content_to_model_message` methods. Inherit from this class if your message content type is a string. 
""" @@ -117,7 +121,7 @@ class BaseTextChatMessage(BaseChatMessage, ABC): content: str """The content of the message.""" - def content_to_render(self) -> str: + def content_to_text(self) -> str: return self.content def content_to_model_text(self) -> str: @@ -139,7 +143,7 @@ class BaseAgentEvent(BaseMessage, ABC): and teams to user and applications. They are not used for agent-to-agent communication and are not expected to be processed by other agents. - You should override the :meth:`content_to_render` method if you want to provide + You should override the :meth:`content_to_text` method if you want to provide a custom rendering of the content. """ @@ -180,7 +184,7 @@ class MyMessageContent(BaseModel): """The content of the message. Must be a subclass of `Pydantic BaseModel `_.""" - def content_to_render(self) -> str: + def content_to_text(self) -> str: return self.content.model_dump_json(indent=2) def content_to_model_text(self) -> str: @@ -221,7 +225,7 @@ def content_to_model_text(self, image_placeholder: str | None = "[image]") -> st text += f" {c.to_base64()}" return text - def content_to_render(self, iterm: bool = False) -> str: + def content_to_text(self, iterm: bool = False) -> str: result: List[str] = [] for c in self.content: if isinstance(c, str): @@ -267,7 +271,7 @@ class ToolCallRequestEvent(BaseAgentEvent): content: List[FunctionCall] """The tool calls.""" - def content_to_render(self) -> str: + def content_to_text(self) -> str: return str(self.content) @@ -277,7 +281,7 @@ class ToolCallExecutionEvent(BaseAgentEvent): content: List[FunctionExecutionResult] """The tool call results.""" - def content_to_render(self) -> str: + def content_to_text(self) -> str: return str(self.content) @@ -290,7 +294,7 @@ class UserInputRequestedEvent(BaseAgentEvent): content: Literal[""] = "" """Empty content for compat with consumers expecting a content field.""" - def content_to_render(self) -> str: + def content_to_text(self) -> str: return str(self.content) @@ -300,7 +304,7 @@ class MemoryQueryEvent(BaseAgentEvent): content: List[MemoryContent] """The memory query results.""" - def content_to_render(self) -> str: + def content_to_text(self) -> str: return str(self.content) @@ -310,7 +314,7 @@ class ModelClientStreamingChunkEvent(BaseAgentEvent): content: str """A string chunk from the model client.""" - def content_to_render(self) -> str: + def content_to_text(self) -> str: return self.content @@ -322,7 +326,7 @@ class ThoughtEvent(BaseAgentEvent): content: str """The thought process of the model.""" - def content_to_render(self) -> str: + def content_to_text(self) -> str: return self.content diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py index 919b843aae22..84b2396089f4 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py @@ -135,9 +135,9 @@ async def Console( # Print final response. 
if isinstance(message.chat_message, MultiModalMessage): - final_content = message.chat_message.content_to_render(iterm=render_image_iterm) + final_content = message.chat_message.content_to_text(iterm=render_image_iterm) else: - final_content = message.chat_message.content_to_render() + final_content = message.chat_message.content_to_text() output = f"{'-' * 10} {message.chat_message.source} {'-' * 10}\n{final_content}\n" if message.chat_message.models_usage: if output_stats: @@ -174,7 +174,7 @@ async def Console( # Print message sender. await aprint(f"{'-' * 10} {message.source} {'-' * 10}", end="\n", flush=True) if isinstance(message, ModelClientStreamingChunkEvent): - await aprint(message.content_to_render(), end="") + await aprint(message.content_to_text(), end="") streaming_chunks.append(message.content) else: if streaming_chunks: @@ -182,9 +182,9 @@ async def Console( # Chunked messages are already printed, so we just print a newline. await aprint("", end="\n", flush=True) elif isinstance(message, MultiModalMessage): - await aprint(message.content_to_render(iterm=render_image_iterm), end="\n", flush=True) + await aprint(message.content_to_text(iterm=render_image_iterm), end="\n", flush=True) else: - await aprint(message.content_to_render(), end="\n", flush=True) + await aprint(message.content_to_text(), end="\n", flush=True) if message.models_usage: if output_stats: await aprint( diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index f15161de0af6..e7be4ebdba86 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -112,7 +112,7 @@ def content_to_model_message(self) -> UserMessage: def content_to_model_text(self) -> str: raise NotImplementedError("This message type is not supported.") - def content_to_render(self) -> str: + def content_to_text(self) -> str: raise NotImplementedError("This message type is not supported.") @@ -208,9 +208,9 @@ async def test_round_robin_group_chat(runtime: AgentRuntime | None) -> None: "TERMINATE", ] for i in range(len(expected_messages)): - message = result.messages[i] - assert isinstance(message, TextMessage) - content = message.content.replace("\r\n", "\n").rstrip("\n") + produced_message = result.messages[i] + assert isinstance(produced_message, TextMessage) + content = produced_message.content.replace("\r\n", "\n").rstrip("\n") assert content == expected_messages[i] assert result.stop_reason is not None and result.stop_reason == "Text 'TERMINATE' mentioned" diff --git a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py index 8bc26de670c6..f4f00db1c854 100644 --- a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py @@ -134,8 +134,8 @@ async def test_magentic_one_group_chat_basic(runtime: AgentRuntime | None) -> No ) result = await team.run(task="Write a program that prints 'Hello, world!'") assert len(result.messages) == 5 - assert result.messages[2].content_to_render() == "Continue task" - assert result.messages[4].content_to_render() == "print('Hello, world!')" + assert result.messages[2].content_to_text() == "Continue task" + assert result.messages[4].content_to_text() == "print('Hello, world!')" assert result.stop_reason is not None and result.stop_reason == "Because" # Test save and load. 
diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md index 8cc2b1041b5d..74fdda4b23ac 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md @@ -691,7 +691,7 @@ async def main() -> None: if user_input == "exit": break response = await assistant.on_messages([TextMessage(content=user_input, source="user")], CancellationToken()) - print("Assistant:", response.chat_message.content) + print("Assistant:", response.chat_message.content_to_render()) await model_client.close() asyncio.run(main()) @@ -1330,7 +1330,7 @@ async def main() -> None: if user_input == "exit": break response = await assistant.on_messages([TextMessage(content=user_input, source="user")], CancellationToken()) - print("Assistant:", response.chat_message.content) + print("Assistant:", response.chat_message.content_to_render()) await model_client.close() diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py b/python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py index 451d5826bad7..6603857b3557 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py @@ -126,7 +126,7 @@ async def main(): [TextMessage(content="Can you base64 decode the value 'YWJjZGU=', please?", source="user")], CancellationToken(), ) - print(response.chat_message.content) + print(response.chat_message.content_to_text()) asyncio.run(main()) diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/mcp/_factory.py b/python/packages/autogen-ext/src/autogen_ext/tools/mcp/_factory.py index 3eb8634b3698..973ad508f4cc 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/mcp/_factory.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/mcp/_factory.py @@ -105,7 +105,7 @@ async def main() -> None: # Let the agent fetch the content of a URL and summarize it. 
result = await agent.run(task="Summarize the content of https://en.wikipedia.org/wiki/Seattle") - print(result.messages[-1].content) + print(result.messages[-1].content_to_text()) asyncio.run(main()) diff --git a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py index 3d680ec4526b..acdaa215b737 100644 --- a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py +++ b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py @@ -59,7 +59,7 @@ def _extract_message_content(message: BaseMessage) -> Tuple[List[str], List[Imag text_parts = [item for item in message.content if isinstance(item, str)] image_parts = [item for item in message.content if isinstance(item, Image)] else: - text_parts = [message.content_to_render()] + text_parts = [message.content_to_text()] image_parts = [] return text_parts, image_parts diff --git a/python/samples/agentchat_chess_game/main.py b/python/samples/agentchat_chess_game/main.py index e12db1d94778..01d25cc6a66f 100644 --- a/python/samples/agentchat_chess_game/main.py +++ b/python/samples/agentchat_chess_game/main.py @@ -78,8 +78,7 @@ async def get_ai_move(board: chess.Board, player: AssistantAgent, max_tries: int while count < max_tries: result = await Console(player.run_stream(task=task)) count += 1 - response = result.messages[-1].content - assert isinstance(response, str) + response = result.messages[-1].content_to_text() # Check if the response is a valid UCI move. try: move = chess.Move.from_uci(extract_move(response)) diff --git a/python/samples/agentchat_streamlit/agent.py b/python/samples/agentchat_streamlit/agent.py index cbe588828838..a53f22d9f954 100644 --- a/python/samples/agentchat_streamlit/agent.py +++ b/python/samples/agentchat_streamlit/agent.py @@ -1,6 +1,6 @@ import yaml from autogen_agentchat.agents import AssistantAgent -from autogen_agentchat.messages import TextMessage +from autogen_agentchat.messages import BaseTextChatMessage, TextMessage from autogen_core import CancellationToken from autogen_core.models import ChatCompletionClient @@ -22,5 +22,5 @@ async def chat(self, prompt: str) -> str: [TextMessage(content=prompt, source="user")], CancellationToken(), ) - assert isinstance(response.chat_message.content, str) + assert isinstance(response.chat_message, BaseTextChatMessage) return response.chat_message.content From c8f98cba3a499f2fa84bd88e43730b7bc0a996f5 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 09:49:56 -0700 Subject: [PATCH 11/26] update --- .../autogen_agentchat/teams/_group_chat/_selector_group_chat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index a566bf6e0078..04f0f4bf69ba 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -405,7 +405,7 @@ def check_calculation(x: int, y: int, answer: int) -> str: ) def selector_func(messages: Sequence[BaseMessage]) -> str | None: - if len(messages) == 1 or messages[-1].content == "Incorrect!": + if len(messages) == 1 or messages[-1].content_to_text() == "Incorrect!": return "Agent1" if messages[-1].source == "Agent1": return "Agent2" From d871cd973992ada4db5c6c2e469cedd9d1af4528 Mon Sep 17
00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 09:56:48 -0700 Subject: [PATCH 12/26] lint --- .../autogen-agentchat/src/autogen_agentchat/messages.py | 2 +- .../src/user-guide/agentchat-user-guide/migration-guide.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index 6ffdfce4c242..1ca8bd76c275 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -176,7 +176,7 @@ class MyMessageContent(BaseModel): source="agent1", ) - print(message.content_to_str()) # {"text": "Hello", "number": 42} + print(message.content_to_text()) # {"text": "Hello", "number": 42} """ diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md index 74fdda4b23ac..0b3514985a54 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md @@ -691,7 +691,7 @@ async def main() -> None: if user_input == "exit": break response = await assistant.on_messages([TextMessage(content=user_input, source="user")], CancellationToken()) - print("Assistant:", response.chat_message.content_to_render()) + print("Assistant:", response.chat_message.content_to_text()) await model_client.close() asyncio.run(main()) @@ -1330,7 +1330,7 @@ async def main() -> None: if user_input == "exit": break response = await assistant.on_messages([TextMessage(content=user_input, source="user")], CancellationToken()) - print("Assistant:", response.chat_message.content_to_render()) + print("Assistant:", response.chat_message.content_to_text()) await model_client.close() From de89005b48784e11e5c172ba2a6abaa4f35e08ae Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 11:27:43 -0700 Subject: [PATCH 13/26] swap union with base class --- .../Templates/SelectorGroupChat/scenario.py | 4 +- .../agents/_assistant_agent.py | 28 +- .../agents/_base_chat_agent.py | 36 +- .../agents/_code_executor_agent.py | 6 +- .../agents/_society_of_mind_agent.py | 16 +- .../agents/_user_proxy_agent.py | 12 +- .../src/autogen_agentchat/base/_chat_agent.py | 16 +- .../src/autogen_agentchat/base/_task.py | 10 +- .../autogen_agentchat/base/_termination.py | 10 +- .../conditions/_terminations.py | 28 +- .../src/autogen_agentchat/messages.py | 85 +- .../teams/_group_chat/_base_group_chat.py | 34 +- .../_group_chat/_base_group_chat_manager.py | 12 +- .../_group_chat/_chat_agent_container.py | 10 +- .../teams/_group_chat/_events.py | 6 +- .../_magentic_one/_magentic_one_group_chat.py | 4 +- .../_magentic_one_orchestrator.py | 12 +- .../_group_chat/_round_robin_group_chat.py | 12 +- .../teams/_group_chat/_selector_group_chat.py | 34 +- .../teams/_group_chat/_swarm_group_chat.py | 12 +- .../src/autogen_agentchat/ui/_console.py | 9 +- .../tests/test_assistant_agent.py | 4 +- .../tests/test_group_chat.py | 36 +- .../tests/test_group_chat_endpoint.py | 4 +- .../tests/test_group_chat_pause_resume.py | 6 +- .../tests/test_magentic_one_group_chat.py | 6 +- .../tests/test_userproxy_agent.py | 4 +- .../agentchat-user-guide/custom-agents.ipynb | 46 +- .../agentchat-user-guide/migration-guide.md | 16 +- .../selector-group-chat.ipynb | 10 +- 
.../tutorial/agents.ipynb | 1690 ++++++++--------- .../tutorial/messages.ipynb | 255 ++- .../tutorial/termination.ipynb | 8 +- .../agents/file_surfer/_file_surfer.py | 6 +- .../agents/openai/_openai_assistant_agent.py | 16 +- .../web_surfer/_multimodal_web_surfer.py | 12 +- .../task_centric_memory/utils/apprentice.py | 6 +- .../task_centric_memory/utils/page_logger.py | 4 +- .../src/autogen_ext/ui/_rich_console.py | 9 +- .../tests/test_openai_assistant_agent.py | 4 +- .../autogenstudio/datamodel/types.py | 4 +- .../autogenstudio/teammanager/teammanager.py | 4 +- .../autogenstudio/web/managers/connection.py | 5 +- python/samples/agentchat_streamlit/agent.py | 4 +- 44 files changed, 1269 insertions(+), 1286 deletions(-) diff --git a/python/packages/agbench/benchmarks/GAIA/Templates/SelectorGroupChat/scenario.py b/python/packages/agbench/benchmarks/GAIA/Templates/SelectorGroupChat/scenario.py index 64f6310cd8ba..6c2e3a5292e4 100644 --- a/python/packages/agbench/benchmarks/GAIA/Templates/SelectorGroupChat/scenario.py +++ b/python/packages/agbench/benchmarks/GAIA/Templates/SelectorGroupChat/scenario.py @@ -14,7 +14,7 @@ from autogen_ext.agents.web_surfer import MultimodalWebSurfer from autogen_ext.agents.file_surfer import FileSurfer from autogen_agentchat.agents import CodeExecutorAgent -from autogen_agentchat.messages import BaseMessage, TextMessage, MultiModalMessage, StopMessage +from autogen_agentchat.messages import ChatMessage, AgentEvent, TextMessage, MultiModalMessage, StopMessage from autogen_core.models import LLMMessage, UserMessage # Suppress warnings about the requests.Session() not being closed @@ -139,7 +139,7 @@ def __init__(self, prompt: str, model_client: ChatCompletionClient, termination_ def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index 7d0d95445a16..b6a677d94f87 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -40,8 +40,8 @@ from ..base import Handoff as HandoffBase from ..base import Response from ..messages import ( - BaseChatMessage, - BaseMessage, + AgentEvent, + ChatMessage, HandoffMessage, MemoryQueryEvent, ModelClientStreamingChunkEvent, @@ -697,8 +697,8 @@ def __init__( self._is_running = False @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: - message_types: List[type[BaseChatMessage]] = [TextMessage] + def produced_message_types(self) -> Sequence[type[ChatMessage]]: + message_types: List[type[ChatMessage]] = [TextMessage] if self._handoffs: message_types.append(HandoffMessage) if self._tools: @@ -712,15 +712,15 @@ def model_context(self) -> ChatCompletionContext: """ return self._model_context - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: async for message in self.on_messages_stream(messages, cancellation_token): if isinstance(message, Response): return 
message raise AssertionError("The stream should have returned the final result.") async def on_messages_stream( - self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[BaseMessage | Response, None]: + self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: """ Process the incoming messages with the assistant agent and yield events/responses as they happen. """ @@ -745,7 +745,7 @@ async def on_messages_stream( ) # STEP 2: Update model context with any relevant memory - inner_messages: List[BaseMessage] = [] + inner_messages: List[ChatMessage | AgentEvent] = [] for event_msg in await self._update_model_context_with_memory( memory=memory, model_context=model_context, @@ -810,7 +810,7 @@ async def on_messages_stream( @staticmethod async def _add_messages_to_context( model_context: ChatCompletionContext, - messages: Sequence[BaseChatMessage], + messages: Sequence[ChatMessage], ) -> None: """ Add incoming messages to the model context. @@ -886,7 +886,7 @@ async def _call_llm( async def _process_model_result( cls, model_result: CreateResult, - inner_messages: List[BaseMessage], + inner_messages: List[ChatMessage | AgentEvent], cancellation_token: CancellationToken, agent_name: str, system_messages: List[SystemMessage], @@ -898,7 +898,7 @@ async def _process_model_result( model_client_stream: bool, reflect_on_tool_use: bool, tool_call_summary_format: str, - ) -> AsyncGenerator[BaseMessage | Response, None]: + ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: """ Handle final or partial responses from model_result, including tool calls, handoffs, and reflection if needed. @@ -992,7 +992,7 @@ async def _process_model_result( def _check_and_handle_handoff( model_result: CreateResult, executed_calls_and_results: List[Tuple[FunctionCall, FunctionExecutionResult]], - inner_messages: List[BaseMessage], + inner_messages: List[ChatMessage | AgentEvent], handoffs: Dict[str, HandoffBase], agent_name: str, ) -> Optional[Response]: @@ -1057,7 +1057,7 @@ async def _reflect_on_tool_use_flow( model_client_stream: bool, model_context: ChatCompletionContext, agent_name: str, - inner_messages: List[BaseMessage], + inner_messages: List[ChatMessage | AgentEvent], ) -> AsyncGenerator[Response | ModelClientStreamingChunkEvent | ThoughtEvent, None]: """ If reflect_on_tool_use=True, we do another inference based on tool results @@ -1109,7 +1109,7 @@ async def _reflect_on_tool_use_flow( @staticmethod def _summarize_tool_use( executed_calls_and_results: List[Tuple[FunctionCall, FunctionExecutionResult]], - inner_messages: List[BaseMessage], + inner_messages: List[ChatMessage | AgentEvent], handoffs: Dict[str, HandoffBase], tool_call_summary_format: str, agent_name: str, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py index f37d46cadfb5..8e671bdd8896 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py @@ -6,8 +6,8 @@ from ..base import ChatAgent, Response, TaskResult from ..messages import ( - BaseChatMessage, - BaseMessage, + AgentEvent, + ChatMessage, ModelClientStreamingChunkEvent, TextMessage, ) @@ -59,13 +59,13 @@ def description(self) -> str: @property @abstractmethod - def produced_message_types(self) -> 
Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: """The types of messages that the agent produces in the - :attr:`Response.chat_message` field. They must be :class:`BaseChatMessage` types.""" + :attr:`Response.chat_message` field. They must be :class:`ChatMessage` types.""" ... @abstractmethod - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: """Handles incoming messages and returns a response. .. note:: @@ -81,8 +81,8 @@ async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_to ... async def on_messages_stream( - self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[BaseMessage | Response, None]: + self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: """Handles incoming messages and returns a stream of messages, and the final item is the response. The base implementation in :class:`BaseChatAgent` simply calls :meth:`on_messages` and yields @@ -106,21 +106,21 @@ async def on_messages_stream( async def run( self, *, - task: str | BaseChatMessage | Sequence[BaseChatMessage] | None = None, + task: str | ChatMessage | Sequence[ChatMessage] | None = None, cancellation_token: CancellationToken | None = None, ) -> TaskResult: """Run the agent with the given task and return the result.""" if cancellation_token is None: cancellation_token = CancellationToken() - input_messages: List[BaseChatMessage] = [] - output_messages: List[BaseMessage] = [] + input_messages: List[ChatMessage] = [] + output_messages: List[ChatMessage | AgentEvent] = [] if task is None: pass elif isinstance(task, str): text_msg = TextMessage(content=task, source="user") input_messages.append(text_msg) output_messages.append(text_msg) - elif isinstance(task, BaseChatMessage): + elif isinstance(task, ChatMessage): input_messages.append(task) output_messages.append(task) else: @@ -128,7 +128,7 @@ async def run( raise ValueError("Task list cannot be empty.") # Task is a sequence of messages.
for msg in task: - if isinstance(msg, BaseChatMessage): + if isinstance(msg, ChatMessage): input_messages.append(msg) output_messages.append(msg) else: @@ -142,15 +142,15 @@ async def run( async def run_stream( self, *, - task: str | BaseChatMessage | Sequence[BaseChatMessage] | None = None, + task: str | ChatMessage | Sequence[ChatMessage] | None = None, cancellation_token: CancellationToken | None = None, - ) -> AsyncGenerator[BaseMessage | TaskResult, None]: + ) -> AsyncGenerator[ChatMessage | AgentEvent | TaskResult, None]: """Run the agent with the given task and return a stream of messages and the final task result as the last item in the stream.""" if cancellation_token is None: cancellation_token = CancellationToken() - input_messages: List[BaseChatMessage] = [] - output_messages: List[BaseMessage] = [] + input_messages: List[ChatMessage] = [] + output_messages: List[ChatMessage | AgentEvent] = [] if task is None: pass elif isinstance(task, str): @@ -158,7 +158,7 @@ async def run_stream( input_messages.append(text_msg) output_messages.append(text_msg) yield text_msg - elif isinstance(task, BaseChatMessage): + elif isinstance(task, ChatMessage): input_messages.append(task) output_messages.append(task) yield task @@ -166,7 +166,7 @@ async def run_stream( if not task: raise ValueError("Task list cannot be empty.") for msg in task: - if isinstance(msg, BaseChatMessage): + if isinstance(msg, ChatMessage): input_messages.append(msg) output_messages.append(msg) yield msg diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py index ccd57ed219af..e94ce4550efb 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py @@ -7,7 +7,7 @@ from typing_extensions import Self from ..base import Response -from ..messages import BaseChatMessage, TextMessage +from ..messages import ChatMessage, TextMessage from ._base_chat_agent import BaseChatAgent @@ -119,11 +119,11 @@ def __init__( self._sources = sources @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: """The types of messages that the code executor agent produces.""" return (TextMessage,) - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: # Extract code blocks from the messages. 
code_blocks: List[CodeBlock] = [] for msg in messages: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py index 2a9572e8937d..e78c3741db9a 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py @@ -10,8 +10,8 @@ from ..base import TaskResult, Team from ..messages import ( - BaseChatMessage, - BaseMessage, + AgentEvent, + ChatMessage, ModelClientStreamingChunkEvent, TextMessage, ) @@ -122,10 +122,10 @@ def __init__( self._response_prompt = response_prompt @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return (TextMessage,) - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: # Call the stream method and collect the messages. response: Response | None = None async for msg in self.on_messages_stream(messages, cancellation_token): @@ -135,14 +135,14 @@ async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_to return response async def on_messages_stream( - self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[BaseMessage | Response, None]: + self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: # Prepare the task for the team of agents. task = list(messages) # Run the team of agents. result: TaskResult | None = None - inner_messages: List[BaseMessage] = [] + inner_messages: List[ChatMessage | AgentEvent] = [] count = 0 async for inner_msg in self._team.run_stream(task=task, cancellation_token=cancellation_token): if isinstance(inner_msg, TaskResult): @@ -167,7 +167,7 @@ async def on_messages_stream( # Generate a response using the model client. 
llm_messages: List[LLMMessage] = [SystemMessage(content=self._instruction)] for message in messages: - if isinstance(message, BaseChatMessage): + if isinstance(message, ChatMessage): llm_messages.append(message.content_to_model_message()) llm_messages.append(SystemMessage(content=self._response_prompt)) completion = await self._model_client.create(messages=llm_messages, cancellation_token=cancellation_token) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py index 785f0b08f1d3..7131b1f13abb 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py @@ -10,7 +10,7 @@ from typing_extensions import Self from ..base import Response -from ..messages import BaseChatMessage, BaseMessage, HandoffMessage, TextMessage, UserInputRequestedEvent +from ..messages import AgentEvent, ChatMessage, HandoffMessage, TextMessage, UserInputRequestedEvent from ._base_chat_agent import BaseChatAgent SyncInputFunc = Callable[[str], str] @@ -168,11 +168,11 @@ def __init__( self._is_async = iscoroutinefunction(self.input_func) @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: """Message types this agent can produce.""" return (TextMessage, HandoffMessage) - def _get_latest_handoff(self, messages: Sequence[BaseChatMessage]) -> Optional[HandoffMessage]: + def _get_latest_handoff(self, messages: Sequence[ChatMessage]) -> Optional[HandoffMessage]: """Find the HandoffMessage in the message sequence that addresses this agent.""" if len(messages) > 0 and isinstance(messages[-1], HandoffMessage): if messages[-1].target == self.name: @@ -199,15 +199,15 @@ async def _get_input(self, prompt: str, cancellation_token: Optional[Cancellatio except Exception as e: raise RuntimeError(f"Failed to get user input: {str(e)}") from e - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: async for message in self.on_messages_stream(messages, cancellation_token): if isinstance(message, Response): return message raise AssertionError("The stream should have returned the final result.") async def on_messages_stream( - self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[BaseMessage | Response, None]: + self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: """Handle incoming messages by requesting user input.""" try: # Check for handoff first diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py index e54df1f22953..8a7053db21a2 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py @@ -5,7 +5,7 @@ from autogen_core import CancellationToken, ComponentBase from pydantic import BaseModel -from ..messages import BaseChatMessage, BaseMessage +from ..messages import AgentEvent, ChatMessage from ._task import TaskRunner @@ -13,10 +13,10 @@ class Response: """A 
response from calling :meth:`ChatAgent.on_messages`.""" - chat_message: BaseChatMessage + chat_message: ChatMessage """A chat message produced by the agent as the response.""" - inner_messages: Sequence[BaseMessage] | None = None + inner_messages: Sequence[ChatMessage | AgentEvent] | None = None """Inner messages produced by the agent.""" @@ -42,20 +42,20 @@ def description(self) -> str: @property @abstractmethod - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: """The types of messages that the agent produces in the - :attr:`Response.chat_message` field. They must be :class:`BaseChatMessage` types.""" + :attr:`Response.chat_message` field. They must be :class:`ChatMessage` types.""" ... @abstractmethod - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: """Handles incoming messages and returns a response.""" ... @abstractmethod def on_messages_stream( - self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[BaseMessage | Response, None]: + self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: """Handles incoming messages and returns a stream of inner messages, and the final item is the response.""" ... diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py index f859978968f3..ca3e44dbce0b 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py @@ -3,14 +3,14 @@ from autogen_core import CancellationToken -from ..messages import BaseChatMessage, BaseMessage +from ..messages import AgentEvent, ChatMessage @dataclass class TaskResult: """Result of running a task.""" - messages: Sequence[BaseMessage] + messages: Sequence[ChatMessage | AgentEvent] """Messages produced by the task.""" stop_reason: str | None = None @@ -23,7 +23,7 @@ class TaskRunner(Protocol): async def run( self, *, - task: str | BaseChatMessage | Sequence[BaseChatMessage] | None = None, + task: str | ChatMessage | Sequence[ChatMessage] | None = None, cancellation_token: CancellationToken | None = None, ) -> TaskResult: """Run the task and return the result. @@ -38,9 +38,9 @@ async def run( def run_stream( self, *, - task: str | BaseChatMessage | Sequence[BaseChatMessage] | None = None, + task: str | ChatMessage | Sequence[ChatMessage] | None = None, cancellation_token: CancellationToken | None = None, - ) -> AsyncGenerator[BaseMessage | TaskResult, None]: + ) -> AsyncGenerator[ChatMessage | AgentEvent | TaskResult, None]: """Run the task and produce a stream of messages and the final result :class:`TaskResult` as the last item in the stream.
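With this change, TaskResult.messages is a mixed sequence of ChatMessage and AgentEvent entries, so consumers that only want conversational turns have to filter by base class. A hedged sketch of that filtering; the chat_transcript helper is made up for illustration and is not part of the library:

from autogen_agentchat.base import TaskResult
from autogen_agentchat.messages import ChatMessage

def chat_transcript(result: TaskResult) -> str:
    # Keep only ChatMessage entries; AgentEvent entries (tool call
    # requests and results, streaming chunks, thoughts) are diagnostics,
    # not part of the conversation itself.
    return "\n".join(
        f"{message.source}: {message.content_to_text()}"
        for message in result.messages
        if isinstance(message, ChatMessage)
    )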
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py index ae9b06578b5d..2df6da347629 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py @@ -6,7 +6,7 @@ from pydantic import BaseModel from typing_extensions import Self -from ..messages import BaseMessage, StopMessage +from ..messages import AgentEvent, ChatMessage, StopMessage class TerminatedException(BaseException): ... @@ -15,7 +15,7 @@ class TerminatedException(BaseException): ... class TerminationCondition(ABC, ComponentBase[BaseModel]): """A stateful condition that determines when a conversation should be terminated. - A termination condition is a callable that takes a sequence of :class:`BaseMessage` objects + A termination condition is a callable that takes a sequence of :class:`ChatMessage` or :class:`AgentEvent` objects since the last time the condition was called, and returns a StopMessage if the conversation should be terminated, or None otherwise. Once a termination condition has been reached, it must be reset before it can be used again. @@ -56,7 +56,7 @@ def terminated(self) -> bool: ... @abstractmethod - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: """Check if the conversation should be terminated based on the messages received since the last time the condition was called. Return a StopMessage if the conversation should be terminated, or None otherwise. @@ -102,7 +102,7 @@ def __init__(self, *conditions: TerminationCondition) -> None: def terminated(self) -> bool: return all(condition.terminated for condition in self._conditions) - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self.terminated: raise TerminatedException("Termination condition has already been reached.") # Check all remaining conditions.
@@ -153,7 +153,7 @@ def __init__(self, *conditions: TerminationCondition) -> None: def terminated(self) -> bool: return any(condition.terminated for condition in self._conditions) - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self.terminated: raise RuntimeError("Termination condition has already been reached") stop_messages = await asyncio.gather(*[condition(messages) for condition in self._conditions]) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py index db6203345707..0b4aac79b3cb 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py @@ -7,8 +7,8 @@ from ..base import TerminatedException, TerminationCondition from ..messages import ( - BaseChatMessage, - BaseMessage, + AgentEvent, + ChatMessage, HandoffMessage, StopMessage, TextMessage, @@ -33,7 +33,7 @@ def __init__(self) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -79,10 +79,10 @@ def __init__(self, max_messages: int, include_agent_event: bool = False) -> None def terminated(self) -> bool: return self._message_count >= self._max_messages - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self.terminated: raise TerminatedException("Termination condition has already been reached") - self._message_count += len([m for m in messages if self._include_agent_event or isinstance(m, BaseChatMessage)]) + self._message_count += len([m for m in messages if self._include_agent_event or isinstance(m, ChatMessage)]) if self._message_count >= self._max_messages: return StopMessage( content=f"Maximum number of messages {self._max_messages} reached, current message count: {self._message_count}", @@ -108,7 +108,7 @@ class TextMentionTerminationConfig(BaseModel): class TextMentionTermination(TerminationCondition, Component[TextMentionTerminationConfig]): - """Terminate the conversation if a specific text is mentioned in any :class:`~autogen_agentchat.messages.BaseChatMessage`. + """Terminate the conversation if a specific text is mentioned in any :class:`~autogen_agentchat.messages.ChatMessage`. Args: text: The text to look for in the messages. 
@@ -127,7 +127,7 @@ def __init__(self, text: str, sources: Sequence[str] | None = None) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -199,7 +199,7 @@ def terminated(self) -> bool: or (self._max_completion_token is not None and self._completion_token_count >= self._max_completion_token) ) - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self.terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -256,7 +256,7 @@ def __init__(self, target: str) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -301,7 +301,7 @@ def __init__(self, timeout_seconds: float) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") @@ -363,7 +363,7 @@ def set(self) -> None: """Set the termination condition to terminated.""" self._setted = True - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") if self._setted: @@ -408,7 +408,7 @@ def __init__(self, sources: List[str]) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") if not messages: @@ -461,7 +461,7 @@ def __init__(self, source: str | None = None) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -511,7 +511,7 @@ def __init__(self, function_name: str) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None: + async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py 
index 1ca8bd76c275..8f08609b6c66 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -1,6 +1,6 @@ """ This module defines various message types used for agent-to-agent communication. -Each message type inherits either from the BaseChatMessage class or BaseAgentEvent +Each message type inherits either from the ChatMessage class or AgentEvent class and includes specific fields relevant to the type of message being sent. """ @@ -10,8 +10,8 @@ class and includes specific fields relevant to the type of message being sent. from autogen_core import FunctionCall, Image from autogen_core.memory import MemoryContent from autogen_core.models import FunctionExecutionResult, LLMMessage, RequestUsage, UserMessage -from pydantic import BaseModel, ConfigDict, Field, computed_field -from typing_extensions import Annotated, Self +from pydantic import BaseModel, ConfigDict, computed_field +from typing_extensions import Self class BaseMessage(BaseModel, ABC): @@ -20,7 +20,7 @@ class BaseMessage(BaseModel, ABC): .. warning:: If you want to create a new message type, do not inherit from this class. - Instead, inherit from :class:`BaseChatMessage` or :class:`BaseAgentEvent` + Instead, inherit from :class:`ChatMessage` or :class:`AgentEvent` to clarify the purpose of the message type. @@ -51,7 +51,7 @@ def content_to_text(self) -> str: that can be rendered in the console and inspected by the user. This is not used for creating text-only content for models. - For :class:`BaseChatMessage` types, use :meth:`content_to_model_text` instead.""" + For :class:`ChatMessage` types, use :meth:`content_to_model_text` instead.""" ... def dump(self) -> Mapping[str, Any]: @@ -73,7 +73,7 @@ def load(cls, data: Mapping[str, Any]) -> Self: return cls.model_validate(data) -class BaseChatMessage(BaseMessage, ABC): +class ChatMessage(BaseMessage, ABC): """Base class for chat messages. .. note:: @@ -85,7 +85,7 @@ class ChatMessage(BaseMessage, ABC): This class is used for messages that are sent between agents in a chat conversation. Agents are expected to process the content of the - message using models and return a response as another :class:`BaseChatMessage`. + message using models and return a response as another :class:`ChatMessage`. """ @abstractmethod @@ -110,8 +110,8 @@ def content_to_model_message(self) -> UserMessage: ... -class BaseTextChatMessage(BaseChatMessage, ABC): - """Base class for all text-only :class:`BaseChatMessage` types. +class TextChatMessage(ChatMessage, ABC): + """Base class for all text-only :class:`ChatMessage` types. It has implementations for :meth:`content_to_text`, :meth:`content_to_model_text`, and :meth:`content_to_model_message` methods. @@ -131,7 +131,7 @@ def content_to_model_message(self) -> UserMessage: return UserMessage(content=self.content, source=self.source) -class BaseAgentEvent(BaseMessage, ABC): +class AgentEvent(BaseMessage, ABC): """Base class for agent events. .. note:: @@ -154,8 +154,8 @@ class BaseAgentEvent(BaseMessage, ABC): """Type variable for structured content types.""" -class StructuredMessage(BaseChatMessage, Generic[StructuredContentType]): - """A :class:`BaseChatMessage` type with an unspecified content type. +class StructuredMessage(ChatMessage, Generic[StructuredContentType]): + """A :class:`ChatMessage` type with an unspecified content type. To create a new structured message type, specify the content type as a subclass of `Pydantic BaseModel `_.
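As the docstring above describes, a structured message is parameterized with a Pydantic model. A short sketch mirroring the docstring example from the earlier "lint" commit; MyMessageContent is a made-up schema, not part of the library:

from pydantic import BaseModel
from autogen_agentchat.messages import StructuredMessage

class MyMessageContent(BaseModel):  # hypothetical content schema
    text: str
    number: int

# Parameterizing StructuredMessage fixes the content type; content_to_text()
# then serializes the payload as JSON for models and renderers.
message = StructuredMessage[MyMessageContent](
    content=MyMessageContent(text="Hello", number=42),
    source="agent1",
)
print(message.content_to_text())  # {"text": "Hello", "number": 42}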
@@ -197,13 +197,13 @@ def content_to_model_message(self) -> UserMessage: ) -class TextMessage(BaseTextChatMessage): +class TextMessage(TextChatMessage): """A text message with string-only content.""" ... -class MultiModalMessage(BaseChatMessage): +class MultiModalMessage(ChatMessage): """A multimodal message.""" content: List[str | Image] @@ -243,13 +243,13 @@ def content_to_model_message(self) -> UserMessage: return UserMessage(content=self.content, source=self.source) -class StopMessage(BaseTextChatMessage): +class StopMessage(TextChatMessage): """A message requesting stop of a conversation.""" ... -class HandoffMessage(BaseTextChatMessage): +class HandoffMessage(TextChatMessage): """A message requesting handoff of a conversation to another agent.""" target: str @@ -259,13 +259,13 @@ class HandoffMessage(BaseTextChatMessage): """The model context to be passed to the target agent.""" -class ToolCallSummaryMessage(BaseTextChatMessage): +class ToolCallSummaryMessage(TextChatMessage): """A message signaling the summary of tool call results.""" ... -class ToolCallRequestEvent(BaseAgentEvent): +class ToolCallRequestEvent(AgentEvent): """An event signaling a request to use tools.""" content: List[FunctionCall] @@ -275,7 +275,7 @@ def content_to_text(self) -> str: return str(self.content) -class ToolCallExecutionEvent(BaseAgentEvent): +class ToolCallExecutionEvent(AgentEvent): """An event signaling the execution of tool calls.""" content: List[FunctionExecutionResult] @@ -285,7 +285,7 @@ def content_to_text(self) -> str: return str(self.content) -class UserInputRequestedEvent(BaseAgentEvent): +class UserInputRequestedEvent(AgentEvent): """An event signaling that the user proxy has requested user input. Published prior to invoking the input callback.""" request_id: str @@ -298,7 +298,7 @@ def content_to_text(self) -> str: return str(self.content) -class MemoryQueryEvent(BaseAgentEvent): +class MemoryQueryEvent(AgentEvent): """An event signaling the results of memory queries.""" content: List[MemoryContent] @@ -308,7 +308,7 @@ def content_to_text(self) -> str: return str(self.content) -class ModelClientStreamingChunkEvent(BaseAgentEvent): +class ModelClientStreamingChunkEvent(AgentEvent): """An event signaling a text output chunk from a model client in streaming mode.""" content: str @@ -318,7 +318,7 @@ def content_to_text(self) -> str: return self.content -class ThoughtEvent(BaseAgentEvent): +class ThoughtEvent(AgentEvent): """An event signaling the thought process of a model. It is used to communicate the reasoning tokens generated by a reasoning model, or the extra text content generated by a function call.""" @@ -330,25 +330,6 @@ def content_to_text(self) -> str: return self.content -ChatMessage = Annotated[ - TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage, - Field(discriminator="type"), -] -"""Union of built-in :class:`BaseChatMessage`. This does not include the :class:`StructuredMessage` class.""" - - -AgentEvent = Annotated[ - ToolCallRequestEvent - | ToolCallExecutionEvent - | MemoryQueryEvent - | UserInputRequestedEvent - | ModelClientStreamingChunkEvent - | ThoughtEvent, - Field(discriminator="type"), -] -"""Union of built-in :class:`BaseAgentEvent`.""" - - class MessageFactory: """:meta private: @@ -358,7 +339,7 @@ class MessageFactory: """ def __init__(self) -> None: - self._message_types: Dict[str, type[BaseMessage]] = {} + self._message_types: Dict[str, type[ChatMessage | AgentEvent]] = {} # Register all message types.
self._message_types[TextMessage.__name__] = TextMessage self._message_types[MultiModalMessage.__name__] = MultiModalMessage @@ -372,26 +353,26 @@ def __init__(self) -> None: self._message_types[ModelClientStreamingChunkEvent.__name__] = ModelClientStreamingChunkEvent self._message_types[ThoughtEvent.__name__] = ThoughtEvent - def is_registered(self, message_type: type[BaseMessage]) -> bool: + def is_registered(self, message_type: type[ChatMessage | AgentEvent]) -> bool: """Check if a message type is registered with the factory.""" # Get the class name of the message type. class_name = message_type.__name__ # Check if the class name is already registered. return class_name in self._message_types - def register(self, message_type: type[BaseMessage]) -> None: + def register(self, message_type: type[ChatMessage | AgentEvent]) -> None: """Register a new message type with the factory.""" if self.is_registered(message_type): raise ValueError(f"Message type {message_type} is already registered.") - if not issubclass(message_type, BaseMessage): - raise ValueError(f"Message type {message_type} must be a subclass of BaseMessage.") + if not issubclass(message_type, ChatMessage) and not issubclass(message_type, AgentEvent): + raise ValueError(f"Message type {message_type} must be a subclass of ChatMessage or AgentEvent.") # Get the class name of the message type. class_name = message_type.__name__ # Register the message type. self._message_types[class_name] = message_type - def create(self, data: Mapping[str, Any]) -> BaseMessage: + def create(self, data: Mapping[str, Any]) -> ChatMessage | AgentEvent: """Create a message from a dictionary of JSON-serializable data.""" # Get the type of the message from the dictionary. message_type = data.get("type") @@ -404,16 +385,16 @@ def create(self, data: Mapping[str, Any]) -> ChatMessage | AgentEvent: message_class = self._message_types[message_type] # Create an instance of the message class. - assert issubclass(message_class, BaseMessage) + assert issubclass(message_class, ChatMessage) or issubclass(message_class, AgentEvent) return message_class.load(data) __all__ = [ "AgentEvent", "BaseMessage", - "BaseChatMessage", - "BaseAgentEvent", - "BaseTextChatMessage", + "TextChatMessage", "ChatMessage", "StructuredContentType", "StructuredMessage", diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py index 264dee15f089..a568b35302ce 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py @@ -18,8 +18,8 @@ from ...
import EVENT_LOGGER_NAME from ...base import ChatAgent, TaskResult, Team, TerminationCondition from ...messages import ( - BaseChatMessage, - BaseMessage, + AgentEvent, + ChatMessage, MessageFactory, ModelClientStreamingChunkEvent, StopMessage, @@ -50,7 +50,7 @@ def __init__( termination_condition: TerminationCondition | None = None, max_turns: int | None = None, runtime: AgentRuntime | None = None, - custom_message_types: List[type[BaseMessage]] | None = None, + custom_message_types: List[type[ChatMessage | AgentEvent]] | None = None, ): if len(participants) == 0: raise ValueError("At least one participant is required.") @@ -90,7 +90,7 @@ def __init__( self._output_topic_type = f"output_topic_{self._team_id}" # The queue for collecting the output messages. - self._output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination] = asyncio.Queue() + self._output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination] = asyncio.Queue() # Create a runtime for the team. if runtime is not None: @@ -117,7 +117,7 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, @@ -195,7 +195,7 @@ async def _init(self, runtime: AgentRuntime) -> None: async def run( self, *, - task: str | BaseChatMessage | Sequence[BaseChatMessage] | None = None, + task: str | ChatMessage | Sequence[ChatMessage] | None = None, cancellation_token: CancellationToken | None = None, ) -> TaskResult: """Run the team and return the result. The base implementation uses :meth:`run_stream` to run the team and then returns the final result. Once the team is stopped, the termination condition is reset. Args: - task (str | BaseChatMessage | Sequence[BaseChatMessage] | None): The task to run the team with. Can be a string, a single :class:`BaseChatMessage` , or a list of :class:`BaseChatMessage`. + task (str | ChatMessage | Sequence[ChatMessage] | None): The task to run the team with. Can be a string, a single :class:`ChatMessage`, or a list of :class:`ChatMessage`. cancellation_token (CancellationToken | None): The cancellation token to kill the task immediately. Setting the cancellation token potentially puts the team in an inconsistent state, and it may not reset the termination condition. @@ -297,9 +297,9 @@ async def main() -> None: async def run_stream( self, *, - task: str | BaseChatMessage | Sequence[BaseChatMessage] | None = None, + task: str | ChatMessage | Sequence[ChatMessage] | None = None, cancellation_token: CancellationToken | None = None, - ) -> AsyncGenerator[BaseMessage | TaskResult, None]: + ) -> AsyncGenerator[ChatMessage | AgentEvent | TaskResult, None]: """Run the team and produce a stream of messages and the final result of the type :class:`~autogen_agentchat.base.TaskResult` as the last item in the stream. Once the team is stopped, the termination condition is reset. @@ -311,14 +311,14 @@ async def run_stream( :attr:`~autogen_agentchat.base.TaskResult.messages`. Args: - task (str | BaseChatMessage | Sequence[BaseChatMessage] | None): The task to run the team with.
Can be a string, a single :class:`ChatMessage`, or a list of :class:`ChatMessage`. cancellation_token (CancellationToken | None): The cancellation token to kill the task immediately. Setting the cancellation token potentially puts the team in an inconsistent state, and it may not reset the termination condition. To gracefully stop the team, use :class:`~autogen_agentchat.conditions.ExternalTermination` instead. Returns: - stream: an :class:`~collections.abc.AsyncGenerator` that yields :class:`~autogen_agentchat.messages.AgentEvent`, :class:`~autogen_agentchat.messages.BaseChatMessage`, and the final result :class:`~autogen_agentchat.base.TaskResult` as the last item in the stream. + stream: an :class:`~collections.abc.AsyncGenerator` that yields :class:`~autogen_agentchat.messages.AgentEvent`, :class:`~autogen_agentchat.messages.ChatMessage`, and the final result :class:`~autogen_agentchat.base.TaskResult` as the last item in the stream. Example using the :class:`~autogen_agentchat.teams.RoundRobinGroupChat` team: @@ -398,23 +398,23 @@ async def main() -> None: """ # Create the messages list if the task is a string or a chat message. - messages: List[BaseChatMessage] | None = None + messages: List[ChatMessage] | None = None if task is None: pass elif isinstance(task, str): messages = [TextMessage(content=task, source="user")] - elif isinstance(task, BaseChatMessage): + elif isinstance(task, ChatMessage): messages = [task] elif isinstance(task, list): if not task: raise ValueError("Task list cannot be empty.") messages = [] for msg in task: - if not isinstance(msg, BaseChatMessage): - raise ValueError("All messages in task list must be valid BaseChatMessage types") + if not isinstance(msg, ChatMessage): + raise ValueError("All messages in task list must be valid ChatMessage types") messages.append(msg) else: - raise ValueError("Task must be a string, a BaseChatMessage, or a list of BaseChatMessage.") + raise ValueError("Task must be a string, a ChatMessage, or a list of ChatMessage.") # Check if the message types are registered with the message factory. if messages is not None: for msg in messages: @@ -469,7 +469,7 @@ async def stop_runtime() -> None: cancellation_token=cancellation_token, ) # Collect the output messages in order. - output_messages: List[BaseMessage] = [] + output_messages: List[ChatMessage | AgentEvent] = [] stop_reason: str | None = None # Yield the messages until the queue is empty.
while True: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py index cbf1c5b56c7e..157b1fb0a57b 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py @@ -5,7 +5,7 @@ from autogen_core import DefaultTopicId, MessageContext, event, rpc from ...base import TerminationCondition -from ...messages import BaseChatMessage, BaseMessage, MessageFactory, StopMessage +from ...messages import AgentEvent, ChatMessage, MessageFactory, StopMessage from ._events import ( GroupChatAgentResponse, GroupChatMessage, @@ -39,7 +39,7 @@ def __init__( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, @@ -67,7 +67,7 @@ def __init__( name: topic_type for name, topic_type in zip(participant_names, participant_topic_types, strict=True) } self._participant_descriptions = participant_descriptions - self._message_thread: List[BaseMessage] = [] + self._message_thread: List[ChatMessage | AgentEvent] = [] self._output_message_queue = output_message_queue self._termination_condition = termination_condition if max_turns is not None and max_turns <= 0: @@ -141,7 +141,7 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No @event async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: MessageContext) -> None: # Append the message to the message thread and construct the delta. - delta: List[BaseMessage] = [] + delta: List[ChatMessage | AgentEvent] = [] if message.agent_response.inner_messages is not None: for inner_message in message.agent_response.inner_messages: self._message_thread.append(inner_message) @@ -225,7 +225,7 @@ async def handle_resume(self, message: GroupChatResume, ctx: MessageContext) -> pass @abstractmethod - async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> None: + async def validate_group_state(self, messages: List[ChatMessage] | None) -> None: """Validate the state of the group chat given the start messages. This is executed when the group chat manager receives a GroupChatStart event. @@ -235,7 +235,7 @@ async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> ... @abstractmethod - async def select_speaker(self, thread: List[BaseMessage]) -> str: + async def select_speaker(self, thread: List[ChatMessage | AgentEvent]) -> str: """Select a speaker from the participants and return the topic type of the selected speaker.""" ... 
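The MessageFactory threaded through the group chat manager above is what rebuilds typed messages when team state is loaded. A hedged sketch of the round trip it performs; the factory is marked :meta private: in this patch, so this is illustration only, not a supported API:

from autogen_agentchat.messages import MessageFactory, TextMessage

factory = MessageFactory()  # built-in message types are pre-registered

# dump() produces a JSON-serializable mapping with a "type" discriminator;
# create() looks up that type and rebuilds the concrete message class.
original = TextMessage(content="hello", source="user")
restored = factory.create(original.dump())
assert isinstance(restored, TextMessage)
assert restored.content == "hello"

Custom types registered through the custom_message_types parameter on BaseGroupChat, shown earlier in this patch, travel through the same register()/create() path.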
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py index 330057672ffe..3df2ae728ca3 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py @@ -2,7 +2,7 @@ from autogen_core import DefaultTopicId, MessageContext, event, rpc -from autogen_agentchat.messages import BaseChatMessage, BaseMessage, MessageFactory +from autogen_agentchat.messages import AgentEvent, ChatMessage, MessageFactory from ...base import ChatAgent, Response from ...state import ChatAgentContainerState @@ -46,7 +46,7 @@ def __init__( self._parent_topic_type = parent_topic_type self._output_topic_type = output_topic_type self._agent = agent - self._message_buffer: List[BaseChatMessage] = [] + self._message_buffer: List[ChatMessage] = [] self._message_factory = message_factory @event @@ -90,13 +90,13 @@ async def handle_request(self, message: GroupChatRequestPublish, ctx: MessageCon cancellation_token=ctx.cancellation_token, ) - def _buffer_message(self, message: BaseChatMessage) -> None: + def _buffer_message(self, message: ChatMessage) -> None: if not self._message_factory.is_registered(message.__class__): raise ValueError(f"Message type {message.__class__} is not registered.") # Buffer the message. self._message_buffer.append(message) - async def _log_message(self, message: BaseMessage) -> None: + async def _log_message(self, message: ChatMessage | AgentEvent) -> None: if not self._message_factory.is_registered(message.__class__): raise ValueError(f"Message type {message.__class__} is not registered.") # Log the message. 
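Because `_buffer_message` and `_log_message` now reject unregistered message types, a custom type has to be known to the `MessageFactory` before it can flow through a container. Here is a hedged sketch of that registration, assuming `MessageFactory` exposes a `register` method to pair with the `is_registered` check used above; the `TicketContent` model is purely illustrative.

```python
from pydantic import BaseModel

from autogen_agentchat.messages import MessageFactory, StructuredMessage


class TicketContent(BaseModel):
    # Hypothetical structured payload carried by a StructuredMessage.
    title: str
    priority: int


factory = MessageFactory()
# Assumed API: register the concrete generic type so containers accept it
# when buffering and logging.
factory.register(StructuredMessage[TicketContent])

msg = StructuredMessage[TicketContent](
    content=TicketContent(title="Disk full", priority=1),
    source="user",
)
assert factory.is_registered(type(msg))
```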
@@ -130,7 +130,7 @@ async def load_state(self, state: Mapping[str, Any]) -> None: self._message_buffer = [] for message_data in container_state.message_buffer: message = self._message_factory.create(message_data) - if isinstance(message, BaseChatMessage): + if isinstance(message, ChatMessage): self._message_buffer.append(message) else: raise ValueError(f"Invalid message type in message buffer: {type(message)}") diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py index 062e77d31fe5..046230846189 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py @@ -3,13 +3,13 @@ from pydantic import BaseModel from ...base import Response -from ...messages import BaseChatMessage, BaseMessage, StopMessage +from ...messages import AgentEvent, ChatMessage, StopMessage class GroupChatStart(BaseModel): """A request to start a group chat.""" - messages: List[BaseChatMessage] | None = None + messages: List[ChatMessage] | None = None """An optional list of messages to start the group chat.""" @@ -29,7 +29,7 @@ class GroupChatRequestPublish(BaseModel): class GroupChatMessage(BaseModel): """A message from a group chat.""" - message: BaseMessage + message: ChatMessage | AgentEvent """The message that was published.""" diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py index c73363dc24ea..dfc639ab1be2 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py @@ -9,7 +9,7 @@ from .... import EVENT_LOGGER_NAME, TRACE_LOGGER_NAME from ....base import ChatAgent, TerminationCondition -from ....messages import BaseMessage, MessageFactory +from ....messages import AgentEvent, ChatMessage, MessageFactory from .._base_group_chat import BaseGroupChat from .._events import GroupChatTermination from ._magentic_one_orchestrator import MagenticOneOrchestrator @@ -128,7 +128,7 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py index cbce79baed9f..5f8ba8fc6712 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py @@ -15,8 +15,8 @@ from .... 
import TRACE_LOGGER_NAME from ....base import Response, TerminationCondition from ....messages import ( - BaseChatMessage, - BaseMessage, + AgentEvent, + ChatMessage, HandoffMessage, MessageFactory, MultiModalMessage, @@ -66,7 +66,7 @@ def __init__( model_client: ChatCompletionClient, max_stalls: int, final_answer_prompt: str, - output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], termination_condition: TerminationCondition | None, ): super().__init__( @@ -184,7 +184,7 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No @event async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: MessageContext) -> None: # type: ignore - delta: List[BaseMessage] = [] + delta: List[ChatMessage | AgentEvent] = [] if message.agent_response.inner_messages is not None: for inner_message in message.agent_response.inner_messages: delta.append(inner_message) @@ -201,7 +201,7 @@ async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: Mess return await self._orchestrate_step(ctx.cancellation_token) - async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> None: + async def validate_group_state(self, messages: List[ChatMessage] | None) -> None: pass async def save_state(self) -> Mapping[str, Any]: @@ -226,7 +226,7 @@ async def load_state(self, state: Mapping[str, Any]) -> None: self._n_rounds = orchestrator_state.n_rounds self._n_stalls = orchestrator_state.n_stalls - async def select_speaker(self, thread: List[BaseMessage]) -> str: + async def select_speaker(self, thread: List[ChatMessage | AgentEvent]) -> str: """Not used in this orchestrator; the next speaker is selected in _orchestrate_step.""" return "" diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py index c3b962975079..3541b0b1694e 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py @@ -6,7 +6,7 @@ from typing_extensions import Self from ...base import ChatAgent, TerminationCondition -from ...messages import BaseChatMessage, BaseMessage, MessageFactory +from ...messages import AgentEvent, ChatMessage, MessageFactory from ...state import RoundRobinManagerState from ._base_group_chat import BaseGroupChat from ._base_group_chat_manager import BaseGroupChatManager @@ -24,7 +24,7 @@ def __init__( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, @@ -43,7 +43,7 @@ def __init__( ) self._next_speaker_index = 0 - async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> None: + async def validate_group_state(self, messages: List[ChatMessage] | None) -> None: pass async def reset(self) -> None: @@ -67,7 +67,7 @@ async def load_state(self, state: Mapping[str, Any]) -> None: self._current_turn = round_robin_state.current_turn self._next_speaker_index = round_robin_state.next_speaker_index - async def 
select_speaker(self, thread: List[BaseMessage]) -> str: + async def select_speaker(self, thread: List[ChatMessage | AgentEvent]) -> str: """Select a speaker from the participants in a round-robin fashion.""" current_speaker_index = self._next_speaker_index self._next_speaker_index = (current_speaker_index + 1) % len(self._participant_names) @@ -166,7 +166,7 @@ def __init__( termination_condition: TerminationCondition | None = None, max_turns: int | None = None, runtime: AgentRuntime | None = None, - custom_message_types: List[type[BaseMessage]] | None = None, + custom_message_types: List[type[ChatMessage | AgentEvent]] | None = None, ) -> None: super().__init__( participants, @@ -186,7 +186,7 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 04f0f4bf69ba..7c369fec245d 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -13,8 +13,8 @@ from ...agents import BaseChatAgent from ...base import ChatAgent, TerminationCondition from ...messages import ( - BaseChatMessage, - BaseMessage, + AgentEvent, + ChatMessage, MessageFactory, ) from ...state import SelectorManagerState @@ -24,12 +24,12 @@ trace_logger = logging.getLogger(TRACE_LOGGER_NAME) -SyncSelectorFunc = Callable[[Sequence[BaseMessage]], str | None] -AsyncSelectorFunc = Callable[[Sequence[BaseMessage]], Awaitable[str | None]] +SyncSelectorFunc = Callable[[Sequence[ChatMessage | AgentEvent]], str | None] +AsyncSelectorFunc = Callable[[Sequence[ChatMessage | AgentEvent]], Awaitable[str | None]] SelectorFuncType = Union[SyncSelectorFunc | AsyncSelectorFunc] -SyncCandidateFunc = Callable[[Sequence[BaseMessage]], List[str]] -AsyncCandidateFunc = Callable[[Sequence[BaseMessage]], Awaitable[List[str]]] +SyncCandidateFunc = Callable[[Sequence[ChatMessage | AgentEvent]], List[str]] +AsyncCandidateFunc = Callable[[Sequence[ChatMessage | AgentEvent]], Awaitable[List[str]]] CandidateFuncType = Union[SyncCandidateFunc | AsyncCandidateFunc] @@ -45,7 +45,7 @@ def __init__( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, @@ -78,7 +78,7 @@ def __init__( self._candidate_func = candidate_func self._is_candidate_func_async = iscoroutinefunction(self._candidate_func) - async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> None: + async def validate_group_state(self, messages: List[ChatMessage] | None) -> None: pass async def reset(self) -> None: @@ -102,7 +102,7 @@ async def load_state(self, state: Mapping[str, Any]) -> None: self._current_turn = selector_state.current_turn 
self._previous_speaker = selector_state.previous_speaker - async def select_speaker(self, thread: List[BaseMessage]) -> str: + async def select_speaker(self, thread: List[ChatMessage | AgentEvent]) -> str: """Selects the next speaker in a group chat using a ChatCompletion client, with the selector function as an override if it returns a speaker name. @@ -153,7 +153,7 @@ async def select_speaker(self, thread: List[BaseMessage]) -> str: # Construct the history of the conversation. history_messages: List[str] = [] for msg in thread: - if not isinstance(msg, BaseChatMessage): + if not isinstance(msg, ChatMessage): # Only process chat messages. continue message = f"{msg.source}: {msg.content_to_model_text()}" @@ -299,11 +299,11 @@ class SelectorGroupChat(BaseGroupChat, Component[SelectorGroupChatConfig]): max_selector_attempts (int, optional): The maximum number of attempts to select a speaker using the model. Defaults to 3. If the model fails to select a speaker after the maximum number of attempts, the previous speaker will be used if available, otherwise the first participant will be used. - selector_func (Callable[[Sequence[BaseMessage]], str | None], Callable[[Sequence[BaseMessage]], Awaitable[str | None]], optional): A custom selector + selector_func (Callable[[Sequence[ChatMessage | AgentEvent]], str | None], Callable[[Sequence[ChatMessage | AgentEvent]], Awaitable[str | None]], optional): A custom selector function that takes the conversation history and returns the name of the next speaker. If provided, this function will be used to override the model to select the next speaker. If the function returns None, the model will be used to select the next speaker. - candidate_func (Callable[[Sequence[BaseMessage]], List[str]], Callable[[Sequence[BaseMessage]], Awaitable[List[str]]], optional): + candidate_func (Callable[[Sequence[ChatMessage | AgentEvent]], List[str]], Callable[[Sequence[ChatMessage | AgentEvent]], Awaitable[List[str]]], optional): A custom function that takes the conversation history and returns a filtered list of candidates for the next speaker selection using the model. If the function returns an empty list or `None`, `SelectorGroupChat` will raise a `ValueError`. This function is only used if `selector_func` is not set. The `allow_repeated_speaker` will be ignored if set. @@ -378,7 +378,7 @@ async def book_trip() -> str: from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.conditions import TextMentionTermination from autogen_agentchat.ui import Console - from autogen_agentchat.messages import BaseMessage + from autogen_agentchat.messages import AgentEvent, ChatMessage async def main() -> None: @@ -404,7 +404,7 @@ def check_calculation(x: int, y: int, answer: int) -> str: system_message="Check the answer and respond with 'Correct!' 
or 'Incorrect!'", ) - def selector_func(messages: Sequence[BaseMessage]) -> str | None: + def selector_func(messages: Sequence[ChatMessage | AgentEvent]) -> str | None: if len(messages) == 1 or messages[-1].content_to_text() == "Incorrect!": return "Agent1" if messages[-1].source == "Agent1": @@ -448,7 +448,7 @@ def __init__( max_selector_attempts: int = 3, selector_func: Optional[SelectorFuncType] = None, candidate_func: Optional[CandidateFuncType] = None, - custom_message_types: List[type[BaseMessage]] | None = None, + custom_message_types: List[type[ChatMessage | AgentEvent]] | None = None, ): super().__init__( participants, @@ -477,7 +477,7 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, @@ -525,7 +525,7 @@ def _from_config(cls, config: SelectorGroupChatConfig) -> Self: selector_prompt=config.selector_prompt, allow_repeated_speaker=config.allow_repeated_speaker, max_selector_attempts=config.max_selector_attempts, - # selector_func=ComponentLoader.load_component(config.selector_func, Callable[[Sequence[BaseMessage]], str | None]) + # selector_func=ComponentLoader.load_component(config.selector_func, Callable[[Sequence[ChatMessage | AgentEvent]], str | None]) # if config.selector_func # else None, ) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py index 0da78e6f39f1..d11b68791181 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py @@ -5,7 +5,7 @@ from pydantic import BaseModel from ...base import ChatAgent, TerminationCondition -from ...messages import BaseChatMessage, BaseMessage, HandoffMessage, MessageFactory +from ...messages import AgentEvent, ChatMessage, HandoffMessage, MessageFactory from ...state import SwarmManagerState from ._base_group_chat import BaseGroupChat from ._base_group_chat_manager import BaseGroupChatManager @@ -23,7 +23,7 @@ def __init__( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, @@ -42,7 +42,7 @@ def __init__( ) self._current_speaker = self._participant_names[0] - async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> None: + async def validate_group_state(self, messages: List[ChatMessage] | None) -> None: """Validate the start messages for the group chat.""" # Check if any of the start messages is a handoff message. 
if messages: @@ -77,7 +77,7 @@ async def reset(self) -> None: await self._termination_condition.reset() self._current_speaker = self._participant_names[0] - async def select_speaker(self, thread: List[BaseMessage]) -> str: + async def select_speaker(self, thread: List[ChatMessage | AgentEvent]) -> str: """Select a speaker from the participants based on the handoff message. Looks for the last handoff message in the thread to determine the next speaker.""" if len(thread) == 0: @@ -212,7 +212,7 @@ def __init__( termination_condition: TerminationCondition | None = None, max_turns: int | None = None, runtime: AgentRuntime | None = None, - custom_message_types: List[type[BaseMessage]] | None = None, + custom_message_types: List[type[ChatMessage | AgentEvent]] | None = None, ) -> None: super().__init__( participants, @@ -236,7 +236,7 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[BaseMessage | GroupChatTermination], + output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py index 84b2396089f4..ced6552ee43a 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py @@ -11,7 +11,8 @@ from autogen_agentchat.agents import UserProxyAgent from autogen_agentchat.base import Response, TaskResult from autogen_agentchat.messages import ( - BaseMessage, + AgentEvent, + ChatMessage, ModelClientStreamingChunkEvent, MultiModalMessage, UserInputRequestedEvent, @@ -79,7 +80,7 @@ def aprint(output: str, end: str = "\n", flush: bool = False) -> Awaitable[None] async def Console( - stream: AsyncGenerator[BaseMessage | T, None], + stream: AsyncGenerator[ChatMessage | AgentEvent | T, None], *, no_inline_images: bool = False, output_stats: bool = False, @@ -96,7 +97,7 @@ async def Console( It will be improved in future releases. Args: - stream (AsyncGenerator[BaseMessage | TaskResult, None] | AsyncGenerator[BaseMessage | Response, None]): Message stream to render. + stream (AsyncGenerator[ChatMessage | AgentEvent | TaskResult, None] | AsyncGenerator[ChatMessage | AgentEvent | Response, None]): Message stream to render. This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. no_inline_images (bool, optional): If the terminal is iTerm2, images will be rendered inline. Use this to disable that behavior. Defaults to False. output_stats (bool, optional): (Experimental) If True, will output a summary of the messages and inline token usage info. Defaults to False. @@ -169,7 +170,7 @@ async def Console( user_input_manager.notify_event_received(message.request_id) else: # Cast required for mypy to be happy - message = cast(BaseMessage, message) # type: ignore + message = cast(ChatMessage | AgentEvent, message) # type: ignore if not streaming_chunks: # Print message sender. 
await aprint(f"{'-' * 10} {message.source} {'-' * 10}", end="\n", flush=True) diff --git a/python/packages/autogen-agentchat/tests/test_assistant_agent.py b/python/packages/autogen-agentchat/tests/test_assistant_agent.py index 932a9943fc16..e04f9415706b 100644 --- a/python/packages/autogen-agentchat/tests/test_assistant_agent.py +++ b/python/packages/autogen-agentchat/tests/test_assistant_agent.py @@ -7,7 +7,7 @@ from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.base import Handoff, TaskResult from autogen_agentchat.messages import ( - BaseChatMessage, + ChatMessage, HandoffMessage, MemoryQueryEvent, ModelClientStreamingChunkEvent, @@ -585,7 +585,7 @@ async def test_list_chat_messages(monkeypatch: pytest.MonkeyPatch) -> None: ) # Create a list of chat messages - messages: List[BaseChatMessage] = [ + messages: List[ChatMessage] = [ TextMessage(content="Message 1", source="user"), TextMessage(content="Message 2", source="user"), ] diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index e7be4ebdba86..a44180dbff9e 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -15,8 +15,8 @@ from autogen_agentchat.base import Handoff, Response, TaskResult from autogen_agentchat.conditions import HandoffTermination, MaxMessageTermination, TextMentionTermination from autogen_agentchat.messages import ( - BaseChatMessage, - BaseMessage, + AgentEvent, + ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, @@ -60,14 +60,14 @@ def __init__(self, name: str, description: str) -> None: self._total_messages = 0 @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return (TextMessage,) @property def total_messages(self) -> int: return self._total_messages - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: if len(messages) > 0: assert isinstance(messages[0], TextMessage) self._last_message = messages[0].content @@ -89,21 +89,21 @@ def __init__(self, name: str, description: str) -> None: self._total_messages = 0 @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return (TextMessage,) @property def total_messages(self) -> int: return self._total_messages - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: raise ValueError("I am a flaky agent...") async def on_reset(self, cancellation_token: CancellationToken) -> None: self._last_message = None -class _UnknownMessageType(BaseChatMessage): +class _UnknownMessageType(ChatMessage): content: str def content_to_model_message(self) -> UserMessage: @@ -121,10 +121,10 @@ def __init__(self, name: str, description: str) -> None: super().__init__(name, description) @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return (_UnknownMessageType,) - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: 
CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: return Response(chat_message=_UnknownMessageType(content="Unknown message type", source=self.name)) async def on_reset(self, cancellation_token: CancellationToken) -> None: @@ -138,10 +138,10 @@ def __init__(self, name: str, description: str, *, stop_at: int = 1) -> None: self._stop_at = stop_at @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return (TextMessage, StopMessage) - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: self._count += 1 if self._count < self._stop_at: return await super().on_messages(messages, cancellation_token) @@ -162,7 +162,7 @@ class _InputTask2(BaseModel): data: str -TaskType = str | List[BaseChatMessage] | BaseChatMessage +TaskType = str | List[ChatMessage] | ChatMessage @pytest_asyncio.fixture(params=["single_threaded", "embedded"]) # type: ignore @@ -821,7 +821,7 @@ async def test_selector_group_chat_custom_selector(runtime: AgentRuntime | None) agent3 = _EchoAgent("agent3", description="echo agent 3") agent4 = _EchoAgent("agent4", description="echo agent 4") - def _select_agent(messages: Sequence[BaseMessage]) -> str | None: + def _select_agent(messages: Sequence[ChatMessage | AgentEvent]) -> str | None: if len(messages) == 0: return "agent1" elif messages[-1].source == "agent1": @@ -862,7 +862,7 @@ async def test_selector_group_chat_custom_candidate_func(runtime: AgentRuntime | agent3 = _EchoAgent("agent3", description="echo agent 3") agent4 = _EchoAgent("agent4", description="echo agent 4") - def _candidate_func(messages: Sequence[BaseMessage]) -> List[str]: + def _candidate_func(messages: Sequence[ChatMessage | AgentEvent]) -> List[str]: if len(messages) == 0: return ["agent1"] elif messages[-1].source == "agent1": @@ -901,10 +901,10 @@ def __init__(self, name: str, description: str, next_agent: str) -> None: self._next_agent = next_agent @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return (HandoffMessage,) - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: return Response( chat_message=HandoffMessage( content=f"Transferred to {self._next_agent}.", target=self._next_agent, source=self.name @@ -1292,7 +1292,7 @@ async def test_round_robin_group_chat_with_message_list(runtime: AgentRuntime | team = RoundRobinGroupChat([agent1, agent2], termination_condition=termination, runtime=runtime) # Create a list of messages - messages: List[BaseChatMessage] = [ + messages: List[ChatMessage] = [ TextMessage(content="Message 1", source="user"), TextMessage(content="Message 2", source="user"), TextMessage(content="Message 3", source="user"), @@ -1324,7 +1324,7 @@ async def test_round_robin_group_chat_with_message_list(runtime: AgentRuntime | index += 1 # Test with invalid message list - with pytest.raises(ValueError, match="All messages in task list must be valid BaseChatMessage types"): + with pytest.raises(ValueError, match="All messages in task list must be valid 
ChatMessage types"): await team.run(task=["not a message"]) # type: ignore[list-item, arg-type] # intentionally testing invalid input # Test with empty message list diff --git a/python/packages/autogen-agentchat/tests/test_group_chat_endpoint.py b/python/packages/autogen-agentchat/tests/test_group_chat_endpoint.py index 54a14efebd64..cdbcd1df25d5 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat_endpoint.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat_endpoint.py @@ -4,7 +4,7 @@ import pytest from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.base import TaskResult -from autogen_agentchat.messages import BaseMessage +from autogen_agentchat.messages import AgentEvent, ChatMessage from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.ui import Console from autogen_core.models import ChatCompletionClient @@ -33,7 +33,7 @@ async def _test_selector_group_chat(model_client: ChatCompletionClient) -> None: async def _test_selector_group_chat_with_candidate_func(model_client: ChatCompletionClient) -> None: filtered_participants = ["developer", "tester"] - def dummy_candidate_func(thread: Sequence[BaseMessage]) -> List[str]: + def dummy_candidate_func(thread: Sequence[ChatMessage | AgentEvent]) -> List[str]: # Dummy candidate function that will return # only return developer and reviewer return filtered_participants diff --git a/python/packages/autogen-agentchat/tests/test_group_chat_pause_resume.py b/python/packages/autogen-agentchat/tests/test_group_chat_pause_resume.py index e26c7262d66d..ee87f6f4a25b 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat_pause_resume.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat_pause_resume.py @@ -5,7 +5,7 @@ import pytest_asyncio from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response -from autogen_agentchat.messages import BaseChatMessage, TextMessage +from autogen_agentchat.messages import ChatMessage, TextMessage from autogen_agentchat.teams import RoundRobinGroupChat from autogen_core import AgentRuntime, CancellationToken, SingleThreadedAgentRuntime @@ -20,10 +20,10 @@ def __init__(self, name: str, description: str) -> None: self.counter = 0 @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return [TextMessage] - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: assert not self._is_paused, "Agent is paused" async def _process() -> None: diff --git a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py index f4f00db1c854..bac10ee9c879 100644 --- a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py @@ -11,7 +11,7 @@ ) from autogen_agentchat.base import Response from autogen_agentchat.messages import ( - BaseChatMessage, + ChatMessage, TextMessage, ) from autogen_agentchat.teams import ( @@ -34,14 +34,14 @@ def __init__(self, name: str, description: str) -> None: self._total_messages = 0 @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return 
(TextMessage,) @property def total_messages(self) -> int: return self._total_messages - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: if len(messages) > 0: assert isinstance(messages[0], TextMessage) self._last_message = messages[0].content diff --git a/python/packages/autogen-agentchat/tests/test_userproxy_agent.py b/python/packages/autogen-agentchat/tests/test_userproxy_agent.py index 855211de82a9..8ff6422a696f 100644 --- a/python/packages/autogen-agentchat/tests/test_userproxy_agent.py +++ b/python/packages/autogen-agentchat/tests/test_userproxy_agent.py @@ -4,7 +4,7 @@ import pytest from autogen_agentchat.agents import UserProxyAgent from autogen_agentchat.base import Response -from autogen_agentchat.messages import BaseChatMessage, HandoffMessage, TextMessage +from autogen_agentchat.messages import ChatMessage, HandoffMessage, TextMessage from autogen_core import CancellationToken @@ -53,7 +53,7 @@ def custom_input(prompt: str) -> str: agent = UserProxyAgent(name="test_user", input_func=custom_input) - messages: Sequence[BaseChatMessage] = [ + messages: Sequence[ChatMessage] = [ TextMessage(content="Initial message", source="assistant"), HandoffMessage(content="Handing off to user for confirmation", source="assistant", target="test_user"), ] diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb index c8014d7dc448..dfcbf2e997d7 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb @@ -14,7 +14,7 @@ "\n", "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n", "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n", - "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.BaseChatMessage` message types the agent can produce in its response.\n", + "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.ChatMessage` message types the agent can produce in its response.\n", "\n", "Optionally, you can implement the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. 
If this method is not implemented, the agent\n", "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n", @@ -53,7 +53,7 @@ "\n", "from autogen_agentchat.agents import BaseChatAgent\n", "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import BaseChatMessage, BaseMessage, TextMessage\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage\n", "from autogen_core import CancellationToken\n", "\n", "\n", @@ -63,10 +63,10 @@ " self._count = count\n", "\n", " @property\n", - " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", " return (TextMessage,)\n", "\n", - " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", " # Calls the on_messages_stream.\n", " response: Response | None = None\n", " async for message in self.on_messages_stream(messages, cancellation_token):\n", @@ -76,9 +76,9 @@ " return response\n", "\n", " async def on_messages_stream(\n", - " self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[BaseMessage | Response, None]:\n", - " inner_messages: List[BaseMessage] = []\n", + " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", + " ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]:\n", + " inner_messages: List[ChatMessage | AgentEvent] = []\n", " for i in range(self._count, 0, -1):\n", " msg = TextMessage(content=f\"{i}...\", source=self.name)\n", " inner_messages.append(msg)\n", @@ -135,7 +135,7 @@ "from autogen_agentchat.agents import BaseChatAgent\n", "from autogen_agentchat.base import Response\n", "from autogen_agentchat.conditions import MaxMessageTermination\n", - "from autogen_agentchat.messages import BaseChatMessage\n", + "from autogen_agentchat.messages import ChatMessage\n", "from autogen_agentchat.teams import SelectorGroupChat\n", "from autogen_agentchat.ui import Console\n", "from autogen_core import CancellationToken\n", @@ -146,13 +146,13 @@ " def __init__(self, name: str, description: str, operator_func: Callable[[int], int]) -> None:\n", " super().__init__(name, description=description)\n", " self._operator_func = operator_func\n", - " self._message_history: List[BaseChatMessage] = []\n", "\n", " @property\n", - " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", " return (TextMessage,)\n", "\n", - " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", " # Update the message history.\n", " # NOTE: it is possible that `messages` is an empty list, which means the agent was selected previously.\n", " self._message_history.extend(messages)\n", @@ -268,7 +268,7 @@ " )\n", "\n", " # Run the selector group chat with a given task and stream the response.\n", - " task: List[BaseChatMessage] = [\n", + " task: List[ChatMessage] = [\n", " TextMessage(content=\"Apply the operations to turn the given number into 25.\", source=\"user\"),\n", 
TextMessage(content=\"10\", source=\"user\"),\n", " ]\n", @@ -319,7 +319,7 @@ "\n", "from autogen_agentchat.agents import BaseChatAgent\n", "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import BaseChatMessage, BaseMessage, StructuredMessage\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage, StructuredMessage\n", "from autogen_core import CancellationToken\n", "from autogen_core.model_context import UnboundedChatCompletionContext\n", "from autogen_core.models import AssistantMessage, RequestUsage, UserMessage\n", @@ -344,10 +344,10 @@ " self._model = model\n", "\n", " @property\n", - " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", " return (TextMessage,)\n", "\n", - " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", " final_response = None\n", " async for message in self.on_messages_stream(messages, cancellation_token):\n", " if isinstance(message, Response):\n", @@ -359,8 +359,8 @@ " return final_response\n", "\n", " async def on_messages_stream(\n", - " self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[BaseMessage | Response, None]:\n", + " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", + " ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]:\n", " # Add messages to the model context\n", " for msg in messages:\n", " if isinstance(msg, StructuredMessage):\n", @@ -554,7 +554,7 @@ "\n", "from autogen_agentchat.agents import BaseChatAgent\n", "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import BaseChatMessage, BaseMessage\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", "from autogen_core import CancellationToken, Component\n", "from pydantic import BaseModel\n", "from typing_extensions import Self\n", @@ -587,10 +587,10 @@ " self._model = model\n", "\n", " @property\n", - " def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:\n", + " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", " return (TextMessage,)\n", "\n", - " async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", " final_response = None\n", " async for message in self.on_messages_stream(messages, cancellation_token):\n", " if isinstance(message, Response):\n", @@ -602,8 +602,8 @@ " return final_response\n", "\n", " async def on_messages_stream(\n", - " self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[BaseMessage | Response, None]:\n", + " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", + " ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]:\n", " # Add messages to the model context\n", " for msg in messages:\n", " if isinstance(msg, StructuredMessage):\n", @@ -744,4 +744,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md 
b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md index 0b3514985a54..83bc86b989de 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md @@ -462,18 +462,18 @@ and implement the `on_messages`, `on_reset`, and `produced_message_types` method from typing import Sequence from autogen_core import CancellationToken from autogen_agentchat.agents import BaseChatAgent -from autogen_agentchat.messages import TextMessage, BaseChatMessage +from autogen_agentchat.messages import TextMessage, ChatMessage from autogen_agentchat.base import Response class CustomAgent(BaseChatAgent): - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: return Response(chat_message=TextMessage(content="Custom reply", source=self.name)) async def on_reset(self, cancellation_token: CancellationToken) -> None: pass @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return (TextMessage,) ``` @@ -1189,12 +1189,12 @@ from typing import Sequence from autogen_core import CancellationToken from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.teams import RoundRobinGroupChat -from autogen_agentchat.messages import TextMessage, BaseChatMessage +from autogen_agentchat.messages import TextMessage, ChatMessage from autogen_agentchat.base import Response class CountingAgent(BaseChatAgent): """An agent that returns a new number by adding 1 to the last number in the input messages.""" - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: if len(messages) == 0: last_number = 0 # Start from 0 if no messages are given. else: @@ -1206,7 +1206,7 @@ class CountingAgent(BaseChatAgent): pass @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return (TextMessage,) class NestedCountingAgent(BaseChatAgent): @@ -1216,7 +1216,7 @@ class NestedCountingAgent(BaseChatAgent): super().__init__(name, description="An agent that counts numbers.") self._counting_team = counting_team - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: # Run the inner team with the given messages and return the last message produced by the team. result = await self._counting_team.run(task=messages, cancellation_token=cancellation_token) # To stream the inner messages, implement `on_messages_stream` and use that to implement `on_messages`. 
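For readers reconstructing the guide's example end to end, here is a hedged sketch of how the `CountingAgent` and `NestedCountingAgent` classes above might be wired together and run, assuming those class definitions are in scope; the team size, termination condition, and task string are illustrative assumptions, not part of the guide.

```python
import asyncio

from autogen_agentchat.conditions import MaxMessageTermination
from autogen_agentchat.teams import RoundRobinGroupChat


async def main() -> None:
    # Two counting agents form the inner team; the nested agent runs the
    # team as a single unit and surfaces its last message.
    inner_team = RoundRobinGroupChat(
        [
            CountingAgent("counter1", description="Counts numbers."),
            CountingAgent("counter2", description="Counts numbers."),
        ],
        termination_condition=MaxMessageTermination(5),
    )
    nested_agent = NestedCountingAgent("nested", inner_team)
    result = await nested_agent.run(task="1")
    print(result.messages[-1].content)


asyncio.run(main())
```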
@@ -1228,7 +1228,7 @@ class NestedCountingAgent(BaseChatAgent): await self._counting_team.reset() @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return (TextMessage,) async def main() -> None: diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb index 5c1e0027650a..dc1212b73bdf 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb @@ -61,7 +61,7 @@ "\n", "from autogen_agentchat.agents import AssistantAgent, UserProxyAgent\n", "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", - "from autogen_agentchat.messages import BaseMessage\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", "from autogen_agentchat.teams import SelectorGroupChat\n", "from autogen_agentchat.ui import Console\n", "from autogen_ext.models.openai import OpenAIChatCompletionClient" @@ -511,7 +511,7 @@ } ], "source": [ - "def selector_func(messages: Sequence[BaseMessage]) -> str | None:\n", + "def selector_func(messages: Sequence[ChatMessage | AgentEvent]) -> str | None:\n", " if messages[-1].source != planning_agent.name:\n", " return planning_agent.name\n", " return None\n", @@ -655,7 +655,7 @@ } ], "source": [ - "def candidate_func(messages: Sequence[BaseMessage]) -> List[str]:\n", + "def candidate_func(messages: Sequence[ChatMessage | AgentEvent]) -> List[str]:\n", " # keep planning_agent as the first one to plan out the tasks\n", " if messages[-1].source == \"user\":\n", " return [planning_agent.name]\n", @@ -813,7 +813,7 @@ "user_proxy_agent = UserProxyAgent(\"UserProxyAgent\", description=\"A proxy for the user to approve or disapprove tasks.\")\n", "\n", "\n", - "def selector_func_with_user_proxy(messages: Sequence[BaseMessage]) -> str | None:\n", + "def selector_func_with_user_proxy(messages: Sequence[ChatMessage | AgentEvent]) -> str | None:\n", " if messages[-1].source != planning_agent.name and messages[-1].source != user_proxy_agent.name:\n", " # Planning agent should be the first to engage when given a new task, or check progress.\n", " return planning_agent.name\n", @@ -1023,4 +1023,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index c14d008d17c4..9a6b507f1acf 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -1,847 +1,847 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Agents\n", - "\n", - "AutoGen AgentChat provides a set of preset Agents, each with variations in how an agent might respond to messages.\n", - "All agents share the following attributes and methods:\n", - "\n", - "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.name`: The unique name of the agent.\n", - "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.description`: The description of the agent in text.\n", - 
{py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: Send the agent a sequence of messages that subclass {py:class}`~autogen_agentchat.messages.BaseChatMessage` and get a {py:class}`~autogen_agentchat.base.Response`. **It is important to note that agents are expected to be stateful and this method is expected to be called with new messages, not the complete history**.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of messages that subclass {py:class}`~autogen_agentchat.messages.BaseMessage` followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: Reset the agent to its initial state.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`: convenience methods that call {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` respectively but offer the same interface as [Teams](./teams.ipynb).\n", - "\n", - "See {py:mod}`autogen_agentchat.messages` for more information on AgentChat message types.\n", - "\n", - "\n", - "## Assistant Agent\n", - "\n", - "{py:class}`~autogen_agentchat.agents.AssistantAgent` is a built-in agent that\n", - "uses a language model and has the ability to use tools." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.messages import TextMessage\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_core import CancellationToken\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "# Define a tool that searches the web for information.\n", - "async def web_search(query: str) -> str:\n", - " \"\"\"Find information on the web\"\"\"\n", - " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", - "\n", - "\n", - "# Create an agent that uses the OpenAI GPT-4o model.\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " # api_key=\"YOUR_API_KEY\",\n", - ")\n", - "agent = AssistantAgent(\n", - " name=\"assistant\",\n", - " model_client=model_client,\n", - " tools=[web_search],\n", - " system_message=\"Use tools to solve tasks.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## Getting Responses\n", - "\n", - "We can use the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method to get the agent response to a given message.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=598, completion_tokens=16), content=[FunctionCall(id='call_9UWYM1CgE3ZbnJcSJavNDB79', arguments='{\"query\":\"AutoGen\"}', name='web_search')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_9UWYM1CgE3ZbnJcSJavNDB79', is_error=False)], 
type='ToolCallExecutionEvent')]\n", - "source='assistant' models_usage=None content='AutoGen is a programming framework for building multi-agent applications.' type='ToolCallSummaryMessage'\n" - ] - } - ], - "source": [ - "async def assistant_run() -> None:\n", - " response = await agent.on_messages(\n", - " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - " )\n", - " print(response.inner_messages)\n", - " print(response.chat_message)\n", - "\n", - "\n", - "# Use asyncio.run(assistant_run()) when running in a script.\n", - "await assistant_run()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The call to the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method\n", - "returns a {py:class}`~autogen_agentchat.base.Response`\n", - "that contains the agent's final response in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute,\n", - "as well as a list of inner messages in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` attribute,\n", - "which stores the agent's \"thought process\" that led to the final response.\n", - "\n", - "```{note}\n", - "It is important to note that {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`\n", - "will update the internal state of the agent -- it will add the messages to the agent's\n", - "history. So you should call this method with new messages.\n", - "**You should not repeatedly call this method with the same messages or the complete history.**\n", - "```\n", - "\n", - "```{note}\n", - "Unlike in v0.2 AgentChat, the tools are executed by the same agent directly within\n", - "the same call to {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`.\n", - "By default, the agent will return the result of the tool call as the final response.\n", - "```\n", - "\n", - "You can also call the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method, which is a convenience method that calls {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`. \n", - "It follows the same interface as [Teams](./teams.ipynb) and returns a {py:class}`~autogen_agentchat.base.TaskResult` object." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Multi-Modal Input\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can handle multi-modal input\n", - "by providing the input as a {py:class}`~autogen_agentchat.messages.MultiModalMessage`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from io import BytesIO\n", - "\n", - "import PIL\n", - "import requests\n", - "from autogen_agentchat.messages import MultiModalMessage\n", - "from autogen_core import Image\n", - "\n", - "# Create a multi-modal message with random image and text.\n", - "pil_image = PIL.Image.open(BytesIO(requests.get(\"https://picsum.photos/300/200\").content))\n", - "img = Image(pil_image)\n", - "multi_modal_message = MultiModalMessage(content=[\"Can you describe the content of this image?\", img], source=\"user\")\n", - "img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The image depicts a vintage car, likely from the 1930s or 1940s, with a sleek, classic design. The car seems to be customized or well-maintained, as indicated by its shiny exterior and lowered stance. It has a prominent grille and round headlights. There's a license plate on the front with the text \"FARMER BOY.\" The setting appears to be a street with old-style buildings in the background, suggesting a historical or retro theme.\n" - ] - } - ], - "source": [ - "# Use asyncio.run(...) when running in a script.\n", - "response = await agent.on_messages([multi_modal_message], CancellationToken())\n", - "print(response.chat_message.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also use {py:class}`~autogen_agentchat.messages.MultiModalMessage` as a `task`\n", - "input to the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Streaming Messages\n", - "\n", - "We can also stream each message as it is generated by the agent by using the\n", - "{py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method,\n", - "and use {py:class}`~autogen_agentchat.ui.Console` to print the messages\n", - "as they appear to the console." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- assistant ----------\n", - "[FunctionCall(id='call_fSp5iTGVm2FKw5NIvfECSqNd', arguments='{\"query\":\"AutoGen information\"}', name='web_search')]\n", - "[Prompt tokens: 61, Completion tokens: 16]\n", - "---------- assistant ----------\n", - "[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_fSp5iTGVm2FKw5NIvfECSqNd')]\n", - "---------- assistant ----------\n", - "AutoGen is a programming framework designed for building multi-agent applications. 
If you need more detailed information or specific aspects about AutoGen, feel free to ask!\n", - "[Prompt tokens: 93, Completion tokens: 32]\n", - "---------- Summary ----------\n", - "Number of inner messages: 2\n", - "Total prompt tokens: 154\n", - "Total completion tokens: 48\n", - "Duration: 4.30 seconds\n" - ] - } - ], - "source": [ - "async def assistant_run_stream() -> None:\n", - " # Option 1: read each message from the stream (as shown in the previous example).\n", - " # async for message in agent.on_messages_stream(\n", - " # [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " # cancellation_token=CancellationToken(),\n", - " # ):\n", - " # print(message)\n", - "\n", - " # Option 2: use Console to print all messages as they appear.\n", - " await Console(\n", - " agent.on_messages_stream(\n", - " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - " ),\n", - " output_stats=True, # Enable stats printing.\n", - " )\n", - "\n", - "\n", - "# Use asyncio.run(assistant_run_stream()) when running in a script.\n", - "await assistant_run_stream()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method\n", - "returns an asynchronous generator that yields each inner message generated by the agent,\n", - "with the final item being the response message in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute.\n", - "\n", - "From the messages, you can observe that the assistant agent utilized the `web_search` tool to\n", - "gather information and responded based on the search results.\n", - "\n", - "You can also use {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` to get the same streaming behavior as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`. It follows the same interface as [Teams](./teams.ipynb)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using Tools\n", - "\n", - "Large Language Models (LLMs) are typically limited to generating text or code responses. \n", - "However, many complex tasks benefit from the ability to use external tools that perform specific actions,\n", - "such as fetching data from APIs or databases.\n", - "\n", - "To address this limitation, modern LLMs can now accept a list of available tool schemas \n", - "(descriptions of tools and their arguments) and generate a tool call message. 
\n", - "This capability is known as **Tool Calling** or **Function Calling** and \n", - "is becoming a popular pattern in building intelligent agent-based applications.\n", - "Refer to the documentation from [OpenAI](https://platform.openai.com/docs/guides/function-calling) \n", - "and [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) for more information about tool calling in LLMs.\n", - "\n", - "In AgentChat, the {py:class}`~autogen_agentchat.agents.AssistantAgent` can use tools to perform specific actions.\n", - "The `web_search` tool is one such tool that allows the assistant agent to search the web for information.\n", - "A custom tool can be a Python function or a subclass of the {py:class}`~autogen_core.tools.BaseTool`.\n", - "\n", - "```{note}\n", - "For how to use model clients directly with tools, refer to the [Tools](../../core-user-guide/components/tools.ipynb) section\n", - "in the Core User Guide.\n", - "```\n", - "\n", - "By default, when {py:class}`~autogen_agentchat.agents.AssistantAgent` executes a tool,\n", - "it will return the tool's output as a string in {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage` in its response.\n", - "If your tool does not return a well-formed string in natural language, you\n", - "can add a reflection step to have the model summarize the tool's output,\n", - "by setting the `reflect_on_tool_use=True` parameter in the {py:class}`~autogen_agentchat.agents.AssistantAgent` constructor.\n", - "\n", - "### Built-in Tools\n", - "\n", - "AutoGen Extension provides a set of built-in tools that can be used with the Assistant Agent.\n", - "Head over to the [API documentation](../../../reference/index.md) for all the available tools\n", - "under the `autogen_ext.tools` namespace. For example, you can find the following tools:\n", - "\n", - "- {py:mod}`~autogen_ext.tools.graphrag`: Tools for using GraphRAG index.\n", - "- {py:mod}`~autogen_ext.tools.http`: Tools for making HTTP requests.\n", - "- {py:mod}`~autogen_ext.tools.langchain`: Adaptor for using LangChain tools.\n", - "- {py:mod}`~autogen_ext.tools.mcp`: Tools for using Model Chat Protocol (MCP) servers." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Function Tool\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.AssistantAgent` automatically\n", - "converts a Python function into a {py:class}`~autogen_core.tools.FunctionTool`\n", - "which can be used as a tool by the agent and automatically generates the tool schema\n", - "from the function signature and docstring.\n", - "\n", - "The `web_search_func` tool is an example of a function tool.\n", - "The schema is automatically generated." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'name': 'web_search_func',\n", - " 'description': 'Find information on the web',\n", - " 'parameters': {'type': 'object',\n", - " 'properties': {'query': {'description': 'query',\n", - " 'title': 'Query',\n", - " 'type': 'string'}},\n", - " 'required': ['query'],\n", - " 'additionalProperties': False},\n", - " 'strict': False}" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from autogen_core.tools import FunctionTool\n", - "\n", - "\n", - "# Define a tool using a Python function.\n", - "async def web_search_func(query: str) -> str:\n", - " \"\"\"Find information on the web\"\"\"\n", - " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", - "\n", - "\n", - "# This step is automatically performed inside the AssistantAgent if the tool is a Python function.\n", - "web_search_function_tool = FunctionTool(web_search_func, description=\"Find information on the web\")\n", - "# The schema is provided to the model during AssistantAgent's on_messages call.\n", - "web_search_function_tool.schema" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Model Context Protocol Tools\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can also use tools that are\n", - "served from a Model Context Protocol (MCP) server\n", - "using {py:func}`~autogen_ext.tools.mcp.mcp_server_tools`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Seattle, located in Washington state, is the most populous city in the state and a major city in the Pacific Northwest region of the United States. It's known for its vibrant cultural scene, significant economic presence, and rich history. Here are some key points about Seattle from the Wikipedia page:\n", - "\n", - "1. **History and Geography**: Seattle is situated between Puget Sound and Lake Washington, with the Cascade Range to the east and the Olympic Mountains to the west. Its history is deeply rooted in Native American heritage and its development was accelerated with the arrival of settlers in the 19th century. The city was officially incorporated in 1869.\n", - "\n", - "2. **Economy**: Seattle is a major economic hub with a diverse economy anchored by sectors like aerospace, technology, and retail. It's home to influential companies such as Amazon and Starbucks, and has a significant impact on the tech industry due to companies like Microsoft and other technology enterprises in the surrounding area.\n", - "\n", - "3. **Cultural Significance**: Known for its music scene, Seattle was the birthplace of grunge music in the early 1990s. It also boasts significant attractions like the Space Needle, Pike Place Market, and the Seattle Art Museum. \n", - "\n", - "4. **Education and Innovation**: The city hosts important educational institutions, with the University of Washington being a leading research university. Seattle is recognized for fostering innovation and is a leader in environmental sustainability efforts.\n", - "\n", - "5. **Demographics and Diversity**: Seattle is noted for its diverse population, reflected in its rich cultural tapestry. 
It has seen a significant increase in population, leading to urban development and changes in its social landscape.\n", - "\n", - "These points highlight Seattle as a dynamic city with a significant cultural, economic, and educational influence within the United States and beyond.\n" - ] - } - ], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "from autogen_ext.tools.mcp import StdioServerParams, mcp_server_tools\n", - "\n", - "# Get the fetch tool from mcp-server-fetch.\n", - "fetch_mcp_server = StdioServerParams(command=\"uvx\", args=[\"mcp-server-fetch\"])\n", - "tools = await mcp_server_tools(fetch_mcp_server)\n", - "\n", - "# Create an agent that can use the fetch tool.\n", - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "agent = AssistantAgent(name=\"fetcher\", model_client=model_client, tools=tools, reflect_on_tool_use=True) # type: ignore\n", - "\n", - "# Let the agent fetch the content of a URL and summarize it.\n", - "result = await agent.run(task=\"Summarize the content of https://en.wikipedia.org/wiki/Seattle\")\n", - "print(result.messages[-1].content)\n", - "\n", - "# Close the connection to the model client.\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Langchain Tools\n", - "\n", - "You can also use tools from the Langchain library\n", - "by wrapping them in {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- assistant ----------\n", - "[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')]\n", - "[Prompt tokens: 111, Completion tokens: 22]\n", - "---------- assistant ----------\n", - "[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')]\n", - "---------- assistant ----------\n", - "29.69911764705882\n", - "---------- Summary ----------\n", - "Number of inner messages: 2\n", - "Total prompt tokens: 111\n", - "Total completion tokens: 22\n", - "Duration: 0.62 seconds\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Agents\n", + "\n", + "AutoGen AgentChat provides a set of preset Agents, each with variations in how an agent might respond to messages.\n", + "All agents share the following attributes and methods:\n", + "\n", + "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.name`: The unique name of the agent.\n", + "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.description`: The description of the agent in text.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: Send the agent a sequence of messages that subclass {py:class}`~autogen_agentchat.messages.ChatMessage` and get a {py:class}`~autogen_agentchat.base.Response`. 
**It is important to note that agents are expected to be stateful and this method is expected to be called with new messages, not the complete history**.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of messages that subclass {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage`, followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: Reset the agent to its initial state.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`: convenience methods that call {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` respectively but offer the same interface as [Teams](./teams.ipynb).\n", + "\n", + "See {py:mod}`autogen_agentchat.messages` for more information on AgentChat message types.\n", + "\n", + "\n", + "## Assistant Agent\n", + "\n", + "{py:class}`~autogen_agentchat.agents.AssistantAgent` is a built-in agent that\n", + "uses a language model and has the ability to use tools." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.messages import TextMessage\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_core import CancellationToken\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "# Define a tool that searches the web for information.\n", + "async def web_search(query: str) -> str:\n", + " \"\"\"Find information on the web\"\"\"\n", + " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", + "\n", + "\n", + "# Create an agent that uses the OpenAI GPT-4o model.\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " # api_key=\"YOUR_API_KEY\",\n", + ")\n", + "agent = AssistantAgent(\n", + " name=\"assistant\",\n", + " model_client=model_client,\n", + " tools=[web_search],\n", + " system_message=\"Use tools to solve tasks.\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Getting Responses\n", + "\n", + "We can use the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method to get the agent response to a given message.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=598, completion_tokens=16), content=[FunctionCall(id='call_9UWYM1CgE3ZbnJcSJavNDB79', arguments='{\"query\":\"AutoGen\"}', name='web_search')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_9UWYM1CgE3ZbnJcSJavNDB79', is_error=False)], type='ToolCallExecutionEvent')]\n", + "source='assistant' models_usage=None content='AutoGen is a programming framework for building multi-agent applications.' 
type='ToolCallSummaryMessage'\n" + ] + } + ], + "source": [ + "async def assistant_run() -> None:\n", + " response = await agent.on_messages(\n", + " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", + " cancellation_token=CancellationToken(),\n", + " )\n", + " print(response.inner_messages)\n", + " print(response.chat_message)\n", + "\n", + "\n", + "# Use asyncio.run(assistant_run()) when running in a script.\n", + "await assistant_run()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The call to the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method\n", + "returns a {py:class}`~autogen_agentchat.base.Response`\n", + "that contains the agent's final response in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute,\n", + "as well as a list of inner messages in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` attribute,\n", + "which stores the agent's \"thought process\" that led to the final response.\n", + "\n", + "```{note}\n", + "It is important to note that {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`\n", + "will update the internal state of the agent -- it will add the messages to the agent's\n", + "history. So you should call this method with new messages.\n", + "**You should not repeatedly call this method with the same messages or the complete history.**\n", + "```\n", + "\n", + "```{note}\n", + "Unlike in v0.2 AgentChat, the tools are executed by the same agent directly within\n", + "the same call to {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`.\n", + "By default, the agent will return the result of the tool call as the final response.\n", + "```\n", + "\n", + "You can also call the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method, which is a convenience method that calls {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`. \n", + "It follows the same interface as [Teams](./teams.ipynb) and returns a {py:class}`~autogen_agentchat.base.TaskResult` object." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Multi-Modal Input\n", + "\n", + "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can handle multi-modal input\n", + "by providing the input as a {py:class}`~autogen_agentchat.messages.MultiModalMessage`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from io import BytesIO\n", + "\n", + "import PIL\n", + "import requests\n", + "from autogen_agentchat.messages import MultiModalMessage\n", + "from autogen_core import Image\n", + "\n", + "# Create a multi-modal message with random image and text.\n", + "pil_image = PIL.Image.open(BytesIO(requests.get(\"https://picsum.photos/300/200\").content))\n", + "img = Image(pil_image)\n", + "multi_modal_message = MultiModalMessage(content=[\"Can you describe the content of this image?\", img], source=\"user\")\n", + "img" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The image depicts a vintage car, likely from the 1930s or 1940s, with a sleek, classic design. The car seems to be customized or well-maintained, as indicated by its shiny exterior and lowered stance. 
It has a prominent grille and round headlights. There's a license plate on the front with the text \"FARMER BOY.\" The setting appears to be a street with old-style buildings in the background, suggesting a historical or retro theme.\n" + ] + } + ], + "source": [ + "# Use asyncio.run(...) when running in a script.\n", + "response = await agent.on_messages([multi_modal_message], CancellationToken())\n", + "print(response.chat_message.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also use {py:class}`~autogen_agentchat.messages.MultiModalMessage` as a `task`\n", + "input to the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Streaming Messages\n", + "\n", + "We can also stream each message as it is generated by the agent by using the\n", + "{py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method,\n", + "and use {py:class}`~autogen_agentchat.ui.Console` to print the messages\n", + "as they appear to the console." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- assistant ----------\n", + "[FunctionCall(id='call_fSp5iTGVm2FKw5NIvfECSqNd', arguments='{\"query\":\"AutoGen information\"}', name='web_search')]\n", + "[Prompt tokens: 61, Completion tokens: 16]\n", + "---------- assistant ----------\n", + "[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_fSp5iTGVm2FKw5NIvfECSqNd')]\n", + "---------- assistant ----------\n", + "AutoGen is a programming framework designed for building multi-agent applications. 
If you need more detailed information or specific aspects about AutoGen, feel free to ask!\n", + "[Prompt tokens: 93, Completion tokens: 32]\n", + "---------- Summary ----------\n", + "Number of inner messages: 2\n", + "Total prompt tokens: 154\n", + "Total completion tokens: 48\n", + "Duration: 4.30 seconds\n" + ] + } + ], + "source": [ + "async def assistant_run_stream() -> None:\n", + " # Option 1: read each message from the stream (as shown in the previous example).\n", + " # async for message in agent.on_messages_stream(\n", + " # [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", + " # cancellation_token=CancellationToken(),\n", + " # ):\n", + " # print(message)\n", + "\n", + " # Option 2: use Console to print all messages as they appear.\n", + " await Console(\n", + " agent.on_messages_stream(\n", + " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", + " cancellation_token=CancellationToken(),\n", + " ),\n", + " output_stats=True, # Enable stats printing.\n", + " )\n", + "\n", + "\n", + "# Use asyncio.run(assistant_run_stream()) when running in a script.\n", + "await assistant_run_stream()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method\n", + "returns an asynchronous generator that yields each inner message generated by the agent,\n", + "with the final item being the response message in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute.\n", + "\n", + "From the messages, you can observe that the assistant agent utilized the `web_search` tool to\n", + "gather information and responded based on the search results.\n", + "\n", + "You can also use {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` to get the same streaming behavior as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`. It follows the same interface as [Teams](./teams.ipynb)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Tools\n", + "\n", + "Large Language Models (LLMs) are typically limited to generating text or code responses. \n", + "However, many complex tasks benefit from the ability to use external tools that perform specific actions,\n", + "such as fetching data from APIs or databases.\n", + "\n", + "To address this limitation, modern LLMs can now accept a list of available tool schemas \n", + "(descriptions of tools and their arguments) and generate a tool call message. 
\n", + "This capability is known as **Tool Calling** or **Function Calling** and \n", + "is becoming a popular pattern in building intelligent agent-based applications.\n", + "Refer to the documentation from [OpenAI](https://platform.openai.com/docs/guides/function-calling) \n", + "and [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) for more information about tool calling in LLMs.\n", + "\n", + "In AgentChat, the {py:class}`~autogen_agentchat.agents.AssistantAgent` can use tools to perform specific actions.\n", + "The `web_search` tool is one such tool that allows the assistant agent to search the web for information.\n", + "A custom tool can be a Python function or a subclass of the {py:class}`~autogen_core.tools.BaseTool`.\n", + "\n", + "```{note}\n", + "For how to use model clients directly with tools, refer to the [Tools](../../core-user-guide/components/tools.ipynb) section\n", + "in the Core User Guide.\n", + "```\n", + "\n", + "By default, when {py:class}`~autogen_agentchat.agents.AssistantAgent` executes a tool,\n", + "it will return the tool's output as a string in {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage` in its response.\n", + "If your tool does not return a well-formed string in natural language, you\n", + "can add a reflection step to have the model summarize the tool's output,\n", + "by setting the `reflect_on_tool_use=True` parameter in the {py:class}`~autogen_agentchat.agents.AssistantAgent` constructor.\n", + "\n", + "### Built-in Tools\n", + "\n", + "AutoGen Extension provides a set of built-in tools that can be used with the Assistant Agent.\n", + "Head over to the [API documentation](../../../reference/index.md) for all the available tools\n", + "under the `autogen_ext.tools` namespace. For example, you can find the following tools:\n", + "\n", + "- {py:mod}`~autogen_ext.tools.graphrag`: Tools for using GraphRAG index.\n", + "- {py:mod}`~autogen_ext.tools.http`: Tools for making HTTP requests.\n", + "- {py:mod}`~autogen_ext.tools.langchain`: Adaptor for using LangChain tools.\n", + "- {py:mod}`~autogen_ext.tools.mcp`: Tools for using Model Chat Protocol (MCP) servers." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Function Tool\n", + "\n", + "The {py:class}`~autogen_agentchat.agents.AssistantAgent` automatically\n", + "converts a Python function into a {py:class}`~autogen_core.tools.FunctionTool`\n", + "which can be used as a tool by the agent and automatically generates the tool schema\n", + "from the function signature and docstring.\n", + "\n", + "The `web_search_func` tool is an example of a function tool.\n", + "The schema is automatically generated." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'name': 'web_search_func',\n", + " 'description': 'Find information on the web',\n", + " 'parameters': {'type': 'object',\n", + " 'properties': {'query': {'description': 'query',\n", + " 'title': 'Query',\n", + " 'type': 'string'}},\n", + " 'required': ['query'],\n", + " 'additionalProperties': False},\n", + " 'strict': False}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from autogen_core.tools import FunctionTool\n", + "\n", + "\n", + "# Define a tool using a Python function.\n", + "async def web_search_func(query: str) -> str:\n", + " \"\"\"Find information on the web\"\"\"\n", + " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", + "\n", + "\n", + "# This step is automatically performed inside the AssistantAgent if the tool is a Python function.\n", + "web_search_function_tool = FunctionTool(web_search_func, description=\"Find information on the web\")\n", + "# The schema is provided to the model during AssistantAgent's on_messages call.\n", + "web_search_function_tool.schema" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Model Context Protocol Tools\n", + "\n", + "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can also use tools that are\n", + "served from a Model Context Protocol (MCP) server\n", + "using {py:func}`~autogen_ext.tools.mcp.mcp_server_tools`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Seattle, located in Washington state, is the most populous city in the state and a major city in the Pacific Northwest region of the United States. It's known for its vibrant cultural scene, significant economic presence, and rich history. Here are some key points about Seattle from the Wikipedia page:\n", + "\n", + "1. **History and Geography**: Seattle is situated between Puget Sound and Lake Washington, with the Cascade Range to the east and the Olympic Mountains to the west. Its history is deeply rooted in Native American heritage and its development was accelerated with the arrival of settlers in the 19th century. The city was officially incorporated in 1869.\n", + "\n", + "2. **Economy**: Seattle is a major economic hub with a diverse economy anchored by sectors like aerospace, technology, and retail. It's home to influential companies such as Amazon and Starbucks, and has a significant impact on the tech industry due to companies like Microsoft and other technology enterprises in the surrounding area.\n", + "\n", + "3. **Cultural Significance**: Known for its music scene, Seattle was the birthplace of grunge music in the early 1990s. It also boasts significant attractions like the Space Needle, Pike Place Market, and the Seattle Art Museum. \n", + "\n", + "4. **Education and Innovation**: The city hosts important educational institutions, with the University of Washington being a leading research university. Seattle is recognized for fostering innovation and is a leader in environmental sustainability efforts.\n", + "\n", + "5. **Demographics and Diversity**: Seattle is noted for its diverse population, reflected in its rich cultural tapestry. 
It has seen a significant increase in population, leading to urban development and changes in its social landscape.\n", + "\n", + "These points highlight Seattle as a dynamic city with a significant cultural, economic, and educational influence within the United States and beyond.\n" + ] + } + ], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "from autogen_ext.tools.mcp import StdioServerParams, mcp_server_tools\n", + "\n", + "# Get the fetch tool from mcp-server-fetch.\n", + "fetch_mcp_server = StdioServerParams(command=\"uvx\", args=[\"mcp-server-fetch\"])\n", + "tools = await mcp_server_tools(fetch_mcp_server)\n", + "\n", + "# Create an agent that can use the fetch tool.\n", + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "agent = AssistantAgent(name=\"fetcher\", model_client=model_client, tools=tools, reflect_on_tool_use=True) # type: ignore\n", + "\n", + "# Let the agent fetch the content of a URL and summarize it.\n", + "result = await agent.run(task=\"Summarize the content of https://en.wikipedia.org/wiki/Seattle\")\n", + "print(result.messages[-1].content)\n", + "\n", + "# Close the connection to the model client.\n", + "await model_client.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Langchain Tools\n", + "\n", + "You can also use tools from the Langchain library\n", + "by wrapping them in {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- assistant ----------\n", + "[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')]\n", + "[Prompt tokens: 111, Completion tokens: 22]\n", + "---------- assistant ----------\n", + "[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')]\n", + "---------- assistant ----------\n", + "29.69911764705882\n", + "---------- Summary ----------\n", + "Number of inner messages: 2\n", + "Total prompt tokens: 111\n", + "Total completion tokens: 22\n", + "Duration: 0.62 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "Response(chat_message=ToolCallSummaryMessage(source='assistant', models_usage=None, content='29.69911764705882', type='ToolCallSummaryMessage'), inner_messages=[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=111, completion_tokens=22), content=[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')], type='ToolCallExecutionEvent')])" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import pandas as pd\n", + "from autogen_ext.tools.langchain import LangChainToolAdapter\n", + "from langchain_experimental.tools.python.tool import PythonAstREPLTool\n", + "\n", + "df = pd.read_csv(\"https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv\")\n", + "tool = LangChainToolAdapter(PythonAstREPLTool(locals={\"df\": df}))\n", + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "agent = AssistantAgent(\n", + 
" \"assistant\", tools=[tool], model_client=model_client, system_message=\"Use the `df` variable to access the dataset.\"\n", + ")\n", + "await Console(\n", + " agent.on_messages_stream(\n", + " [TextMessage(content=\"What's the average age of the passengers?\", source=\"user\")], CancellationToken()\n", + " ),\n", + " output_stats=True,\n", + ")\n", + "\n", + "await model_client.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Parallel Tool Calls\n", + "\n", + "Some models support parallel tool calls, which can be useful for tasks that require multiple tools to be called simultaneously.\n", + "By default, if the model client produces multiple tool calls, {py:class}`~autogen_agentchat.agents.AssistantAgent`\n", + "will call the tools in parallel.\n", + "\n", + "You may want to disable parallel tool calls when the tools have side effects that may interfere with each other, or,\n", + "when agent behavior needs to be consistent across different models.\n", + "This should be done at the model client level.\n", + "\n", + "For {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient` and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`,\n", + "set `parallel_tool_calls=False` to disable parallel tool calls." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model_client_no_parallel_tool_call = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " parallel_tool_calls=False, # type: ignore\n", + ")\n", + "agent_no_parallel_tool_call = AssistantAgent(\n", + " name=\"assistant\",\n", + " model_client=model_client_no_parallel_tool_call,\n", + " tools=[web_search],\n", + " system_message=\"Use tools to solve tasks.\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Running an Agent in a Loop\n", + "\n", + "The {py:class}`~autogen_agentchat.agents.AssistantAgent` executes one\n", + "step at a time: one model call, followed by one tool call (or parallel tool calls), and then\n", + "an optional reflection.\n", + "\n", + "To run it in a loop, for example, running it until it stops producing\n", + "tool calls, please refer to [Single-Agent Team](./teams.ipynb#single-agent-team)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Structured Output\n", + "\n", + "Structured output allows models to return structured JSON text with pre-defined schema\n", + "provided by the application. Different from JSON-mode, the schema can be provided\n", + "as a [Pydantic BaseModel](https://docs.pydantic.dev/latest/concepts/models/)\n", + "class, which can also be used to validate the output. \n", + "\n", + "```{note}\n", + "Structured output is only available for models that support it. It also\n", + "requires the model client to support structured output as well.\n", + "Currently, the {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient`\n", + "and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`\n", + "support structured output.\n", + "```\n", + "\n", + "Structured output is also useful for incorporating Chain-of-Thought\n", + "reasoning in the agent's responses.\n", + "See the example below for how to use structured output with the assistant agent." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "I am happy.\n", + "---------- assistant ----------\n", + "{\"thoughts\":\"The user explicitly states that they are happy.\",\"response\":\"happy\"}\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='I am happy.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=89, completion_tokens=18), content='{\"thoughts\":\"The user explicitly states that they are happy.\",\"response\":\"happy\"}', type='TextMessage')], stop_reason=None)" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from typing import Literal\n", + "\n", + "from pydantic import BaseModel\n", + "\n", + "\n", + "# The response format for the agent as a Pydantic base model.\n", + "class AgentResponse(BaseModel):\n", + " thoughts: str\n", + " response: Literal[\"happy\", \"sad\", \"neutral\"]\n", + "\n", + "\n", + "# Create an agent that uses the OpenAI GPT-4o model with the custom response format.\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " response_format=AgentResponse, # type: ignore\n", + ")\n", + "agent = AssistantAgent(\n", + " \"assistant\",\n", + " model_client=model_client,\n", + " system_message=\"Categorize the input as happy, sad, or neutral following the JSON format.\",\n", + ")\n", + "\n", + "await Console(agent.run_stream(task=\"I am happy.\"))\n", + "await model_client.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Streaming Tokens\n", + "\n", + "You can stream the tokens generated by the model client by setting `model_client_stream=True`.\n", + "This will cause the agent to yield {py:class}`~autogen_agentchat.messages.ModelClientStreamingChunkEvent` messages\n", + "in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`.\n", + "\n", + "The underlying model API must support streaming tokens for this to work.\n", + "Please check with your model provider to see if this is supported." 
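+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a small sketch (an illustrative addition, assuming the `streaming_assistant` created in the next cell), the chunk events can be collected and assembled into the full text by hand:\n",
+ "\n",
+ "```python\n",
+ "from autogen_agentchat.messages import ModelClientStreamingChunkEvent\n",
+ "\n",
+ "chunks: list[str] = []\n",
+ "async for message in streaming_assistant.on_messages_stream(\n",
+ "    [TextMessage(content=\"Name two cities in South America\", source=\"user\")],\n",
+ "    cancellation_token=CancellationToken(),\n",
+ "):\n",
+ "    if isinstance(message, ModelClientStreamingChunkEvent):\n",
+ "        chunks.append(message.content)\n",
+ "print(\"\".join(chunks))  # The concatenation should match the final response text.\n",
+ "```"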
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' South' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Buenos' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Aires' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Argentina' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' São' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Paulo' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Brazil' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", + "Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in South America are Buenos Aires in Argentina and São Paulo in Brazil.', type='TextMessage'), inner_messages=[])\n" + ] + } + ], + "source": [ + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "\n", + "streaming_assistant = AssistantAgent(\n", + " name=\"assistant\",\n", + " model_client=model_client,\n", + " system_message=\"You are a helpful assistant.\",\n", + " model_client_stream=True, # Enable streaming tokens.\n", + ")\n", + "\n", + "# Use an async function and asyncio.run() in a script.\n", + "async for message in streaming_assistant.on_messages_stream( # type: ignore\n", + " [TextMessage(content=\"Name two cities in South America\", source=\"user\")],\n", + " cancellation_token=CancellationToken(),\n", + "):\n", + " print(message)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see the streaming chunks in the output above.\n", + "The chunks are generated by the model client and are yielded by the agent as they are received.\n", + "The final response, the concatenation of all the chunks, is yielded right after the last chunk.\n", + "\n", + "Similarly, {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` will also yield the same streaming chunks,\n", + "followed by a full text message right after the last chunk." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "source='user' models_usage=None content='Name two cities in North America.' 
type='TextMessage'\n", + "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' North' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' New' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' York' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' City' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' the' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' United' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' States' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Toronto' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Canada' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) content='Two cities in North America are New York City in the United States and Toronto in Canada.' type='TextMessage'\n", + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Name two cities in North America.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in North America are New York City in the United States and Toronto in Canada.', type='TextMessage')], stop_reason=None)\n" + ] + } + ], + "source": [ + "async for message in streaming_assistant.run_stream(task=\"Name two cities in North America.\"): # type: ignore\n", + " print(message)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Model Context\n", + "\n", + "{py:class}`~autogen_agentchat.agents.AssistantAgent` has a `model_context`\n", + "parameter that can be used to pass in a {py:class}`~autogen_core.model_context.ChatCompletionContext`\n", + "object. This allows the agent to use different model contexts, such as\n", + "{py:class}`~autogen_core.model_context.BufferedChatCompletionContext` to\n", + "limit the context sent to the model.\n", + "\n", + "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` uses\n", + "the {py:class}`~autogen_core.model_context.UnboundedChatCompletionContext`\n", + "which sends the full conversation history to the model. To limit the context\n", + "to the last `n` messages, you can use the {py:class}`~autogen_core.model_context.BufferedChatCompletionContext`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_core.model_context import BufferedChatCompletionContext\n", + "\n", + "# Create an agent that uses only the last 5 messages in the context to generate responses.\n", + "agent = AssistantAgent(\n", + " name=\"assistant\",\n", + " model_client=model_client,\n", + " tools=[web_search],\n", + " system_message=\"Use tools to solve tasks.\",\n", + " model_context=BufferedChatCompletionContext(buffer_size=5), # Only use the last 5 messages in the context.\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Other Preset Agents\n", + "\n", + "The following preset agents are available:\n", + "\n", + "- {py:class}`~autogen_agentchat.agents.UserProxyAgent`: An agent that takes user input and returns it as responses.\n", + "- {py:class}`~autogen_agentchat.agents.CodeExecutorAgent`: An agent that can execute code.\n", + "- {py:class}`~autogen_ext.agents.openai.OpenAIAssistantAgent`: An agent that is backed by an OpenAI Assistant, with the ability to use custom tools.\n", + "- {py:class}`~autogen_ext.agents.web_surfer.MultimodalWebSurfer`: A multi-modal agent that can search the web and visit web pages for information.\n", + "- {py:class}`~autogen_ext.agents.file_surfer.FileSurfer`: An agent that can search and browse local files for information.\n", + "- {py:class}`~autogen_ext.agents.video_surfer.VideoSurfer`: An agent that can watch videos for information." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Step\n", + "\n", + "Having explored the usage of the {py:class}`~autogen_agentchat.agents.AssistantAgent`, we can now proceed to the next section to learn about the teams feature in AgentChat.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } }, - { - "data": { - "text/plain": [ - "Response(chat_message=ToolCallSummaryMessage(source='assistant', models_usage=None, content='29.69911764705882', type='ToolCallSummaryMessage'), inner_messages=[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=111, completion_tokens=22), content=[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')], type='ToolCallExecutionEvent')])" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import pandas as pd\n", - "from autogen_ext.tools.langchain import LangChainToolAdapter\n", - "from langchain_experimental.tools.python.tool import PythonAstREPLTool\n", - "\n", - "df = pd.read_csv(\"https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv\")\n", - "tool = LangChainToolAdapter(PythonAstREPLTool(locals={\"df\": df}))\n", - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "agent = AssistantAgent(\n", - " \"assistant\", tools=[tool], 
model_client=model_client, system_message=\"Use the `df` variable to access the dataset.\"\n", - ")\n", - "await Console(\n", - " agent.on_messages_stream(\n", - " [TextMessage(content=\"What's the average age of the passengers?\", source=\"user\")], CancellationToken()\n", - " ),\n", - " output_stats=True,\n", - ")\n", - "\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Parallel Tool Calls\n", - "\n", - "Some models support parallel tool calls, which can be useful for tasks that require multiple tools to be called simultaneously.\n", - "By default, if the model client produces multiple tool calls, {py:class}`~autogen_agentchat.agents.AssistantAgent`\n", - "will call the tools in parallel.\n", - "\n", - "You may want to disable parallel tool calls when the tools have side effects that may interfere with each other, or,\n", - "when agent behavior needs to be consistent across different models.\n", - "This should be done at the model client level.\n", - "\n", - "For {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient` and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`,\n", - "set `parallel_tool_calls=False` to disable parallel tool calls." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model_client_no_parallel_tool_call = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " parallel_tool_calls=False, # type: ignore\n", - ")\n", - "agent_no_parallel_tool_call = AssistantAgent(\n", - " name=\"assistant\",\n", - " model_client=model_client_no_parallel_tool_call,\n", - " tools=[web_search],\n", - " system_message=\"Use tools to solve tasks.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Running an Agent in a Loop\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.AssistantAgent` executes one\n", - "step at a time: one model call, followed by one tool call (or parallel tool calls), and then\n", - "an optional reflection.\n", - "\n", - "To run it in a loop, for example, running it until it stops producing\n", - "tool calls, please refer to [Single-Agent Team](./teams.ipynb#single-agent-team)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Structured Output\n", - "\n", - "Structured output allows models to return structured JSON text with pre-defined schema\n", - "provided by the application. Different from JSON-mode, the schema can be provided\n", - "as a [Pydantic BaseModel](https://docs.pydantic.dev/latest/concepts/models/)\n", - "class, which can also be used to validate the output. \n", - "\n", - "```{note}\n", - "Structured output is only available for models that support it. It also\n", - "requires the model client to support structured output as well.\n", - "Currently, the {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient`\n", - "and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`\n", - "support structured output.\n", - "```\n", - "\n", - "Structured output is also useful for incorporating Chain-of-Thought\n", - "reasoning in the agent's responses.\n", - "See the example below for how to use structured output with the assistant agent." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "I am happy.\n", - "---------- assistant ----------\n", - "{\"thoughts\":\"The user explicitly states that they are happy.\",\"response\":\"happy\"}\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='I am happy.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=89, completion_tokens=18), content='{\"thoughts\":\"The user explicitly states that they are happy.\",\"response\":\"happy\"}', type='TextMessage')], stop_reason=None)" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from typing import Literal\n", - "\n", - "from pydantic import BaseModel\n", - "\n", - "\n", - "# The response format for the agent as a Pydantic base model.\n", - "class AgentResponse(BaseModel):\n", - " thoughts: str\n", - " response: Literal[\"happy\", \"sad\", \"neutral\"]\n", - "\n", - "\n", - "# Create an agent that uses the OpenAI GPT-4o model with the custom response format.\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " response_format=AgentResponse, # type: ignore\n", - ")\n", - "agent = AssistantAgent(\n", - " \"assistant\",\n", - " model_client=model_client,\n", - " system_message=\"Categorize the input as happy, sad, or neutral following the JSON format.\",\n", - ")\n", - "\n", - "await Console(agent.run_stream(task=\"I am happy.\"))\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Streaming Tokens\n", - "\n", - "You can stream the tokens generated by the model client by setting `model_client_stream=True`.\n", - "This will cause the agent to yield {py:class}`~autogen_agentchat.messages.ModelClientStreamingChunkEvent` messages\n", - "in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`.\n", - "\n", - "The underlying model API must support streaming tokens for this to work.\n", - "Please check with your model provider to see if this is supported." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' South' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Buenos' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Aires' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Argentina' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' São' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Paulo' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Brazil' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", - "Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in South America are Buenos Aires in Argentina and São Paulo in Brazil.', type='TextMessage'), inner_messages=[])\n" - ] - } - ], - "source": [ - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "\n", - "streaming_assistant = AssistantAgent(\n", - " name=\"assistant\",\n", - " model_client=model_client,\n", - " system_message=\"You are a helpful assistant.\",\n", - " model_client_stream=True, # Enable streaming tokens.\n", - ")\n", - "\n", - "# Use an async function and asyncio.run() in a script.\n", - "async for message in streaming_assistant.on_messages_stream( # type: ignore\n", - " [TextMessage(content=\"Name two cities in South America\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - "):\n", - " print(message)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see the streaming chunks in the output above.\n", - "The chunks are generated by the model client and are yielded by the agent as they are received.\n", - "The final response, the concatenation of all the chunks, is yielded right after the last chunk.\n", - "\n", - "Similarly, {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` will also yield the same streaming chunks,\n", - "followed by a full text message right after the last chunk." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "source='user' models_usage=None content='Name two cities in North America.' 
type='TextMessage'\n", - "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' North' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' New' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' York' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' City' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' the' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' United' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' States' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Toronto' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Canada' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) content='Two cities in North America are New York City in the United States and Toronto in Canada.' type='TextMessage'\n", - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Name two cities in North America.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in North America are New York City in the United States and Toronto in Canada.', type='TextMessage')], stop_reason=None)\n" - ] - } - ], - "source": [ - "async for message in streaming_assistant.run_stream(task=\"Name two cities in North America.\"): # type: ignore\n", - " print(message)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using Model Context\n", - "\n", - "{py:class}`~autogen_agentchat.agents.AssistantAgent` has a `model_context`\n", - "parameter that can be used to pass in a {py:class}`~autogen_core.model_context.ChatCompletionContext`\n", - "object. This allows the agent to use different model contexts, such as\n", - "{py:class}`~autogen_core.model_context.BufferedChatCompletionContext` to\n", - "limit the context sent to the model.\n", - "\n", - "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` uses\n", - "the {py:class}`~autogen_core.model_context.UnboundedChatCompletionContext`\n", - "which sends the full conversation history to the model. To limit the context\n", - "to the last `n` messages, you can use the {py:class}`~autogen_core.model_context.BufferedChatCompletionContext`." 
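The next cell demonstrates the buffered variant. As a further illustration, here is a minimal sketch of a head-and-tail strategy, assuming your version of `autogen_core.model_context` also provides `HeadAndTailChatCompletionContext`, which keeps the first `head_size` and the last `tail_size` messages and drops the middle:

```python
from autogen_core.model_context import HeadAndTailChatCompletionContext

# Keep the first message (typically the task) and the last 3 messages,
# dropping everything in between to bound the context size.
agent = AssistantAgent(
    name="assistant",
    model_client=model_client,
    model_context=HeadAndTailChatCompletionContext(head_size=1, tail_size=3),
)
```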
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from autogen_core.model_context import BufferedChatCompletionContext\n",
- "\n",
- "# Create an agent that uses only the last 5 messages in the context to generate responses.\n",
- "agent = AssistantAgent(\n",
- " name=\"assistant\",\n",
- " model_client=model_client,\n",
- " tools=[web_search],\n",
- " system_message=\"Use tools to solve tasks.\",\n",
- " model_context=BufferedChatCompletionContext(buffer_size=5), # Only use the last 5 messages in the context.\n",
- ")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Other Preset Agents\n",
- "\n",
- "The following preset agents are available:\n",
- "\n",
- "- {py:class}`~autogen_agentchat.agents.UserProxyAgent`: An agent that takes user input and returns it as responses.\n",
- "- {py:class}`~autogen_agentchat.agents.CodeExecutorAgent`: An agent that can execute code.\n",
- "- {py:class}`~autogen_ext.agents.openai.OpenAIAssistantAgent`: An agent that is backed by an OpenAI Assistant, with the ability to use custom tools.\n",
- "- {py:class}`~autogen_ext.agents.web_surfer.MultimodalWebSurfer`: A multi-modal agent that can search the web and visit web pages for information.\n",
- "- {py:class}`~autogen_ext.agents.file_surfer.FileSurfer`: An agent that can search and browse local files for information.\n",
- "- {py:class}`~autogen_ext.agents.video_surfer.VideoSurfer`: An agent that can watch videos for information."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Next Step\n",
- "\n",
- "Having explored the usage of the {py:class}`~autogen_agentchat.agents.AssistantAgent`, we can now proceed to the next section to learn about the teams feature in AgentChat.\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- ""
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": ".venv",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.12.3"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
\ No newline at end of file
diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb
index ff5fc25542c4..a9355ab26345 100644
--- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb
+++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb
@@ -1,129 +1,128 @@
 {
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Messages"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "In AutoGen AgentChat, _messages_ facilitate communication and information exchange with other agents, orchestrators, and applications. AgentChat supports various message types, each designed for specific purposes."
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Types of Messages\n", - "\n", - "At a high level, messages in AgentChat can be categorized into two types: agent-agent messages and an agent's internal events and messages.\n", - "\n", - "### Agent-Agent Messages\n", - "AgentChat supports many message types for agent-to-agent communication. They belong to the subclasses of {py:class}`~autogen_agentchat.messages.BaseChatMessage`. This message type allows both text and multimodal communication and subsumes other message types, such as {py:class}`~autogen_agentchat.messages.TextMessage` or {py:class}`~autogen_agentchat.messages.MultiModalMessage`.\n", - "\n", - "For example, the following code snippet demonstrates how to create a text message, which accepts a string content and a string source:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.messages import TextMessage\n", - "\n", - "text_message = TextMessage(content=\"Hello, world!\", source=\"User\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Similarly, the following code snippet demonstrates how to create a multimodal message, which accepts\n", - "a list of strings or {py:class}`~autogen_core.Image` objects:" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from io import BytesIO\n", - "\n", - "import requests\n", - "from autogen_agentchat.messages import MultiModalMessage\n", - "from autogen_core import Image as AGImage\n", - "from PIL import Image\n", - "\n", - "pil_image = Image.open(BytesIO(requests.get(\"https://picsum.photos/300/200\").content))\n", - "img = AGImage(pil_image)\n", - "multi_modal_message = MultiModalMessage(content=[\"Can you describe the content of this image?\", img], source=\"User\")\n", - "img" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The {py:class}`~autogen_agentchat.messages.TextMessage` and {py:class}`~autogen_agentchat.messages.MultiModalMessage` we have created can be passed to agents directly via the {py:class}`~autogen_agentchat.base.ChatAgent.on_messages` method, or as tasks given to a team {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` method. Messages are also used in the responses of an agent. We will explain these in more detail in [Agents](./agents.ipynb) and [Teams](./teams.ipynb)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Internal Events\n", - "\n", - "AgentChat also supports the concept of `events` - messages that are internal to an agent. 
These messages are used to communicate events and information on actions _within_ the agent itself, and belong to the subclasses of {py:class}`~autogen_agentchat.messages.BaseAgentEvent`.\n", - "\n", - "Examples of these include {py:class}`~autogen_agentchat.messages.ToolCallRequestEvent`, which indicates that a request was made to call a tool, and {py:class}`~autogen_agentchat.messages.ToolCallExecutionEvent`, which contains the results of tool calls.\n", - "\n", - "Typically, events are created by the agent itself and are contained in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response` returned from {py:class}`~autogen_agentchat.base.ChatAgent.on_messages`. If you are building a custom agent and have events that you want to communicate to other entities (e.g., a UI), you can include these in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response`. We will show examples of this in [Custom Agents](../custom-agents.ipynb).\n", - "\n", - "\n", - "You can read about the full set of messages supported in AgentChat in the {py:mod}`~autogen_agentchat.messages` module. \n", - "" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.7" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Messages" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In AutoGen AgentChat, _messages_ facilitate communication and information exchange with other agents, orchestrators, and applications. AgentChat supports various message types, each designed for specific purposes." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Types of Messages\n", + "\n", + "At a high level, messages in AgentChat can be categorized into two types: agent-agent messages and an agent's internal events and messages.\n", + "\n", + "### Agent-Agent Messages\n", + "AgentChat supports many message types for agent-to-agent communication. They belong to the subclasses of {py:class}`~autogen_agentchat.messages.ChatMessage`. 
This message type allows both text and multimodal communication and subsumes other message types, such as {py:class}`~autogen_agentchat.messages.TextMessage` or {py:class}`~autogen_agentchat.messages.MultiModalMessage`.\n", + "\n", + "For example, the following code snippet demonstrates how to create a text message, which accepts a string content and a string source:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.messages import TextMessage\n", + "\n", + "text_message = TextMessage(content=\"Hello, world!\", source=\"User\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Similarly, the following code snippet demonstrates how to create a multimodal message, which accepts\n", + "a list of strings or {py:class}`~autogen_core.Image` objects:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from io import BytesIO\n", + "\n", + "import requests\n", + "from autogen_agentchat.messages import MultiModalMessage\n", + "from autogen_core import Image as AGImage\n", + "from PIL import Image\n", + "\n", + "pil_image = Image.open(BytesIO(requests.get(\"https://picsum.photos/300/200\").content))\n", + "img = AGImage(pil_image)\n", + "multi_modal_message = MultiModalMessage(content=[\"Can you describe the content of this image?\", img], source=\"User\")\n", + "img" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The {py:class}`~autogen_agentchat.messages.TextMessage` and {py:class}`~autogen_agentchat.messages.MultiModalMessage` we have created can be passed to agents directly via the {py:class}`~autogen_agentchat.base.ChatAgent.on_messages` method, or as tasks given to a team {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` method. Messages are also used in the responses of an agent. We will explain these in more detail in [Agents](./agents.ipynb) and [Teams](./teams.ipynb)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Internal Events\n", + "\n", + "AgentChat also supports the concept of `events` - messages that are internal to an agent. These messages are used to communicate events and information on actions _within_ the agent itself, and belong to the subclasses of {py:class}`~autogen_agentchat.messages.BaseAgentEvent`.\n", + "\n", + "Examples of these include {py:class}`~autogen_agentchat.messages.ToolCallRequestEvent`, which indicates that a request was made to call a tool, and {py:class}`~autogen_agentchat.messages.ToolCallExecutionEvent`, which contains the results of tool calls.\n", + "\n", + "Typically, events are created by the agent itself and are contained in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response` returned from {py:class}`~autogen_agentchat.base.ChatAgent.on_messages`. If you are building a custom agent and have events that you want to communicate to other entities (e.g., a UI), you can include these in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response`. 
We will show examples of this in [Custom Agents](../custom-agents.ipynb).\n",
+ "\n",
+ "\n",
+ "You can read about the full set of messages supported in AgentChat in the {py:mod}`~autogen_agentchat.messages` module. \n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": ".venv",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
\ No newline at end of file
diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb
index 653e2eb9b16a..5defef4a0409 100644
--- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb
+++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb
@@ -10,7 +10,7 @@
 "\n",
 "AgentChat supports several termination conditions by providing a base {py:class}`~autogen_agentchat.base.TerminationCondition` class and several implementations that inherit from it.\n",
 "\n",
- "A termination condition is a callable that takes a sequence of {py:class}`~autogen_agentchat.messages.BaseMessage` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n",
+ "A termination condition is a callable that takes a sequence of {py:class}`~autogen_agentchat.messages.ChatMessage` or {py:class}`~autogen_agentchat.messages.AgentEvent` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n",
 "Once a termination condition has been reached, it must be reset by calling {py:meth}`~autogen_agentchat.base.TerminationCondition.reset` before it can be used again.\n",
 "\n",
 "Some important things to note about termination conditions: \n",
@@ -308,7 +308,7 @@
 "from typing import Sequence\n",
 "\n",
 "from autogen_agentchat.base import TerminatedException, TerminationCondition\n",
- "from autogen_agentchat.messages import BaseMessage, StopMessage, ToolCallExecutionEvent\n",
+ "from autogen_agentchat.messages import AgentEvent, ChatMessage, StopMessage, ToolCallExecutionEvent\n",
 "from autogen_core import Component\n",
 "from pydantic import BaseModel\n",
 "from typing_extensions import Self\n",
@@ -336,7 +336,7 @@
 " def terminated(self) -> bool:\n",
 " return self._terminated\n",
 "\n",
- " async def __call__(self, messages: Sequence[BaseMessage]) -> StopMessage | None:\n",
+ " async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None:\n",
 " if self._terminated:\n",
 " raise TerminatedException(\"Termination condition has already been reached\")\n",
 " for message in messages:\n",
@@ -514,4 +514,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py
index 61c768cb20ba..bcbf9f061578 100644
--- a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py
+++ 
b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py @@ -6,7 +6,7 @@ from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response from autogen_agentchat.messages import ( - BaseChatMessage, + ChatMessage, TextMessage, ) from autogen_agentchat.utils import remove_images @@ -84,10 +84,10 @@ def __init__( self._browser = MarkdownFileBrowser(viewport_size=1024 * 5, base_path=base_path) @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return (TextMessage,) - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: for chat_message in messages: self._chat_history.append(chat_message.content_to_model_message()) try: diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py index 896e64e3af3d..b0ba54170611 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py @@ -24,8 +24,8 @@ from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response from autogen_agentchat.messages import ( - BaseChatMessage, - BaseMessage, + AgentEvent, + ChatMessage, TextMessage, ToolCallExecutionEvent, ToolCallRequestEvent, @@ -353,7 +353,7 @@ async def _retrieve_initial_state(self) -> None: self._initial_message_ids = initial_message_ids @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: """The types of messages that the assistant agent produces.""" return (TextMessage,) @@ -392,7 +392,7 @@ async def _execute_tool_call(self, tool_call: FunctionCall, cancellation_token: result = await tool.run_json(arguments, cancellation_token) return tool.return_value_as_string(result) - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: """Handle incoming messages and return a response.""" async for message in self.on_messages_stream(messages, cancellation_token): @@ -401,8 +401,8 @@ async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_to raise AssertionError("The stream should have returned the final result.") async def on_messages_stream( - self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[BaseMessage | Response, None]: + self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: """Handle incoming messages and return a response.""" await self._ensure_initialized() @@ -411,7 +411,7 @@ async def on_messages_stream( await self.handle_incoming_message(message, cancellation_token) # Inner messages for tool calls - inner_messages: List[BaseMessage] = [] + inner_messages: List[ChatMessage | AgentEvent] = [] # Create and start a run run: Run = await cancellation_token.link_future( @@ -518,7 +518,7 @@ async def on_messages_stream( chat_message = TextMessage(source=self.name, 
content=text_content[0].text.value) yield Response(chat_message=chat_message, inner_messages=inner_messages) - async def handle_incoming_message(self, message: BaseChatMessage, cancellation_token: CancellationToken) -> None: + async def handle_incoming_message(self, message: ChatMessage, cancellation_token: CancellationToken) -> None: """Handle regular text messages by adding them to the thread.""" content: str | List[MessageContentPartParam] | None = None llm_message = message.content_to_model_message() diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py index c4a12adae24a..5be0cfdbaa0d 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py @@ -24,7 +24,7 @@ import PIL.Image from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response -from autogen_agentchat.messages import BaseChatMessage, BaseMessage, MultiModalMessage, TextMessage +from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage, TextMessage from autogen_agentchat.utils import content_to_str, remove_images from autogen_core import EVENT_LOGGER_NAME, CancellationToken, Component, ComponentModel, FunctionCall from autogen_core import Image as AGImage @@ -385,7 +385,7 @@ async def _set_debug_dir(self, debug_dir: str | None) -> None: ) @property - def produced_message_types(self) -> Sequence[type[BaseChatMessage]]: + def produced_message_types(self) -> Sequence[type[ChatMessage]]: return (MultiModalMessage,) async def on_reset(self, cancellation_token: CancellationToken) -> None: @@ -422,19 +422,19 @@ async def on_reset(self, cancellation_token: CancellationToken) -> None: ) ) - async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response: + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: async for message in self.on_messages_stream(messages, cancellation_token): if isinstance(message, Response): return message raise AssertionError("The stream should have returned the final result.") async def on_messages_stream( - self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[BaseMessage | Response, None]: + self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: for chat_message in messages: self._chat_history.append(chat_message.content_to_model_message()) - self.inner_messages: List[BaseMessage] = [] + self.inner_messages: List[ChatMessage | AgentEvent] = [] self.model_usage: List[RequestUsage] = [] try: content = await self._generate_reply(cancellation_token=cancellation_token) diff --git a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py index 4ff738ffbe92..74bcf3d4435b 100644 --- a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py +++ b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py @@ -4,7 +4,7 @@ from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.base import TaskResult -from 
autogen_agentchat.messages import BaseMessage, TextMessage +from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage from autogen_core.models import ( ChatCompletionClient, LLMMessage, @@ -190,8 +190,8 @@ async def _assign_task_to_assistant_agent(self, task: str) -> Tuple[Any, Any]: # Get the agent's response to the task. task_result: TaskResult = await assistant_agent.run(task=TextMessage(content=task, source="User")) - messages: Sequence[BaseMessage] = task_result.messages - message: BaseMessage = messages[-1] + messages: Sequence[ChatMessage | AgentEvent] = task_result.messages + message = messages[-1] response_str = message.content # Log the model call diff --git a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py index 3a69cb0e1018..a044931492d7 100644 --- a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py +++ b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py @@ -5,7 +5,7 @@ from typing import Any, Dict, List, Mapping, Optional, Sequence, TypedDict from autogen_agentchat.base import TaskResult -from autogen_agentchat.messages import BaseMessage +from autogen_agentchat.messages import AgentEvent, ChatMessage from autogen_core import Image from autogen_core.models import ( AssistantMessage, @@ -343,7 +343,7 @@ def log_model_task( if self.level > self.levels["INFO"]: return None - messages: Sequence[BaseMessage] = task_result.messages + messages: Sequence[ChatMessage | AgentEvent] = task_result.messages message = messages[-1] response_str = message.content if not isinstance(response_str, str): diff --git a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py index acdaa215b737..addb125d6806 100644 --- a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py +++ b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py @@ -13,7 +13,8 @@ from autogen_agentchat.base import Response, TaskResult from autogen_agentchat.messages import ( - BaseMessage, + AgentEvent, + ChatMessage, ModelClientStreamingChunkEvent, MultiModalMessage, UserInputRequestedEvent, @@ -54,7 +55,7 @@ def aprint(output: str, end: str = "\n") -> Awaitable[None]: return asyncio.to_thread(print, output, end=end) -def _extract_message_content(message: BaseMessage) -> Tuple[List[str], List[Image]]: +def _extract_message_content(message: ChatMessage | AgentEvent) -> Tuple[List[str], List[Image]]: if isinstance(message, MultiModalMessage): text_parts = [item for item in message.content if isinstance(item, str)] image_parts = [item for item in message.content if isinstance(item, Image)] @@ -98,7 +99,7 @@ async def _aprint_message_content( async def RichConsole( - stream: AsyncGenerator[BaseMessage | T, None], + stream: AsyncGenerator[ChatMessage | AgentEvent | T, None], *, no_inline_images: bool = False, output_stats: bool = False, @@ -115,7 +116,7 @@ async def RichConsole( It will be improved in future releases. Args: - stream (AsyncGenerator[BaseMessage | TaskResult, None] | AsyncGenerator[BaseMessage | Response, None]): Message stream to render. + stream (AsyncGenerator[ChatMessage | AgentEvent | TaskResult, None] | AsyncGenerator[ChatMessage | AgentEvent | Response, None]): Message stream to render. 
This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. no_inline_images (bool, optional): If terminal is iTerm2 will render images inline. Use this to disable this behavior. Defaults to False. output_stats (bool, optional): (Experimental) If True, will output a summary of the messages and inline token usage info. Defaults to False. diff --git a/python/packages/autogen-ext/tests/test_openai_assistant_agent.py b/python/packages/autogen-ext/tests/test_openai_assistant_agent.py index c557e19e8b8f..43bd3447737a 100644 --- a/python/packages/autogen-ext/tests/test_openai_assistant_agent.py +++ b/python/packages/autogen-ext/tests/test_openai_assistant_agent.py @@ -8,7 +8,7 @@ import aiofiles import pytest -from autogen_agentchat.messages import BaseChatMessage, TextMessage +from autogen_agentchat.messages import ChatMessage, TextMessage from autogen_core import CancellationToken from autogen_core.tools._base import BaseTool, Tool from autogen_ext.agents.openai import OpenAIAssistantAgent @@ -81,7 +81,7 @@ def __init__(self, id: str, text: str): class FakeCursorPage: - def __init__(self, data: List[BaseChatMessage | FakeMessage]) -> None: + def __init__(self, data: List[ChatMessage | FakeMessage]) -> None: self.data = data def has_next_page(self) -> bool: diff --git a/python/packages/autogen-studio/autogenstudio/datamodel/types.py b/python/packages/autogen-studio/autogenstudio/datamodel/types.py index 71a62a7124e8..44306dacf095 100644 --- a/python/packages/autogen-studio/autogenstudio/datamodel/types.py +++ b/python/packages/autogen-studio/autogenstudio/datamodel/types.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Literal, Optional from autogen_agentchat.base import TaskResult -from autogen_agentchat.messages import BaseChatMessage +from autogen_agentchat.messages import ChatMessage from autogen_core import ComponentModel from autogen_ext.models.openai import OpenAIChatCompletionClient from pydantic import BaseModel, ConfigDict, SecretStr @@ -21,7 +21,7 @@ class TeamResult(BaseModel): duration: float -class LLMCallEventMessage(BaseChatMessage): +class LLMCallEventMessage(ChatMessage): source: str = "llm_call_event" content: str diff --git a/python/packages/autogen-studio/autogenstudio/teammanager/teammanager.py b/python/packages/autogen-studio/autogenstudio/teammanager/teammanager.py index b51d32d68e9a..94bd45428679 100644 --- a/python/packages/autogen-studio/autogenstudio/teammanager/teammanager.py +++ b/python/packages/autogen-studio/autogenstudio/teammanager/teammanager.py @@ -10,7 +10,7 @@ import yaml from autogen_agentchat.agents import UserProxyAgent from autogen_agentchat.base import TaskResult -from autogen_agentchat.messages import BaseMessage +from autogen_agentchat.messages import AgentEvent, ChatMessage from autogen_agentchat.teams import BaseGroupChat from autogen_core import EVENT_LOGGER_NAME, CancellationToken, ComponentModel from autogen_core.logging import LLMCallEvent @@ -102,7 +102,7 @@ async def run_stream( input_func: Optional[Callable] = None, cancellation_token: Optional[CancellationToken] = None, env_vars: Optional[List[EnvironmentVariable]] = None, - ) -> AsyncGenerator[Union[BaseMessage | LLMCallEvent, TeamResult], None]: + ) -> AsyncGenerator[Union[ChatMessage | AgentEvent | LLMCallEvent, TeamResult], None]: """Stream team execution results""" start_time = time.time() team = None diff --git a/python/packages/autogen-studio/autogenstudio/web/managers/connection.py 
b/python/packages/autogen-studio/autogenstudio/web/managers/connection.py index 4351b634f528..2ca272f56adc 100644 --- a/python/packages/autogen-studio/autogenstudio/web/managers/connection.py +++ b/python/packages/autogen-studio/autogenstudio/web/managers/connection.py @@ -6,7 +6,8 @@ from autogen_agentchat.base._task import TaskResult from autogen_agentchat.messages import ( - BaseMessage, + AgentEvent, + ChatMessage, HandoffMessage, ModelClientStreamingChunkEvent, MultiModalMessage, @@ -158,7 +159,7 @@ async def start_stream(self, run_id: int, task: str, team_config: dict) -> None: finally: self._cancellation_tokens.pop(run_id, None) - async def _save_message(self, run_id: int, message: BaseMessage) -> None: + async def _save_message(self, run_id: int, message: AgentEvent | ChatMessage) -> None: """Save a message to the database""" run = await self._get_run(run_id) diff --git a/python/samples/agentchat_streamlit/agent.py b/python/samples/agentchat_streamlit/agent.py index a53f22d9f954..2d03894914a2 100644 --- a/python/samples/agentchat_streamlit/agent.py +++ b/python/samples/agentchat_streamlit/agent.py @@ -1,6 +1,6 @@ import yaml from autogen_agentchat.agents import AssistantAgent -from autogen_agentchat.messages import BaseTextChatMessage, TextMessage +from autogen_agentchat.messages import TextChatMessage, TextMessage from autogen_core import CancellationToken from autogen_core.models import ChatCompletionClient @@ -22,5 +22,5 @@ async def chat(self, prompt: str) -> str: [TextMessage(content=prompt, source="user")], CancellationToken(), ) - assert isinstance(response.chat_message, BaseTextChatMessage) + assert isinstance(response.chat_message, TextChatMessage) return response.chat_message.content From 864f75c453ccb8a1da948897c9bcbd7292584e7c Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 11:30:41 -0700 Subject: [PATCH 14/26] revert changes to agbench --- .../GAIA/Templates/SelectorGroupChat/scenario.py | 14 ++++++++------ .../agbench/src/agbench/linter/__init__.py | 2 +- .../packages/agbench/src/agbench/linter/_base.py | 5 ++--- python/packages/agbench/src/agbench/linter/cli.py | 8 +++----- .../agbench/src/agbench/linter/coders/oai_coder.py | 8 +++++--- 5 files changed, 19 insertions(+), 18 deletions(-) diff --git a/python/packages/agbench/benchmarks/GAIA/Templates/SelectorGroupChat/scenario.py b/python/packages/agbench/benchmarks/GAIA/Templates/SelectorGroupChat/scenario.py index 6c2e3a5292e4..e2e1d8fae009 100644 --- a/python/packages/agbench/benchmarks/GAIA/Templates/SelectorGroupChat/scenario.py +++ b/python/packages/agbench/benchmarks/GAIA/Templates/SelectorGroupChat/scenario.py @@ -2,20 +2,22 @@ import os import yaml import warnings -from typing import List, Sequence +from typing import Sequence from autogen_ext.agents.magentic_one import MagenticOneCoderAgent from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.conditions import MaxMessageTermination from autogen_agentchat.ui import Console from autogen_agentchat.utils import content_to_str +from autogen_core.models import ModelFamily from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor +from autogen_agentchat.conditions import TextMentionTermination from autogen_agentchat.base import TerminationCondition, TerminatedException from autogen_core.models import ChatCompletionClient from autogen_ext.agents.web_surfer import MultimodalWebSurfer from autogen_ext.agents.file_surfer import FileSurfer from autogen_agentchat.agents import CodeExecutorAgent -from 
autogen_agentchat.messages import ChatMessage, AgentEvent, TextMessage, MultiModalMessage, StopMessage -from autogen_core.models import LLMMessage, UserMessage +from autogen_agentchat.messages import TextMessage, AgentEvent, ChatMessage, HandoffMessage, MultiModalMessage, StopMessage +from autogen_core.models import LLMMessage, UserMessage, AssistantMessage # Suppress warnings about the requests.Session() not being closed warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning) @@ -90,7 +92,7 @@ async def main() -> None: result = await Console(stream) # Do one more inference to format the results - final_context: List[LLMMessage] = [] + final_context: Sequence[LLMMessage] = [] for message in result.messages: if isinstance(message, TextMessage): final_context.append(UserMessage(content=message.content, source=message.source)) @@ -133,13 +135,13 @@ def __init__(self, prompt: str, model_client: ChatCompletionClient, termination_ self._model_client = model_client self._termination_phrase = termination_phrase self._terminated = False - self._context: List[LLMMessage] = [] + self._context: Sequence[LLMMessage] = [] @property def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") diff --git a/python/packages/agbench/src/agbench/linter/__init__.py b/python/packages/agbench/src/agbench/linter/__init__.py index a104962445f6..797b7f272a5b 100644 --- a/python/packages/agbench/src/agbench/linter/__init__.py +++ b/python/packages/agbench/src/agbench/linter/__init__.py @@ -1,4 +1,4 @@ # __init__.py -from ._base import BaseQualitativeCoder, Code, CodedDocument, Document +from ._base import Code, Document, CodedDocument, BaseQualitativeCoder __all__ = ["Code", "Document", "CodedDocument", "BaseQualitativeCoder"] diff --git a/python/packages/agbench/src/agbench/linter/_base.py b/python/packages/agbench/src/agbench/linter/_base.py index c59e826d201b..4f6209b7809c 100644 --- a/python/packages/agbench/src/agbench/linter/_base.py +++ b/python/packages/agbench/src/agbench/linter/_base.py @@ -1,8 +1,7 @@ -import hashlib import json +import hashlib import re -from typing import List, Optional, Protocol, Set - +from typing import Protocol, List, Set, Optional from pydantic import BaseModel, Field diff --git a/python/packages/agbench/src/agbench/linter/cli.py b/python/packages/agbench/src/agbench/linter/cli.py index 14f428929b17..426890258b69 100644 --- a/python/packages/agbench/src/agbench/linter/cli.py +++ b/python/packages/agbench/src/agbench/linter/cli.py @@ -1,10 +1,8 @@ -import argparse import os -from typing import List, Optional, Sequence - +import argparse +from typing import List, Sequence, Optional from openai import OpenAI - -from ._base import CodedDocument, Document +from ._base import Document, CodedDocument from .coders.oai_coder import OAIQualitativeCoder diff --git a/python/packages/agbench/src/agbench/linter/coders/oai_coder.py b/python/packages/agbench/src/agbench/linter/coders/oai_coder.py index 01322e0c5ccc..374093d3d81b 100644 --- a/python/packages/agbench/src/agbench/linter/coders/oai_coder.py +++ b/python/packages/agbench/src/agbench/linter/coders/oai_coder.py @@ -1,11 +1,13 @@ import os import re -from typing import List, Optional, Set -from openai import OpenAI +from typing 
import List, Set, Optional from pydantic import BaseModel -from .._base import BaseQualitativeCoder, Code, CodedDocument, Document +from openai import OpenAI + +from .._base import CodedDocument, Document, Code +from .._base import BaseQualitativeCoder class CodeList(BaseModel): From adc3350c6bdd83ea8d421ac94ba65a49e1292b31 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 11:34:08 -0700 Subject: [PATCH 15/26] revert changes to ags --- .../autogen-studio/autogenstudio/datamodel/types.py | 4 ++-- .../autogen-studio/autogenstudio/teammanager/teammanager.py | 6 +++--- .../autogen-studio/autogenstudio/web/managers/connection.py | 5 +++-- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/python/packages/autogen-studio/autogenstudio/datamodel/types.py b/python/packages/autogen-studio/autogenstudio/datamodel/types.py index 44306dacf095..71a62a7124e8 100644 --- a/python/packages/autogen-studio/autogenstudio/datamodel/types.py +++ b/python/packages/autogen-studio/autogenstudio/datamodel/types.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Literal, Optional from autogen_agentchat.base import TaskResult -from autogen_agentchat.messages import ChatMessage +from autogen_agentchat.messages import BaseChatMessage from autogen_core import ComponentModel from autogen_ext.models.openai import OpenAIChatCompletionClient from pydantic import BaseModel, ConfigDict, SecretStr @@ -21,7 +21,7 @@ class TeamResult(BaseModel): duration: float -class LLMCallEventMessage(ChatMessage): +class LLMCallEventMessage(BaseChatMessage): source: str = "llm_call_event" content: str diff --git a/python/packages/autogen-studio/autogenstudio/teammanager/teammanager.py b/python/packages/autogen-studio/autogenstudio/teammanager/teammanager.py index 94bd45428679..96dbaec1098c 100644 --- a/python/packages/autogen-studio/autogenstudio/teammanager/teammanager.py +++ b/python/packages/autogen-studio/autogenstudio/teammanager/teammanager.py @@ -9,10 +9,10 @@ import aiofiles import yaml from autogen_agentchat.agents import UserProxyAgent -from autogen_agentchat.base import TaskResult +from autogen_agentchat.base import TaskResult, Team from autogen_agentchat.messages import AgentEvent, ChatMessage from autogen_agentchat.teams import BaseGroupChat -from autogen_core import EVENT_LOGGER_NAME, CancellationToken, ComponentModel +from autogen_core import EVENT_LOGGER_NAME, CancellationToken, Component, ComponentModel from autogen_core.logging import LLMCallEvent from ..datamodel.types import EnvironmentVariable, LLMCallEventMessage, TeamResult @@ -102,7 +102,7 @@ async def run_stream( input_func: Optional[Callable] = None, cancellation_token: Optional[CancellationToken] = None, env_vars: Optional[List[EnvironmentVariable]] = None, - ) -> AsyncGenerator[Union[ChatMessage | AgentEvent | LLMCallEvent, TeamResult], None]: + ) -> AsyncGenerator[Union[AgentEvent | ChatMessage | LLMCallEvent, ChatMessage, TeamResult], None]: """Stream team execution results""" start_time = time.time() team = None diff --git a/python/packages/autogen-studio/autogenstudio/web/managers/connection.py b/python/packages/autogen-studio/autogenstudio/web/managers/connection.py index 2ca272f56adc..458620464dbb 100644 --- a/python/packages/autogen-studio/autogenstudio/web/managers/connection.py +++ b/python/packages/autogen-studio/autogenstudio/web/managers/connection.py @@ -2,7 +2,7 @@ import logging import traceback from datetime import datetime, timezone -from typing import Any, Callable, Dict, Optional +from typing import Any, Callable, 
Dict, Optional, Union from autogen_agentchat.base._task import TaskResult from autogen_agentchat.messages import ( @@ -17,6 +17,7 @@ ToolCallRequestEvent, ) from autogen_core import CancellationToken +from autogen_core import Image as AGImage from fastapi import WebSocket, WebSocketDisconnect from ...database import DatabaseManager @@ -159,7 +160,7 @@ async def start_stream(self, run_id: int, task: str, team_config: dict) -> None: finally: self._cancellation_tokens.pop(run_id, None) - async def _save_message(self, run_id: int, message: AgentEvent | ChatMessage) -> None: + async def _save_message(self, run_id: int, message: Union[AgentEvent | ChatMessage, ChatMessage]) -> None: """Save a message to the database""" run = await self._get_run(run_id) From 1af90dbbcb5677c521e39397964caa48ed6cd1c4 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 11:45:12 -0700 Subject: [PATCH 16/26] revert changes --- .../src/autogen_agentchat/messages.py | 6 + .../agentchat-user-guide/custom-agents.ipynb | 32 +- .../agentchat-user-guide/migration-guide.md | 17 +- .../selector-group-chat.ipynb | 2048 ++++++++--------- .../tutorial/agents.ipynb | 1690 +++++++------- .../tutorial/messages.ipynb | 255 +- .../tutorial/termination.ipynb | 1030 ++++----- .../src/autogen_ext/tools/http/_http_tool.py | 2 +- .../src/autogen_ext/tools/mcp/_factory.py | 2 +- .../src/autogen_ext/ui/_rich_console.py | 12 +- python/samples/agentchat_chess_game/main.py | 3 +- python/samples/agentchat_streamlit/agent.py | 4 +- 12 files changed, 2553 insertions(+), 2548 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index 8f08609b6c66..385166afbb20 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -388,11 +388,17 @@ def create(self, data: Mapping[str, Any]) -> ChatMessage | AgentEvent: assert issubclass(message_class, ChatMessage) or issubclass(message_class, AgentEvent) return message_class.load(data) +# For backward compatibility +BaseAgentEvent = AgentEvent +BaseChatMessage = ChatMessage + __all__ = [ "AgentEvent", "BaseMessage", "ChatMessage", + "BaseChatMessage", + "BaseAgentEvent", "AgentEvent", "TextChatMessage", "ChatMessage", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb index dfcbf2e997d7..1a526ee6106b 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb @@ -34,7 +34,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [ { @@ -77,8 +77,8 @@ "\n", " async def on_messages_stream(\n", " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]:\n", - " inner_messages: List[ChatMessage | AgentEvent] = []\n", + " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", + " inner_messages: List[AgentEvent | ChatMessage] = []\n", " for i in range(self._count, 0, -1):\n", " msg = TextMessage(content=f\"{i}...\", source=self.name)\n", " inner_messages.append(msg)\n", @@ -206,7 +206,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, 
"metadata": {}, "outputs": [ { @@ -319,7 +319,7 @@ "\n", "from autogen_agentchat.agents import BaseChatAgent\n", "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import AgentEvent, ChatMessage, StructuredMessage\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", "from autogen_core import CancellationToken\n", "from autogen_core.model_context import UnboundedChatCompletionContext\n", "from autogen_core.models import AssistantMessage, RequestUsage, UserMessage\n", @@ -360,14 +360,10 @@ "\n", " async def on_messages_stream(\n", " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]:\n", + " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", " # Add messages to the model context\n", " for msg in messages:\n", - " if isinstance(msg, StructuredMessage):\n", - " serialized_content = msg.content.model_dump_json()\n", - " await self._model_context.add_message(UserMessage(content=serialized_content, source=msg.source))\n", - " else:\n", - " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", + " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", "\n", " # Get conversation history\n", " history = [\n", @@ -545,7 +541,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 40, "metadata": {}, "outputs": [], "source": [ @@ -603,14 +599,10 @@ "\n", " async def on_messages_stream(\n", " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]:\n", + " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", " # Add messages to the model context\n", " for msg in messages:\n", - " if isinstance(msg, StructuredMessage):\n", - " serialized_content = msg.content.model_dump_json()\n", - " await self._model_context.add_message(UserMessage(content=serialized_content, source=msg.source))\n", - " else:\n", - " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", + " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", "\n", " # Get conversation history\n", " history = [\n", @@ -739,9 +731,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.3" + "version": "3.11.9" } }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md index 83bc86b989de..d0533435fe42 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md @@ -691,7 +691,7 @@ async def main() -> None: if user_input == "exit": break response = await assistant.on_messages([TextMessage(content=user_input, source="user")], CancellationToken()) - print("Assistant:", response.chat_message.content_to_text()) + print("Assistant:", response.chat_message.content) await model_client.close() asyncio.run(main()) @@ -742,7 +742,8 @@ You can use the following conversion functions to convert between a v0.4 message from typing import Any, Dict, List, Literal from autogen_agentchat.messages import ( - BaseMessage, + AgentEvent, 
+ ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, @@ -756,14 +757,14 @@ from autogen_core.models import FunctionExecutionResult def convert_to_v02_message( - message: BaseMessage, + message: AgentEvent | ChatMessage, role: Literal["assistant", "user", "tool"], image_detail: Literal["auto", "high", "low"] = "auto", ) -> Dict[str, Any]: """Convert a v0.4 AgentChat message to a v0.2 message. Args: - message (BaseMessage): The message to convert. + message (AgentEvent | ChatMessage): The message to convert. role (Literal["assistant", "user", "tool"]): The role of the message. image_detail (Literal["auto", "high", "low"], optional): The detail level of image content in multi-modal message. Defaults to "auto". @@ -809,7 +810,7 @@ def convert_to_v02_message( return v02_message -def convert_to_v04_message(message: Dict[str, Any]) -> BaseMessage: +def convert_to_v04_message(message: Dict[str, Any]) -> AgentEvent | ChatMessage: """Convert a v0.2 message to a v0.4 AgentChat message.""" if "tool_calls" in message: tool_calls: List[FunctionCall] = [] @@ -1064,7 +1065,7 @@ import asyncio from typing import Sequence from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination -from autogen_agentchat.messages import BaseMessage +from autogen_agentchat.messages import AgentEvent, ChatMessage from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.ui import Console from autogen_ext.models.openai import OpenAIChatCompletionClient @@ -1140,7 +1141,7 @@ def create_team(model_client : OpenAIChatCompletionClient) -> SelectorGroupChat: # The selector function is a function that takes the current message thread of the group chat # and returns the next speaker's name. If None is returned, the LLM-based selection method will be used. - def selector_func(messages: Sequence[BaseMessage]) -> str | None: + def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None: if messages[-1].source != planning_agent.name: return planning_agent.name # Always return to the planning agent after the other agents have spoken. return None @@ -1330,7 +1331,7 @@ async def main() -> None: if user_input == "exit": break response = await assistant.on_messages([TextMessage(content=user_input, source="user")], CancellationToken()) - print("Assistant:", response.chat_message.content_to_text()) + print("Assistant:", response.chat_message.content) await model_client.close() diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb index dc1212b73bdf..522c26b2098c 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb @@ -1,1026 +1,1026 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Selector Group Chat" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` implements a team where participants take turns broadcasting messages to all other members. 
A generative model (e.g., an LLM) selects the next speaker based on the shared context, enabling dynamic, context-aware collaboration.\n",
- "\n",
- "Key features include:\n",
- "\n",
- "- Model-based speaker selection\n",
- "- Configurable participant roles and descriptions\n",
- "- Prevention of consecutive turns by the same speaker (optional)\n",
- "- Customizable selection prompting\n",
- "- Customizable selection function to override the default model-based selection\n",
- "- Customizable candidate function to narrow down the set of agents for selection using a model\n",
- "\n",
- "```{note}\n",
- "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a high-level API. For more control and customization, refer to the [Group Chat Pattern](../core-user-guide/design-patterns/group-chat.ipynb) in the Core API documentation to implement your own group chat logic.\n",
- "```\n",
- "\n",
- "## How Does it Work?\n",
- "\n",
- "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a group chat similar to {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n",
- "but with a model-based next speaker selection mechanism.\n",
- "When the team receives a task through {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream`,\n",
- "the following steps are executed:\n",
- "\n",
- "1. The team analyzes the current conversation context, including the conversation history and participants' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes, to determine the next speaker using a model. By default, the team will not select the same speaker consecutively unless it is the only agent available. This can be changed by setting `allow_repeated_speaker=True`. You can also override the model by providing a custom selection function.\n",
- "2. The team prompts the selected speaker agent to provide a response, which is then **broadcast** to all other participants.\n",
- "3. The termination condition is checked to determine if the conversation should end; if not, the process repeats from step 1.\n",
- "4. When the conversation ends, the team returns the {py:class}`~autogen_agentchat.base.TaskResult` containing the conversation history from this task.\n",
- "\n",
- "Once the team finishes the task, the conversation context is kept within the team and all participants, so the next task can continue from the previous conversation context.\n",
- "You can reset the conversation context by calling {py:meth}`~autogen_agentchat.teams.BaseGroupChat.reset`.\n",
- "\n",
- "In this section, we will demonstrate how to use {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with a simple example for a web search and data analysis task."
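To make the custom selection function mentioned above concrete, here is a minimal sketch mirroring the `selector_func` pattern shown in the migration guide earlier in this patch; the agent name is illustrative:

```python
from typing import Sequence

from autogen_agentchat.messages import AgentEvent, ChatMessage


def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:
    # Force a return to the planning agent after any other agent has spoken.
    if messages[-1].source != "PlanningAgent":
        return "PlanningAgent"
    return None  # Defer to the default model-based selection.


# The function is passed to the team at construction time, e.g.:
# SelectorGroupChat(..., selector_func=selector_func)
```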
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Example: Web Search/Analysis"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from typing import List, Sequence\n",
- "\n",
- "from autogen_agentchat.agents import AssistantAgent, UserProxyAgent\n",
- "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n",
- "from autogen_agentchat.messages import AgentEvent, ChatMessage\n",
- "from autogen_agentchat.teams import SelectorGroupChat\n",
- "from autogen_agentchat.ui import Console\n",
- "from autogen_ext.models.openai import OpenAIChatCompletionClient"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Agents\n",
- "\n",
- "![Selector Group Chat](selector-group-chat.svg)\n",
- "\n",
- "This system uses three specialized agents:\n",
- "\n",
- "- **Planning Agent**: The strategic coordinator that breaks down complex tasks into manageable subtasks. \n",
- "- **Web Search Agent**: An information retrieval specialist that interfaces with the `search_web_tool`.\n",
- "- **Data Analyst Agent**: A specialist agent for performing calculations, equipped with `percentage_change_tool`. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The tools `search_web_tool` and `percentage_change_tool` are external tools that the agents can use to perform their tasks."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Note: This example uses mock tools instead of real APIs for demonstration purposes\n",
- "def search_web_tool(query: str) -> str:\n",
- " if \"2006-2007\" in query:\n",
- " return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n",
- " Udonis Haslem: 844 points\n",
- " Dwayne Wade: 1397 points\n",
- " James Posey: 550 points\n",
- " ...\n",
- " \"\"\"\n",
- " elif \"2007-2008\" in query:\n",
- " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n",
- " elif \"2008-2009\" in query:\n",
- " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n",
- " return \"No data found.\"\n",
- "\n",
- "\n",
- "def percentage_change_tool(start: float, end: float) -> float:\n",
- " return ((end - start) / start) * 100"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Let's create the specialized agents using the {py:class}`~autogen_agentchat.agents.AssistantAgent` class.\n",
- "It is important to note that the agents' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes are used by the model to determine the next speaker,\n",
- "so it is recommended to provide meaningful names and descriptions."
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "\n", - "planning_agent = AssistantAgent(\n", - " \"PlanningAgent\",\n", - " description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n", - " model_client=model_client,\n", - " system_message=\"\"\"\n", - " You are a planning agent.\n", - " Your job is to break down complex tasks into smaller, manageable subtasks.\n", - " Your team members are:\n", - " WebSearchAgent: Searches for information\n", - " DataAnalystAgent: Performs calculations\n", - "\n", - " You only plan and delegate tasks - you do not execute them yourself.\n", - "\n", - " When assigning tasks, use this format:\n", - " 1. <agent> : <task>\n", - "\n", - " After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n", - " \"\"\",\n", - ")\n", - "\n", - "web_search_agent = AssistantAgent(\n", - " \"WebSearchAgent\",\n", - " description=\"An agent for searching information on the web.\",\n", - " tools=[search_web_tool],\n", - " model_client=model_client,\n", - " system_message=\"\"\"\n", - " You are a web search agent.\n", - " Your only tool is search_web_tool - use it to find information.\n", - " You make only one search call at a time.\n", - " Once you have the results, you never do calculations based on them.\n", - " \"\"\",\n", - ")\n", - "\n", - "data_analyst_agent = AssistantAgent(\n", - " \"DataAnalystAgent\",\n", - " description=\"An agent for performing calculations.\",\n", - " model_client=model_client,\n", - " tools=[percentage_change_tool],\n", - " system_message=\"\"\"\n", - " You are a data analyst.\n", - " Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n", - " If you have not seen the data, ask for it.\n", - " \"\"\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` returns the\n", - "tool output as the response. If your tool does not return a well-formed\n", - "string in natural language format, you may want to add a reflection step\n", - "within the agent by setting `reflect_on_tool_use=True` when creating the agent.\n", - "This will allow the agent to reflect on the tool output and provide a natural\n", - "language response.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Workflow\n", - "\n", - "1. The task is received by the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` which, based on agent descriptions, selects the most appropriate agent to handle the initial task (typically the Planning Agent).\n", - "\n", - "2. The **Planning Agent** analyzes the task and breaks it down into subtasks, assigning each to the most appropriate agent using the format:\n", - " `<agent> : <task>`\n", - "\n", - "3. Based on the conversation context and agent descriptions, the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` manager dynamically selects the next agent to handle their assigned subtask.\n", - "\n", - "4. The **Web Search Agent** performs searches one at a time, storing results in the shared conversation history.\n", - "\n", - "5. The **Data Analyst** processes the gathered information using available calculation tools when selected.\n", - "\n", - "6. 
The workflow continues with agents being dynamically selected until either:\n", - " - The Planning Agent determines all subtasks are complete and sends \"TERMINATE\"\n", - " - An alternative termination condition is met (e.g., a maximum number of messages)\n", - "\n", - "When defining your agents, make sure to include a helpful {py:attr}`~autogen_agentchat.base.ChatAgent.description` since this is used to decide which agent to select next." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Termination Conditions\n", - "\n", - "Let's use two termination conditions:\n", - "{py:class}`~autogen_agentchat.conditions.TextMentionTermination` to end the conversation when the Planning Agent sends \"TERMINATE\",\n", - "and {py:class}`~autogen_agentchat.conditions.MaxMessageTermination` to limit the conversation to 25 messages to avoid an infinite loop." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "text_mention_termination = TextMentionTermination(\"TERMINATE\")\n", - "max_messages_termination = MaxMessageTermination(max_messages=25)\n", - "termination = text_mention_termination | max_messages_termination" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Selector Prompt\n", - "\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` uses a model to select\n", - "the next speaker based on the conversation context.\n", - "We will use a custom selector prompt to properly align with the workflow." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "selector_prompt = \"\"\"Select an agent to perform task.\n", - "\n", - "{roles}\n", - "\n", - "Current conversation context:\n", - "{history}\n", - "\n", - "Read the above conversation, then select an agent from {participants} to perform the next task.\n", - "Make sure the planner agent has assigned tasks before other agents start working.\n", - "Only select one agent.\n", - "\"\"\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{tip}\n", - "Try not to overload the model with too much instruction in the selector prompt.\n", - "\n", - "What is too much? It depends on the capabilities of the model you are using.\n", - "For GPT-4o and equivalents, you can use a selector prompt with a condition for when each speaker should be selected.\n", - "For smaller models such as Phi-4, you should keep the selector prompt as simple as possible,\n", - "such as the one used in this example.\n", - "\n", - "Generally, if you find yourself writing multiple conditions for each agent,\n", - "it is a sign that you should consider using a custom selection function,\n", - "or breaking down the task into smaller, sequential tasks to be handled by\n", - "separate agents or teams.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Running the Team\n", - "\n", - "Let's create the team with the agents, termination conditions, and custom selector prompt."
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination,\n", - " selector_prompt=selector_prompt,\n", - " allow_repeated_speaker=True, # Allow an agent to speak multiple turns in a row.\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we run the team with a task to find information about an NBA player." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", - "---------- PlanningAgent ----------\n", - "To complete this task, we need to perform the following subtasks:\n", - "\n", - "1. Find out which Miami Heat player had the highest points in the 2006-2007 season.\n", - "2. Gather data on this player's total rebounds for the 2007-2008 season.\n", - "3. Gather data on this player's total rebounds for the 2008-2009 season.\n", - "4. Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", - "\n", - "I'll assign these tasks accordingly:\n", - "\n", - "1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", - "2. WebSearchAgent: Find the total rebounds for this player in the 2007-2008 NBA season.\n", - "3. WebSearchAgent: Find the total rebounds for this player in the 2008-2009 NBA season.\n", - "4. 
DataAnalystAgent: Calculate the percentage change in total rebounds from the 2007-2008 season to the 2008-2009 season for this player.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_89tUNHaAM0kKQYPJLleGUKK7', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_89tUNHaAM0kKQYPJLleGUKK7', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- WebSearchAgent ----------\n", - "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\n", - "\n", - "Next, I will search for Dwyane Wade's total rebounds for the 2007-2008 season.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_RC55TkSjG3JXRuVOTPrcE1RL', arguments='{\"query\":\"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_RC55TkSjG3JXRuVOTPrcE1RL', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_pBXoABrErDow0rZjw3tjOZol', arguments='{\"query\":\"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pBXoABrErDow0rZjw3tjOZol', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_qMxxXtcJsiK8KFSSCx3zm0is', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_qMxxXtcJsiK8KFSSCx3zm0is', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "The player with the highest points for the Miami Heat in the 2006-2007 NBA season was Dwyane Wade, who scored 1,397 points. 
The percentage change in Dwyane Wade's total rebounds from 214 in the 2007-2008 season to 398 in the 2008-2009 season is approximately 85.98%.\n", - "\n", - "TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=220), metadata={}, content=\"To complete this task, we need to perform the following subtasks:\\n\\n1. Find out which Miami Heat player had the highest points in the 2006-2007 season.\\n2. Gather data on this player's total rebounds for the 2007-2008 season.\\n3. Gather data on this player's total rebounds for the 2008-2009 season.\\n4. Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nI'll assign these tasks accordingly:\\n\\n1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Find the total rebounds for this player in the 2007-2008 NBA season.\\n3. WebSearchAgent: Find the total rebounds for this player in the 2008-2009 NBA season.\\n4. DataAnalystAgent: Calculate the percentage change in total rebounds from the 2007-2008 season to the 2008-2009 season for this player.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=368, completion_tokens=27), metadata={}, content=[FunctionCall(id='call_89tUNHaAM0kKQYPJLleGUKK7', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_89tUNHaAM0kKQYPJLleGUKK7', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), ThoughtEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=\"The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\\n\\nNext, I will search for Dwyane Wade's total rebounds for the 2007-2008 season.\", type='ThoughtEvent'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=460, completion_tokens=83), metadata={}, content=[FunctionCall(id='call_RC55TkSjG3JXRuVOTPrcE1RL', arguments='{\"query\":\"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_RC55TkSjG3JXRuVOTPrcE1RL', is_error=False)], type='ToolCallExecutionEvent'), 
ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=585, completion_tokens=28), metadata={}, content=[FunctionCall(id='call_pBXoABrErDow0rZjw3tjOZol', arguments='{\"query\":\"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pBXoABrErDow0rZjw3tjOZol', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=496, completion_tokens=21), metadata={}, content=[FunctionCall(id='call_qMxxXtcJsiK8KFSSCx3zm0is', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_qMxxXtcJsiK8KFSSCx3zm0is', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=528, completion_tokens=80), metadata={}, content=\"The player with the highest points for the Miami Heat in the 2006-2007 NBA season was Dwyane Wade, who scored 1,397 points. The percentage change in Dwyane Wade's total rebounds from 214 in the 2007-2008 season to 398 in the 2008-2009 season is approximately 85.98%.\\n\\nTERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Use asyncio.run(...) if you are running this in a script.\n", - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, after the Web Search Agent conducts the necessary searches and the Data Analyst Agent completes the necessary calculations, we find that Dwayne Wade was the Miami Heat player with the highest points in the 2006-2007 season, and the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons is 85.98%!" 
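- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you want the result programmatically instead of printed to the console, {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` returns the {py:class}`~autogen_agentchat.base.TaskResult` directly. A minimal sketch (reusing the same `team` and `task` from above):\n", - "\n", - "```python\n", - "await team.reset()  # Start from a fresh conversation context.\n", - "result = await team.run(task=task)  # Same steps, without streaming to the console.\n", - "print(result.stop_reason)  # E.g., \"Text 'TERMINATE' mentioned\".\n", - "print(result.messages[-1].content)  # The final summary from the PlanningAgent.\n", - "```"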
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom Selector Function" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Oftentimes we want better control over the selection process.\n", - "To this end, we can set the `selector_func` argument with a custom selector function to override the default model-based selection.\n", - "This allows us to implement more complex selection logic and state-based transitions.\n", - "\n", - "For instance, we may want the Planning Agent to speak immediately after any specialized agent to check the progress.\n", - "\n", - "```{note}\n", - "Returning `None` from the custom selector function will use the default model-based selection.\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", - "---------- PlanningAgent ----------\n", - "To answer this question, we need to follow these steps: \n", - "\n", - "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", - "2. Retrieve the total rebounds of that player for the 2007-2008 and 2008-2009 seasons.\n", - "3. Calculate the percentage change in his total rebounds between the two seasons.\n", - "\n", - "Let's delegate these tasks:\n", - "\n", - "1. WebSearchAgent: Find the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", - "2. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2007-2008 NBA season.\n", - "3. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2008-2009 NBA season.\n", - "4. DataAnalystAgent: Calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for the player found.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_Pz82ndNLSV4cH0Sg6g7ArP4L', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_Pz82ndNLSV4cH0Sg6g7ArP4L')]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- PlanningAgent ----------\n", - "Great! Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season. Now, let's continue with the next tasks:\n", - "\n", - "2. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2007-2008 NBA season.\n", - "3. 
WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2008-2009 NBA season.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_3qv9so2DXFZIHtzqDIfXoFID', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Vh7zzzWUeiUAvaYjP0If0k1k', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_3qv9so2DXFZIHtzqDIfXoFID'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Vh7zzzWUeiUAvaYjP0If0k1k')]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- PlanningAgent ----------\n", - "Now let's calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for Dwyane Wade.\n", - "\n", - "4. DataAnalystAgent: Calculate the percentage change in total rebounds for Dwyane Wade between the 2007-2008 and 2008-2009 seasons.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_FXnPSr6JVGfAWs3StIizbt2V', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', call_id='call_FXnPSr6JVGfAWs3StIizbt2V')]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1397 points. The percentage change in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) is approximately 86.0%.\n", - "\n", - "TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=192), content=\"To answer this question, we need to follow these steps: \\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Retrieve the total rebounds of that player for the 2007-2008 and 2008-2009 seasons.\\n3. Calculate the percentage change in his total rebounds between the two seasons.\\n\\nLet's delegate these tasks:\\n\\n1. WebSearchAgent: Find the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2007-2008 NBA season.\\n3. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2008-2009 NBA season.\\n4. 
DataAnalystAgent: Calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for the player found.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=340, completion_tokens=27), content=[FunctionCall(id='call_Pz82ndNLSV4cH0Sg6g7ArP4L', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_Pz82ndNLSV4cH0Sg6g7ArP4L')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=420, completion_tokens=87), content=\"Great! Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season. Now, let's continue with the next tasks:\\n\\n2. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2007-2008 NBA season.\\n3. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2008-2009 NBA season.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=525, completion_tokens=71), content=[FunctionCall(id='call_3qv9so2DXFZIHtzqDIfXoFID', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Vh7zzzWUeiUAvaYjP0If0k1k', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_3qv9so2DXFZIHtzqDIfXoFID'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Vh7zzzWUeiUAvaYjP0If0k1k')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=569, completion_tokens=68), content=\"Now let's calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for Dwyane Wade.\\n\\n4. 
DataAnalystAgent: Calculate the percentage change in total rebounds for Dwyane Wade between the 2007-2008 and 2008-2009 seasons.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=627, completion_tokens=21), content=[FunctionCall(id='call_FXnPSr6JVGfAWs3StIizbt2V', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_FXnPSr6JVGfAWs3StIizbt2V')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=659, completion_tokens=76), content='Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1397 points. The percentage change in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) is approximately 86.0%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "def selector_func(messages: Sequence[ChatMessage | AgentEvent]) -> str | None:\n", - " if messages[-1].source != planning_agent.name:\n", - " return planning_agent.name\n", - " return None\n", - "\n", - "\n", - "# Reset the previous team and run the chat again with the selector function.\n", - "await team.reset()\n", - "team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination,\n", - " selector_prompt=selector_prompt,\n", - " allow_repeated_speaker=True,\n", - " selector_func=selector_func,\n", - ")\n", - "\n", - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see from the conversation log that the Planning Agent always speaks immediately after the specialized agents.\n", - "\n", - "```{tip}\n", - "Each participant agent only makes one step (executing tools, generating a response, etc.)\n", - "on each turn. 
\n", - "If you want an {py:class}`~autogen_agentchat.agents.AssistantAgent` to repeat\n", - "until it stop returning a {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage`\n", - "when it has finished running all the tools it needs to run, you can do so by\n", - "checking the last message and returning the agent if it is a\n", - "{py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage`.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom Candidate Function" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "One more possible requirement might be to automatically select the next speaker from a filtered list of agents.\n", - "For this, we can set `candidate_func` parameter with a custom candidate function to filter down the list of potential agents for speaker selection for each turn of groupchat.\n", - "\n", - "This allow us to restrict speaker selection to a specific set of agents after a given agent.\n", - "\n", - "\n", - "```{note}\n", - "The `candidate_func` is only valid if `selector_func` is not set.\n", - "Returning `None` or an empty list `[]` from the custom candidate function will raise a `ValueError`.\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", - "---------- PlanningAgent ----------\n", - "To answer this question, we'll break it down into two main subtasks:\n", - "\n", - "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", - "2. Calculate the percentage change in that player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", - "\n", - "Let's assign these tasks:\n", - "\n", - "1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", - "2. WebSearchAgent: Find the total rebound statistics for that identified player for both the 2007-2008 and 2008-2009 NBA seasons.\n", - "3. 
DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons once the data is retrieved.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_WtR5KTfEIxs3jIO25gjAw7dF', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_WtR5KTfEIxs3jIO25gjAw7dF', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_9HA3DEacUl4WuG2G2PtRkXAO', arguments='{\"start\": 432, \"end\": 527}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='21.99074074074074', name='percentage_change_tool', call_id='call_9HA3DEacUl4WuG2G2PtRkXAO', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "21.99074074074074\n", - "---------- PlanningAgent ----------\n", - "It seems we've missed some context there, so let's assign the subtasks again for clarity:\n", - "\n", - "Based on the search results, Dwyane Wade had the highest points for the Miami Heat in the 2006-2007 season with 1397 points.\n", - "\n", - "Now, let's find the necessary rebound statistics:\n", - "\n", - "2. WebSearchAgent: Find Dwyane Wade's total rebound statistics for both the 2007-2008 and 2008-2009 NBA seasons.\n", - "3. DataAnalystAgent: Once the data is retrieved, calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_3i1wTDSjkGg6Ev8YKYWkZK55', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_NRAs6jHxXRi8zsvpW5WlHAaU', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_3i1wTDSjkGg6Ev8YKYWkZK55', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_NRAs6jHxXRi8zsvpW5WlHAaU', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- PlanningAgent ----------\n", - "The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\n", - "\n", - "Now, let's calculate the percentage change.\n", - "\n", - "3. 
DataAnalystAgent: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_XECA7ezz7VIKbf8IbZYSCSpI', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_XECA7ezz7VIKbf8IbZYSCSpI', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1397 points. The percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\n", - "\n", - "TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=169), metadata={}, content=\"To answer this question, we'll break it down into two main subtasks:\\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Calculate the percentage change in that player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's assign these tasks:\\n\\n1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Find the total rebound statistics for that identified player for both the 2007-2008 and 2008-2009 NBA seasons.\\n3. 
DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons once the data is retrieved.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=324, completion_tokens=28), metadata={}, content=[FunctionCall(id='call_WtR5KTfEIxs3jIO25gjAw7dF', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_WtR5KTfEIxs3jIO25gjAw7dF', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=390, completion_tokens=37), metadata={}, content=[FunctionCall(id='call_9HA3DEacUl4WuG2G2PtRkXAO', arguments='{\"start\": 432, \"end\": 527}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='21.99074074074074', name='percentage_change_tool', call_id='call_9HA3DEacUl4WuG2G2PtRkXAO', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='21.99074074074074', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=413, completion_tokens=137), metadata={}, content=\"It seems we've missed some context there, so let's assign the subtasks again for clarity:\\n\\nBased on the search results, Dwyane Wade had the highest points for the Miami Heat in the 2006-2007 season with 1397 points.\\n\\nNow, let's find the necessary rebound statistics:\\n\\n2. WebSearchAgent: Find Dwyane Wade's total rebound statistics for both the 2007-2008 and 2008-2009 NBA seasons.\\n3. 
DataAnalystAgent: Once the data is retrieved, calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=576, completion_tokens=73), metadata={}, content=[FunctionCall(id='call_3i1wTDSjkGg6Ev8YKYWkZK55', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_NRAs6jHxXRi8zsvpW5WlHAaU', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_3i1wTDSjkGg6Ev8YKYWkZK55', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_NRAs6jHxXRi8zsvpW5WlHAaU', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=612, completion_tokens=84), metadata={}, content=\"The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\\n\\nNow, let's calculate the percentage change.\\n\\n3. DataAnalystAgent: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=720, completion_tokens=21), metadata={}, content=[FunctionCall(id='call_XECA7ezz7VIKbf8IbZYSCSpI', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_XECA7ezz7VIKbf8IbZYSCSpI', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=718, completion_tokens=63), metadata={}, content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1397 points. 
The percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "def candidate_func(messages: Sequence[ChatMessage | AgentEvent]) -> List[str]:\n", - " # Keep the planning agent as the first speaker to plan out the tasks.\n", - " if messages[-1].source == \"user\":\n", - " return [planning_agent.name]\n", - "\n", - " # If the previous speaker is the planning agent and it explicitly asks for the\n", - " # web search agent, the data analyst agent, or both (in case of re-planning or\n", - " # re-assignment of tasks), then return those specific agents.\n", - " last_message = messages[-1]\n", - " if last_message.source == planning_agent.name:\n", - " participants = []\n", - " if web_search_agent.name in last_message.content:\n", - " participants.append(web_search_agent.name)\n", - " if data_analyst_agent.name in last_message.content:\n", - " participants.append(data_analyst_agent.name)\n", - " if participants:\n", - " return participants # SelectorGroupChat will select from this list.\n", - "\n", - " # We can assume that the task is finished once the web search agent\n", - " # and the data analyst agent have taken their turns, so we send\n", - " # in the planning agent to terminate the chat.\n", - " previous_set_of_agents = set(message.source for message in messages)\n", - " if web_search_agent.name in previous_set_of_agents and data_analyst_agent.name in previous_set_of_agents:\n", - " return [planning_agent.name]\n", - "\n", - " # If no conditions are met, return all the agents.\n", - " return [planning_agent.name, web_search_agent.name, data_analyst_agent.name]\n", - "\n", - "\n", - "# Reset the previous team and run the chat again with the candidate function.\n", - "await team.reset()\n", - "team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination,\n", - " candidate_func=candidate_func,\n", - ")\n", - "\n", - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see from the conversation log that the Planning Agent returns to the conversation once the Web Search Agent and Data Analyst Agent have taken their turns. Finding that the task was not finished as expected, it calls the WebSearchAgent again to get the rebound values and then calls the DataAnalystAgent to compute the percentage change." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## User Feedback\n", - "\n", - "We can add {py:class}`~autogen_agentchat.agents.UserProxyAgent` to the team to\n", - "provide user feedback during a run.\n", - "See [Human-in-the-Loop](./tutorial/human-in-the-loop.ipynb) for more details\n", - "about {py:class}`~autogen_agentchat.agents.UserProxyAgent`.\n", - "\n", - "To use the {py:class}`~autogen_agentchat.agents.UserProxyAgent` in the \n", - "web search example, we simply add it to the team and update the selector function\n", - "to always check for user feedback after the planning agent speaks.\n", - "If the user responds with `\"APPROVE\"`, the conversation continues; otherwise,\n", - "the planning agent tries again until the user approves."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- PlanningAgent ----------\n", - "To address the user's query, we will need to perform the following tasks:\n", - "\n", - "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", - "2. Find the total rebounds for that player in the 2007-2008 season.\n", - "3. Find the total rebounds for that player in the 2008-2009 season.\n", - "4. Calculate the percentage change in the total rebounds between the 2007-2008 and 2008-2009 seasons.\n", - "\n", - "Let's assign these tasks:\n", - "\n", - "1. **WebSearchAgent**: Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", - " \n", - "(Task 2 and 3 depend on the result of Task 1. We'll proceed with Tasks 2 and 3 once Task 1 is complete.)\n", - "---------- UserProxyAgent ----------\n", - "approve\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_0prr3fUnG5CtisUG7QeygW0w', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_0prr3fUnG5CtisUG7QeygW0w')]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- PlanningAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points.\n", - "\n", - "Next, we need to find Dwyane Wade's total rebounds for the 2007-2008 and 2008-2009 seasons:\n", - "\n", - "2. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2007-2008 season.\n", - "3. 
**WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2008-2009 season.\n", - "---------- UserProxyAgent ----------\n", - "approve\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_fBZe80NaBfruOVGwRWbhXyRm', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_cURYibna4fGxySiL7IYt0c3s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_fBZe80NaBfruOVGwRWbhXyRm'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_cURYibna4fGxySiL7IYt0c3s')]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- PlanningAgent ----------\n", - "Now that we have Dwyane Wade's total rebounds for both seasons, we can calculate the percentage change:\n", - "\n", - "4. **DataAnalystAgent**: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds).\n", - "---------- UserProxyAgent ----------\n", - "approve\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_z3uog7t2x0z1Suzl5hACF9hY', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', call_id='call_z3uog7t2x0z1Suzl5hACF9hY')]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season, which is a percentage change of approximately 85.98%.\n", - "\n", - "TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=166), content=\"To address the user's query, we will need to perform the following tasks:\\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Find the total rebounds for that player in the 2007-2008 season.\\n3. Find the total rebounds for that player in the 2008-2009 season.\\n4. Calculate the percentage change in the total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's assign these tasks:\\n\\n1. **WebSearchAgent**: Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n \\n(Task 2 and 3 depend on the result of Task 1. 
We'll proceed with Tasks 2 and 3 once Task 1 is complete.)\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='2a433f88-f886-4b39-a078-ea1acdcb2f9d', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=323, completion_tokens=28), content=[FunctionCall(id='call_0prr3fUnG5CtisUG7QeygW0w', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_0prr3fUnG5CtisUG7QeygW0w')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=403, completion_tokens=112), content=\"Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points.\\n\\nNext, we need to find Dwyane Wade's total rebounds for the 2007-2008 and 2008-2009 seasons:\\n\\n2. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2007-2008 season.\\n3. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2008-2009 season.\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='23dd4570-2391-41e9-aeea-86598499792c', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=543, completion_tokens=73), content=[FunctionCall(id='call_fBZe80NaBfruOVGwRWbhXyRm', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_cURYibna4fGxySiL7IYt0c3s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_fBZe80NaBfruOVGwRWbhXyRm'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_cURYibna4fGxySiL7IYt0c3s')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=586, completion_tokens=70), content=\"Now that we have Dwyane Wade's total rebounds for both seasons, we can calculate the percentage change:\\n\\n4. 
**DataAnalystAgent**: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds).\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='e849d193-4ab3-4558-8560-7dbc062a0aee', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=655, completion_tokens=21), content=[FunctionCall(id='call_z3uog7t2x0z1Suzl5hACF9hY', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_z3uog7t2x0z1Suzl5hACF9hY')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=687, completion_tokens=74), content='Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season, which is a percentage change of approximately 85.98%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "user_proxy_agent = UserProxyAgent(\"UserProxyAgent\", description=\"A proxy for the user to approve or disapprove tasks.\")\n", - "\n", - "\n", - "def selector_func_with_user_proxy(messages: Sequence[ChatMessage | AgentEvent]) -> str | None:\n", - " if messages[-1].source != planning_agent.name and messages[-1].source != user_proxy_agent.name:\n", - " # Planning agent should be the first to engage when given a new task, or check progress.\n", - " return planning_agent.name\n", - " if messages[-1].source == planning_agent.name:\n", - " if messages[-2].source == user_proxy_agent.name and \"APPROVE\" in messages[-1].content.upper(): # type: ignore\n", - " # User has approved the plan, proceed to the next agent.\n", - " return None\n", - " # Use the user proxy agent to get the user's approval to proceed.\n", - " return user_proxy_agent.name\n", - " if messages[-1].source == user_proxy_agent.name:\n", - " # If the user does not approve, return to the planning agent.\n", - " if \"APPROVE\" not in messages[-1].content.upper(): # type: ignore\n", - " return planning_agent.name\n", - " return None\n", - "\n", - "\n", - "# Reset the previous agents and run the chat again with the user proxy agent and selector function.\n", - "await team.reset()\n", - "team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent, user_proxy_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination,\n", - " selector_prompt=selector_prompt,\n", - " selector_func=selector_func_with_user_proxy,\n", - " allow_repeated_speaker=True,\n", - ")\n", - "\n", - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, the user's feedback is incorporated into the conversation flow,\n", - "and the user can approve or reject the planning agent's decisions." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using Reasoning Models\n", - "\n", - "So far in the examples, we have used a `gpt-4o` model. Models like `gpt-4o`\n", - "and `gemini-1.5-flash` are great at following instructions, so you can\n", - "have relatively detailed instructions in the selector prompt for the team and the \n", - "system messages for each agent to guide their behavior.\n", - "\n", - "However, if you are using a reasoning model like `o3-mini`, you will need to\n", - "keep the selector prompt and system messages as simple and to the point as possible.\n", - "This is because the reasoning models are already good at coming up with their own \n", - "instructions given the context provided to them.\n", - "\n", - "This also means that we don't need a planning agent to break down the task\n", - "anymore, since the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` that\n", - "uses a reasoning model can do that on its own.\n", - "\n", - "In the following example, we will use `o3-mini` as the model for the\n", - "agents and the team, and we will not use a planning agent.\n", - "Also, we are keeping the selector prompt and system messages as simple as possible." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "model_client = OpenAIChatCompletionClient(model=\"o3-mini\")\n", - "\n", - "web_search_agent = AssistantAgent(\n", - " \"WebSearchAgent\",\n", - " description=\"An agent for searching information on the web.\",\n", - " tools=[search_web_tool],\n", - " model_client=model_client,\n", - " system_message=\"\"\"Use web search tool to find information.\"\"\",\n", - ")\n", - "\n", - "data_analyst_agent = AssistantAgent(\n", - " \"DataAnalystAgent\",\n", - " description=\"An agent for performing calculations.\",\n", - " model_client=model_client,\n", - " tools=[percentage_change_tool],\n", - " system_message=\"\"\"Use tool to perform calculation. 
If you have not seen the data, ask for it.\"\"\",\n", - ")\n", - "\n", - "user_proxy_agent = UserProxyAgent(\n", - " \"UserProxyAgent\",\n", - " description=\"A user to approve or disapprove tasks.\",\n", - ")\n", - "\n", - "selector_prompt = \"\"\"Select an agent to perform task.\n", - "\n", - "{roles}\n", - "\n", - "Current conversation context:\n", - "{history}\n", - "\n", - "Read the above conversation, then select an agent from {participants} to perform the next task.\n", - "When the task is complete, let the user approve or disapprove the task.\n", - "\"\"\"\n", - "\n", - "team = SelectorGroupChat(\n", - " [web_search_agent, data_analyst_agent, user_proxy_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination, # Use the same termination condition as before.\n", - " selector_prompt=selector_prompt,\n", - " allow_repeated_speaker=True,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', arguments='{\"query\": \"Who was the Miami Heat player with the highest points in the 2006-2007 season Miami Heat statistics Dwyane Wade rebounds percentage change 2007-2008 2008-2009 seasons\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- DataAnalystAgent ----------\n", - "I found that in the 2006–2007 season the player with the highest points was Dwyane Wade (with 1,397 points). 
Could you please provide Dwyane Wade’s total rebounds for the 2007–2008 and the 2008–2009 seasons so I can calculate the percentage change?\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_lppGTILXDvO9waPwKO66ehK6', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 and 2008-2009 seasons for Miami Heat\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_lppGTILXDvO9waPwKO66ehK6', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "---------- DataAnalystAgent ----------\n", - "Could you please provide Dwyane Wade’s total rebounds in the 2008-2009 season?\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_r8DBcbJtQfdtugLtyTrqOvoK', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season Miami Heat\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_r8DBcbJtQfdtugLtyTrqOvoK', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_4jejv1wM7V1osbBCxJze8aQM', arguments='{\"start\": 214, \"end\": 398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', call_id='call_4jejv1wM7V1osbBCxJze8aQM', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- DataAnalystAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest total points (1,397) during the 2006-2007 season. His total rebounds increased by approximately 86% from 214 in the 2007-2008 season to 398 in the 2008-2009 season.\n", - "---------- UserProxyAgent ----------\n", - "Approve. 
TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=103, completion_tokens=384), content=[FunctionCall(id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', arguments='{\"query\": \"Who was the Miami Heat player with the highest points in the 2006-2007 season Miami Heat statistics Dwyane Wade rebounds percentage change 2007-2008 2008-2009 seasons\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=183, completion_tokens=1038), content='I found that in the 2006–2007 season the player with the highest points was Dwyane Wade (with 1,397 points). Could you please provide Dwyane Wade’s total rebounds for the 2007–2008 and the 2008–2009 seasons so I can calculate the percentage change?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=299, completion_tokens=109), content=[FunctionCall(id='call_lppGTILXDvO9waPwKO66ehK6', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 and 2008-2009 seasons for Miami Heat\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_lppGTILXDvO9waPwKO66ehK6', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=291, completion_tokens=224), content='Could you please provide Dwyane Wade’s total rebounds in the 2008-2009 season?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=401, completion_tokens=37), content=[FunctionCall(id='call_r8DBcbJtQfdtugLtyTrqOvoK', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season Miami Heat\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_r8DBcbJtQfdtugLtyTrqOvoK', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of 
total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=353, completion_tokens=158), content=[FunctionCall(id='call_4jejv1wM7V1osbBCxJze8aQM', arguments='{\"start\": 214, \"end\": 398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_4jejv1wM7V1osbBCxJze8aQM', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=394, completion_tokens=138), content='Dwyane Wade was the Miami Heat player with the highest total points (1,397) during the 2006-2007 season. His total rebounds increased by approximately 86% from 214 in the 2007-2008 season to 398 in the 2008-2009 season.', type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='b3b05408-73fc-47d4-b832-16c9f447cd6e', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='Approve. TERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{tip}\n", - "For more guidance on how to prompt reasoning models, see the\n", - "Azure AI Services Blog on [Prompt Engineering for OpenAI's O1 and O3-mini Reasoning Models](https://techcommunity.microsoft.com/blog/azure-ai-services-blog/prompt-engineering-for-openai%E2%80%99s-o1-and-o3-mini-reasoning-models/4374010)\n", - "```" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.3" - } + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Selector Group Chat" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` implements a team where participants take turns broadcasting messages to all other members. A generative model (e.g., an LLM) selects the next speaker based on the shared context, enabling dynamic, context-aware collaboration.\n", + "\n", + "Key features include:\n", + "\n", + "- Model-based speaker selection\n", + "- Configurable participant roles and descriptions\n", + "- Prevention of consecutive turns by the same speaker (optional)\n", + "- Customizable selection prompting\n", + "- Customizable selection function to override the default model-based selection\n", + "- Customizable candidate function to narrow-down the set of agents for selection using model\n", + "\n", + "```{note}\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a high-level API. 
For more control and customization, refer to the [Group Chat Pattern](../core-user-guide/design-patterns/group-chat.ipynb) in the Core API documentation to implement your own group chat logic.\n", + "```\n", + "\n", + "## How Does it Work?\n", + "\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a group chat similar to {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", + "but with a model-based next speaker selection mechanism.\n", + "When the team receives a task through {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream`,\n", + "the following steps are executed:\n", + "\n", + "1. The team analyzes the current conversation context, including the conversation history and participants' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes, to determine the next speaker using a model. By default, the team will not select the same speaker consecutively unless it is the only agent available. This can be changed by setting `allow_repeated_speaker=True`. You can also override the model by providing a custom selection function.\n", + "2. The team prompts the selected speaker agent to provide a response, which is then **broadcast** to all other participants.\n", + "3. The termination condition is checked to determine if the conversation should end; if not, the process repeats from step 1.\n", + "4. When the conversation ends, the team returns the {py:class}`~autogen_agentchat.base.TaskResult` containing the conversation history from this task.\n", + "\n", + "Once the team finishes the task, the conversation context is kept within the team and all participants, so the next task can continue from the previous conversation context.\n", + "You can reset the conversation context by calling {py:meth}`~autogen_agentchat.teams.BaseGroupChat.reset`.\n", + "\n", + "In this section, we will demonstrate how to use {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with a simple example for a web search and data analysis task." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example: Web Search/Analysis" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import List, Sequence\n", + "\n", + "from autogen_agentchat.agents import AssistantAgent, UserProxyAgent\n", + "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", + "from autogen_agentchat.teams import SelectorGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Agents\n", + "\n", + "![Selector Group Chat](selector-group-chat.svg)\n", + "\n", + "This system uses three specialized agents:\n", + "\n", + "- **Planning Agent**: The strategic coordinator that breaks down complex tasks into manageable subtasks. \n", + "- **Web Search Agent**: An information retrieval specialist that interfaces with the `search_web_tool`.\n", + "- **Data Analyst Agent**: A calculation specialist equipped with `percentage_change_tool`. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The tools `search_web_tool` and `percentage_change_tool` are external tools that the agents can use to perform their tasks."
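+ , + "\n", + "\n", + "To make the calculation concrete: `percentage_change_tool(start=214, end=398)` (defined in the next cell) computes ((398 - 214) / 214) * 100 = 85.98130841121495, the figure you will see the agents produce later in this notebook."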
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# Note: This example uses mock tools instead of real APIs for demonstration purposes\n", + "def search_web_tool(query: str) -> str:\n", + " if \"2006-2007\" in query:\n", + " return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \"\"\"\n", + " elif \"2007-2008\" in query:\n", + " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n", + " elif \"2008-2009\" in query:\n", + " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n", + " return \"No data found.\"\n", + "\n", + "\n", + "def percentage_change_tool(start: float, end: float) -> float:\n", + " return ((end - start) / start) * 100" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create the specialized agents using the {py:class}`~autogen_agentchat.agents.AssistantAgent` class.\n", + "It is important to note that the agents' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes are used by the model to determine the next speaker,\n", + "so it is recommended to provide meaningful names and descriptions." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "\n", + "planning_agent = AssistantAgent(\n", + " \"PlanningAgent\",\n", + " description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n", + " model_client=model_client,\n", + " system_message=\"\"\"\n", + " You are a planning agent.\n", + " Your job is to break down complex tasks into smaller, manageable subtasks.\n", + " Your team members are:\n", + " WebSearchAgent: Searches for information\n", + " DataAnalystAgent: Performs calculations\n", + "\n", + " You only plan and delegate tasks - you do not execute them yourself.\n", + "\n", + " When assigning tasks, use this format:\n", + " 1. 
: \n", + "\n", + " After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n", + " \"\"\",\n", + ")\n", + "\n", + "web_search_agent = AssistantAgent(\n", + " \"WebSearchAgent\",\n", + " description=\"An agent for searching information on the web.\",\n", + " tools=[search_web_tool],\n", + " model_client=model_client,\n", + " system_message=\"\"\"\n", + " You are a web search agent.\n", + " Your only tool is search_tool - use it to find information.\n", + " You make only one search call at a time.\n", + " Once you have the results, you never do calculations based on them.\n", + " \"\"\",\n", + ")\n", + "\n", + "data_analyst_agent = AssistantAgent(\n", + " \"DataAnalystAgent\",\n", + " description=\"An agent for performing calculations.\",\n", + " model_client=model_client,\n", + " tools=[percentage_change_tool],\n", + " system_message=\"\"\"\n", + " You are a data analyst.\n", + " Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n", + " If you have not seen the data, ask for it.\n", + " \"\"\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` returns the\n", + "tool output as the response. If your tool does not return a well-formed\n", + "string in natural language format, you may want to add a reflection step\n", + "within the agent by setting `reflect_on_tool_use=True` when creating the agent.\n", + "This will allow the agent to reflect on the tool output and provide a natural\n", + "language response.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Workflow\n", + "\n", + "1. The task is received by the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` which, based on agent descriptions, selects the most appropriate agent to handle the initial task (typically the Planning Agent).\n", + "\n", + "2. The **Planning Agent** analyzes the task and breaks it down into subtasks, assigning each to the most appropriate agent using the format:\n", + " ` : `\n", + "\n", + "3. Based on the conversation context and agent descriptions, the {py:class}`~autogen_agent.teams.SelectorGroupChat` manager dynamically selects the next agent to handle their assigned subtask.\n", + "\n", + "4. The **Web Search Agent** performs searches one at a time, storing results in the shared conversation history.\n", + "\n", + "5. The **Data Analyst** processes the gathered information using available calculation tools when selected.\n", + "\n", + "6. The workflow continues with agents being dynamically selected until either:\n", + " - The Planning Agent determines all subtasks are complete and sends \"TERMINATE\"\n", + " - An alternative termination condition is met (e.g., a maximum number of messages)\n", + "\n", + "When defining your agents, make sure to include a helpful {py:attr}`~autogen_agentchat.base.ChatAgent.description` since this is used to decide which agent to select next." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Termination Conditions\n", + "\n", + "Let's use two termination conditions:\n", + "{py:class}`~autogen_agentchat.conditions.TextMentionTermination` to end the conversation when the Planning Agent sends \"TERMINATE\",\n", + "and {py:class}`~autogen_agentchat.conditions.MaxMessageTermination` to limit the conversation to 25 messages to avoid infinite loop." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "text_mention_termination = TextMentionTermination(\"TERMINATE\")\n", + "max_messages_termination = MaxMessageTermination(max_messages=25)\n", + "termination = text_mention_termination | max_messages_termination" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Selector Prompt\n", + "\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` uses a model to select\n", + "the next speaker based on the conversation context.\n", + "We will use a custom selector prompt to properly align with the workflow." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "selector_prompt = \"\"\"Select an agent to perform task.\n", + "\n", + "{roles}\n", + "\n", + "Current conversation context:\n", + "{history}\n", + "\n", + "Read the above conversation, then select an agent from {participants} to perform the next task.\n", + "Make sure the planner agent has assigned tasks before other agents start working.\n", + "Only select one agent.\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{tip}\n", + "Try not to overload the model with too much instruction in the selector prompt.\n", + "\n", + "What is too much? It depends on the capabilities of the model you are using.\n", + "For GPT-4o and equivalents, you can use a selector prompt with a condition for when each speaker should be selected.\n", + "For smaller models such as Phi-4, you should keep the selector prompt as simple as possible\n", + "such as the one used in this example.\n", + "\n", + "Generally, if you find yourself writing multiple conditions for each agent,\n", + "it is a sign that you should consider using a custom selection function,\n", + "or breaking down the task into smaller, sequential tasks to be handled by\n", + "separate agents or teams.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Running the Team\n", + "\n", + "Let's create the team with the agents, termination conditions, and custom selector prompt." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination,\n", + " selector_prompt=selector_prompt,\n", + " allow_repeated_speaker=True, # Allow an agent to speak multiple turns in a row.\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we run the team with a task to find information about an NBA player." 
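+ , + "\n", + "\n", + "If you are running in a script rather than a notebook, wrap the awaited calls in `asyncio.run(...)`, as the comment in the run cell below notes. A minimal sketch, assuming the `team` created above and the `task` defined in the next cell:\n", + "\n", + "```python\n", + "import asyncio\n", + "\n", + "\n", + "async def main() -> None:\n", + "    # Stream the team run to the console.\n", + "    await Console(team.run_stream(task=task))\n", + "\n", + "\n", + "asyncio.run(main())\n", + "```"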
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To complete this task, we need to perform the following subtasks:\n", + "\n", + "1. Find out which Miami Heat player had the highest points in the 2006-2007 season.\n", + "2. Gather data on this player's total rebounds for the 2007-2008 season.\n", + "3. Gather data on this player's total rebounds for the 2008-2009 season.\n", + "4. Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", + "\n", + "I'll assign these tasks accordingly:\n", + "\n", + "1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", + "2. WebSearchAgent: Find the total rebounds for this player in the 2007-2008 NBA season.\n", + "3. WebSearchAgent: Find the total rebounds for this player in the 2008-2009 NBA season.\n", + "4. DataAnalystAgent: Calculate the percentage change in total rebounds from the 2007-2008 season to the 2008-2009 season for this player.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_89tUNHaAM0kKQYPJLleGUKK7', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_89tUNHaAM0kKQYPJLleGUKK7', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- WebSearchAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\n", + "\n", + "Next, I will search for Dwyane Wade's total rebounds for the 2007-2008 season.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_RC55TkSjG3JXRuVOTPrcE1RL', arguments='{\"query\":\"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_RC55TkSjG3JXRuVOTPrcE1RL', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_pBXoABrErDow0rZjw3tjOZol', arguments='{\"query\":\"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", + "---------- 
WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pBXoABrErDow0rZjw3tjOZol', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_qMxxXtcJsiK8KFSSCx3zm0is', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_qMxxXtcJsiK8KFSSCx3zm0is', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "The player with the highest points for the Miami Heat in the 2006-2007 NBA season was Dwyane Wade, who scored 1,397 points. The percentage change in Dwyane Wade's total rebounds from 214 in the 2007-2008 season to 398 in the 2008-2009 season is approximately 85.98%.\n", + "\n", + "TERMINATE\n" + ] }, - "nbformat": 4, - "nbformat_minor": 2 -} \ No newline at end of file + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=220), metadata={}, content=\"To complete this task, we need to perform the following subtasks:\\n\\n1. Find out which Miami Heat player had the highest points in the 2006-2007 season.\\n2. Gather data on this player's total rebounds for the 2007-2008 season.\\n3. Gather data on this player's total rebounds for the 2008-2009 season.\\n4. Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nI'll assign these tasks accordingly:\\n\\n1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Find the total rebounds for this player in the 2007-2008 NBA season.\\n3. WebSearchAgent: Find the total rebounds for this player in the 2008-2009 NBA season.\\n4. 
DataAnalystAgent: Calculate the percentage change in total rebounds from the 2007-2008 season to the 2008-2009 season for this player.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=368, completion_tokens=27), metadata={}, content=[FunctionCall(id='call_89tUNHaAM0kKQYPJLleGUKK7', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_89tUNHaAM0kKQYPJLleGUKK7', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), ThoughtEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=\"The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\\n\\nNext, I will search for Dwyane Wade's total rebounds for the 2007-2008 season.\", type='ThoughtEvent'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=460, completion_tokens=83), metadata={}, content=[FunctionCall(id='call_RC55TkSjG3JXRuVOTPrcE1RL', arguments='{\"query\":\"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_RC55TkSjG3JXRuVOTPrcE1RL', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=585, completion_tokens=28), metadata={}, content=[FunctionCall(id='call_pBXoABrErDow0rZjw3tjOZol', arguments='{\"query\":\"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pBXoABrErDow0rZjw3tjOZol', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=496, completion_tokens=21), metadata={}, content=[FunctionCall(id='call_qMxxXtcJsiK8KFSSCx3zm0is', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, 
metadata={}, content=[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_qMxxXtcJsiK8KFSSCx3zm0is', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=528, completion_tokens=80), metadata={}, content=\"The player with the highest points for the Miami Heat in the 2006-2007 NBA season was Dwyane Wade, who scored 1,397 points. The percentage change in Dwyane Wade's total rebounds from 214 in the 2007-2008 season to 398 in the 2008-2009 season is approximately 85.98%.\\n\\nTERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Use asyncio.run(...) if you are running this in a script.\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, after the Web Search Agent conducts the necessary searches and the Data Analyst Agent completes the necessary calculations, we find that Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, and the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons is 85.98%!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom Selector Function" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Oftentimes we want better control over the selection process.\n", + "To this end, we can set the `selector_func` argument to a custom selector function to override the default model-based selection.\n", + "This allows us to implement more complex selection logic and state-based transitions.\n", + "\n", + "For instance, suppose we want the Planning Agent to speak immediately after any specialized agent to check on progress.\n", + "\n", + "```{note}\n", + "Returning `None` from the custom selector function will use the default model-based selection.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To answer this question, we need to follow these steps: \n", + "\n", + "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", + "2. Retrieve the total rebounds of that player for the 2007-2008 and 2008-2009 seasons.\n", + "3. Calculate the percentage change in his total rebounds between the two seasons.\n", + "\n", + "Let's delegate these tasks:\n", + "\n", + "1. WebSearchAgent: Find the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", + "2. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2007-2008 NBA season.\n", + "3. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2008-2009 NBA season.\n", + "4. 
DataAnalystAgent: Calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for the player found.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_Pz82ndNLSV4cH0Sg6g7ArP4L', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_Pz82ndNLSV4cH0Sg6g7ArP4L')]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- PlanningAgent ----------\n", + "Great! Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season. Now, let's continue with the next tasks:\n", + "\n", + "2. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2007-2008 NBA season.\n", + "3. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2008-2009 NBA season.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_3qv9so2DXFZIHtzqDIfXoFID', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Vh7zzzWUeiUAvaYjP0If0k1k', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_3qv9so2DXFZIHtzqDIfXoFID'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Vh7zzzWUeiUAvaYjP0If0k1k')]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- PlanningAgent ----------\n", + "Now let's calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for Dwyane Wade.\n", + "\n", + "4. DataAnalystAgent: Calculate the percentage change in total rebounds for Dwyane Wade between the 2007-2008 and 2008-2009 seasons.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_FXnPSr6JVGfAWs3StIizbt2V', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_FXnPSr6JVGfAWs3StIizbt2V')]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1397 points. 
The percentage change in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) is approximately 86.0%.\n", + "\n", + "TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=192), content=\"To answer this question, we need to follow these steps: \\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Retrieve the total rebounds of that player for the 2007-2008 and 2008-2009 seasons.\\n3. Calculate the percentage change in his total rebounds between the two seasons.\\n\\nLet's delegate these tasks:\\n\\n1. WebSearchAgent: Find the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2007-2008 NBA season.\\n3. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2008-2009 NBA season.\\n4. DataAnalystAgent: Calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for the player found.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=340, completion_tokens=27), content=[FunctionCall(id='call_Pz82ndNLSV4cH0Sg6g7ArP4L', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_Pz82ndNLSV4cH0Sg6g7ArP4L')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=420, completion_tokens=87), content=\"Great! Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season. Now, let's continue with the next tasks:\\n\\n2. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2007-2008 NBA season.\\n3. 
WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2008-2009 NBA season.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=525, completion_tokens=71), content=[FunctionCall(id='call_3qv9so2DXFZIHtzqDIfXoFID', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Vh7zzzWUeiUAvaYjP0If0k1k', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_3qv9so2DXFZIHtzqDIfXoFID'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Vh7zzzWUeiUAvaYjP0If0k1k')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=569, completion_tokens=68), content=\"Now let's calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for Dwyane Wade.\\n\\n4. DataAnalystAgent: Calculate the percentage change in total rebounds for Dwyane Wade between the 2007-2008 and 2008-2009 seasons.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=627, completion_tokens=21), content=[FunctionCall(id='call_FXnPSr6JVGfAWs3StIizbt2V', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_FXnPSr6JVGfAWs3StIizbt2V')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=659, completion_tokens=76), content='Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1397 points. 
The percentage change in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) is approximately 86.0%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:\n", + "    if messages[-1].source != planning_agent.name:\n", + "        return planning_agent.name\n", + "    return None\n", + "\n", + "\n", + "# Reset the previous team and run the chat again with the selector function.\n", + "await team.reset()\n", + "team = SelectorGroupChat(\n", + "    [planning_agent, web_search_agent, data_analyst_agent],\n", + "    model_client=model_client,\n", + "    termination_condition=termination,\n", + "    selector_prompt=selector_prompt,\n", + "    allow_repeated_speaker=True,\n", + "    selector_func=selector_func,\n", + ")\n", + "\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see from the conversation log that the Planning Agent always speaks immediately after the specialized agents.\n", + "\n", + "```{tip}\n", + "Each participant agent makes only one step (executing tools, generating a response, etc.)\n", + "on each turn. \n", + "If you want an {py:class}`~autogen_agentchat.agents.AssistantAgent` to keep taking turns\n", + "until it stops returning a {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage`,\n", + "that is, until it has finished running all the tools it needs to run, you can do so by\n", + "checking the last message and returning the agent's name if the last message is a\n", + "{py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage`.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom Candidate Function" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "One more possible requirement might be to automatically select the next speaker from a filtered list of agents.\n", + "For this, we can set the `candidate_func` parameter to a custom candidate function that filters\n", + "the list of candidate agents for speaker selection on each turn of the group chat.\n", + "\n", + "This allows us to restrict speaker selection to a specific set of agents after a given agent.\n", + "\n", + "```{note}\n", + "The `candidate_func` is only valid if `selector_func` is not set.\n", + "Returning `None` or an empty list `[]` from the custom candidate function will raise a `ValueError`.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To answer this question, we'll break it down into two main subtasks:\n", + "\n", + "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", + "2. Calculate the percentage change in that player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", + "\n", + "Let's assign these tasks:\n", + "\n", + "1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", + "2. 
WebSearchAgent: Find the total rebound statistics for that identified player for both the 2007-2008 and 2008-2009 NBA seasons.\n", + "3. DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons once the data is retrieved.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_WtR5KTfEIxs3jIO25gjAw7dF', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_WtR5KTfEIxs3jIO25gjAw7dF', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_9HA3DEacUl4WuG2G2PtRkXAO', arguments='{\"start\": 432, \"end\": 527}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='21.99074074074074', name='percentage_change_tool', call_id='call_9HA3DEacUl4WuG2G2PtRkXAO', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "21.99074074074074\n", + "---------- PlanningAgent ----------\n", + "It seems we've missed some context there, so let's assign the subtasks again for clarity:\n", + "\n", + "Based on the search results, Dwyane Wade had the highest points for the Miami Heat in the 2006-2007 season with 1397 points.\n", + "\n", + "Now, let's find the necessary rebound statistics:\n", + "\n", + "2. WebSearchAgent: Find Dwyane Wade's total rebound statistics for both the 2007-2008 and 2008-2009 NBA seasons.\n", + "3. DataAnalystAgent: Once the data is retrieved, calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_3i1wTDSjkGg6Ev8YKYWkZK55', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_NRAs6jHxXRi8zsvpW5WlHAaU', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_3i1wTDSjkGg6Ev8YKYWkZK55', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_NRAs6jHxXRi8zsvpW5WlHAaU', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- PlanningAgent ----------\n", + "The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\n", + "\n", + "Now, let's calculate the percentage change.\n", + "\n", + "3. 
DataAnalystAgent: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_XECA7ezz7VIKbf8IbZYSCSpI', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_XECA7ezz7VIKbf8IbZYSCSpI', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1397 points. The percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\n", + "\n", + "TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=169), metadata={}, content=\"To answer this question, we'll break it down into two main subtasks:\\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Calculate the percentage change in that player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's assign these tasks:\\n\\n1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Find the total rebound statistics for that identified player for both the 2007-2008 and 2008-2009 NBA seasons.\\n3. 
DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons once the data is retrieved.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=324, completion_tokens=28), metadata={}, content=[FunctionCall(id='call_WtR5KTfEIxs3jIO25gjAw7dF', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_WtR5KTfEIxs3jIO25gjAw7dF', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=390, completion_tokens=37), metadata={}, content=[FunctionCall(id='call_9HA3DEacUl4WuG2G2PtRkXAO', arguments='{\"start\": 432, \"end\": 527}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='21.99074074074074', name='percentage_change_tool', call_id='call_9HA3DEacUl4WuG2G2PtRkXAO', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='21.99074074074074', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=413, completion_tokens=137), metadata={}, content=\"It seems we've missed some context there, so let's assign the subtasks again for clarity:\\n\\nBased on the search results, Dwyane Wade had the highest points for the Miami Heat in the 2006-2007 season with 1397 points.\\n\\nNow, let's find the necessary rebound statistics:\\n\\n2. WebSearchAgent: Find Dwyane Wade's total rebound statistics for both the 2007-2008 and 2008-2009 NBA seasons.\\n3. 
DataAnalystAgent: Once the data is retrieved, calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=576, completion_tokens=73), metadata={}, content=[FunctionCall(id='call_3i1wTDSjkGg6Ev8YKYWkZK55', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_NRAs6jHxXRi8zsvpW5WlHAaU', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_3i1wTDSjkGg6Ev8YKYWkZK55', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_NRAs6jHxXRi8zsvpW5WlHAaU', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=612, completion_tokens=84), metadata={}, content=\"The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\\n\\nNow, let's calculate the percentage change.\\n\\n3. DataAnalystAgent: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=720, completion_tokens=21), metadata={}, content=[FunctionCall(id='call_XECA7ezz7VIKbf8IbZYSCSpI', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_XECA7ezz7VIKbf8IbZYSCSpI', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=718, completion_tokens=63), metadata={}, content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1397 points. 
The percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "def candidate_func(messages: Sequence[AgentEvent | ChatMessage]) -> List[str]:\n",
+    "    # Keep planning_agent as the first one to plan out the tasks.\n",
+    "    if messages[-1].source == \"user\":\n",
+    "        return [planning_agent.name]\n",
+    "\n",
+    "    # If the previous agent is planning_agent and it explicitly asks for web_search_agent\n",
+    "    # or data_analyst_agent or both (in case of re-planning or re-assignment of tasks),\n",
+    "    # then return those specific agents.\n",
+    "    last_message = messages[-1]\n",
+    "    if last_message.source == planning_agent.name:\n",
+    "        participants = []\n",
+    "        if web_search_agent.name in last_message.content:\n",
+    "            participants.append(web_search_agent.name)\n",
+    "        if data_analyst_agent.name in last_message.content:\n",
+    "            participants.append(data_analyst_agent.name)\n",
+    "        if participants:\n",
+    "            return participants  # SelectorGroupChat will select from the remaining two agents.\n",
+    "\n",
+    "    # We can assume that the task is finished once the web_search_agent\n",
+    "    # and data_analyst_agent have taken their turns, so we send\n",
+    "    # in planning_agent to terminate the chat.\n",
+    "    previous_set_of_agents = set(message.source for message in messages)\n",
+    "    if web_search_agent.name in previous_set_of_agents and data_analyst_agent.name in previous_set_of_agents:\n",
+    "        return [planning_agent.name]\n",
+    "\n",
+    "    # If no conditions are met, return all the agents.\n",
+    "    return [planning_agent.name, web_search_agent.name, data_analyst_agent.name]\n",
+    "\n",
+    "\n",
+    "# Reset the previous team and run the chat again with the candidate function.\n",
+    "await team.reset()\n",
+    "team = SelectorGroupChat(\n",
+    "    [planning_agent, web_search_agent, data_analyst_agent],\n",
+    "    model_client=model_client,\n",
+    "    termination_condition=termination,\n",
+    "    candidate_func=candidate_func,\n",
+    ")\n",
+    "\n",
+    "await Console(team.run_stream(task=task))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You can see from the conversation log that the Planning Agent returns to the conversation once the Web Search Agent and Data Analyst Agent have taken their turns. It finds that the task was not finished as expected, so it calls the WebSearchAgent again to get the rebound values and then calls the DataAnalystAgent to get the percentage change."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## User Feedback\n",
+    "\n",
+    "We can add {py:class}`~autogen_agentchat.agents.UserProxyAgent` to the team to\n",
+    "provide user feedback during a run.\n",
+    "See [Human-in-the-Loop](./tutorial/human-in-the-loop.ipynb) for more details\n",
+    "about {py:class}`~autogen_agentchat.agents.UserProxyAgent`.\n",
+    "\n",
+    "To use the {py:class}`~autogen_agentchat.agents.UserProxyAgent` in the\n",
+    "web search example, we simply add it to the team and update the selector function\n",
+    "to always check for user feedback after the planning agent speaks.\n",
+    "If the user responds with `\"APPROVE\"`, the conversation continues; otherwise,\n",
+    "the planning agent tries again, until the user approves."
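Because `candidate_func` is a plain function over the message history, you can sanity-check its routing outside a team run before adding user feedback. A minimal sketch, assuming the `planning_agent`, `web_search_agent`, and `data_analyst_agent` defined earlier; the message contents are made up for illustration:

```python
from autogen_agentchat.messages import TextMessage

# A fresh task from the user should route to the planning agent first.
history = [TextMessage(content="Analyze NBA stats.", source="user")]
assert candidate_func(history) == [planning_agent.name]

# A planning message that names both workers should surface both as candidates.
history.append(
    TextMessage(
        content="1. WebSearchAgent: find the data. 2. DataAnalystAgent: compute the change.",
        source=planning_agent.name,
    )
)
assert candidate_func(history) == [web_search_agent.name, data_analyst_agent.name]
```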
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- PlanningAgent ----------\n", + "To address the user's query, we will need to perform the following tasks:\n", + "\n", + "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", + "2. Find the total rebounds for that player in the 2007-2008 season.\n", + "3. Find the total rebounds for that player in the 2008-2009 season.\n", + "4. Calculate the percentage change in the total rebounds between the 2007-2008 and 2008-2009 seasons.\n", + "\n", + "Let's assign these tasks:\n", + "\n", + "1. **WebSearchAgent**: Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", + " \n", + "(Task 2 and 3 depend on the result of Task 1. We'll proceed with Tasks 2 and 3 once Task 1 is complete.)\n", + "---------- UserProxyAgent ----------\n", + "approve\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_0prr3fUnG5CtisUG7QeygW0w', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_0prr3fUnG5CtisUG7QeygW0w')]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- PlanningAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points.\n", + "\n", + "Next, we need to find Dwyane Wade's total rebounds for the 2007-2008 and 2008-2009 seasons:\n", + "\n", + "2. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2007-2008 season.\n", + "3. 
**WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2008-2009 season.\n", + "---------- UserProxyAgent ----------\n", + "approve\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_fBZe80NaBfruOVGwRWbhXyRm', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_cURYibna4fGxySiL7IYt0c3s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_fBZe80NaBfruOVGwRWbhXyRm'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_cURYibna4fGxySiL7IYt0c3s')]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- PlanningAgent ----------\n", + "Now that we have Dwyane Wade's total rebounds for both seasons, we can calculate the percentage change:\n", + "\n", + "4. **DataAnalystAgent**: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds).\n", + "---------- UserProxyAgent ----------\n", + "approve\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_z3uog7t2x0z1Suzl5hACF9hY', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_z3uog7t2x0z1Suzl5hACF9hY')]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season, which is a percentage change of approximately 85.98%.\n", + "\n", + "TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=166), content=\"To address the user's query, we will need to perform the following tasks:\\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Find the total rebounds for that player in the 2007-2008 season.\\n3. Find the total rebounds for that player in the 2008-2009 season.\\n4. Calculate the percentage change in the total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's assign these tasks:\\n\\n1. **WebSearchAgent**: Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n \\n(Task 2 and 3 depend on the result of Task 1. 
We'll proceed with Tasks 2 and 3 once Task 1 is complete.)\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='2a433f88-f886-4b39-a078-ea1acdcb2f9d', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=323, completion_tokens=28), content=[FunctionCall(id='call_0prr3fUnG5CtisUG7QeygW0w', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_0prr3fUnG5CtisUG7QeygW0w')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=403, completion_tokens=112), content=\"Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points.\\n\\nNext, we need to find Dwyane Wade's total rebounds for the 2007-2008 and 2008-2009 seasons:\\n\\n2. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2007-2008 season.\\n3. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2008-2009 season.\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='23dd4570-2391-41e9-aeea-86598499792c', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=543, completion_tokens=73), content=[FunctionCall(id='call_fBZe80NaBfruOVGwRWbhXyRm', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_cURYibna4fGxySiL7IYt0c3s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_fBZe80NaBfruOVGwRWbhXyRm'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_cURYibna4fGxySiL7IYt0c3s')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=586, completion_tokens=70), content=\"Now that we have Dwyane Wade's total rebounds for both seasons, we can calculate the percentage change:\\n\\n4. 
**DataAnalystAgent**: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds).\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='e849d193-4ab3-4558-8560-7dbc062a0aee', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=655, completion_tokens=21), content=[FunctionCall(id='call_z3uog7t2x0z1Suzl5hACF9hY', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_z3uog7t2x0z1Suzl5hACF9hY')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=687, completion_tokens=74), content='Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season, which is a percentage change of approximately 85.98%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "user_proxy_agent = UserProxyAgent(\"UserProxyAgent\", description=\"A proxy for the user to approve or disapprove tasks.\")\n", + "\n", + "\n", + "def selector_func_with_user_proxy(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:\n", + " if messages[-1].source != planning_agent.name and messages[-1].source != user_proxy_agent.name:\n", + " # Planning agent should be the first to engage when given a new task, or check progress.\n", + " return planning_agent.name\n", + " if messages[-1].source == planning_agent.name:\n", + " if messages[-2].source == user_proxy_agent.name and \"APPROVE\" in messages[-1].content.upper(): # type: ignore\n", + " # User has approved the plan, proceed to the next agent.\n", + " return None\n", + " # Use the user proxy agent to get the user's approval to proceed.\n", + " return user_proxy_agent.name\n", + " if messages[-1].source == user_proxy_agent.name:\n", + " # If the user does not approve, return to the planning agent.\n", + " if \"APPROVE\" not in messages[-1].content.upper(): # type: ignore\n", + " return planning_agent.name\n", + " return None\n", + "\n", + "\n", + "# Reset the previous agents and run the chat again with the user proxy agent and selector function.\n", + "await team.reset()\n", + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent, user_proxy_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination,\n", + " selector_prompt=selector_prompt,\n", + " selector_func=selector_func_with_user_proxy,\n", + " allow_repeated_speaker=True,\n", + ")\n", + "\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, the user's feedback is incorporated into the conversation flow,\n", + "and the user can approve or reject the planning agent's decisions." 
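For unattended runs, such as automated tests, blocking on console input at every approval step is impractical. If memory serves, `UserProxyAgent` accepts an `input_func` callable that replaces the default console prompt; treat the parameter name and signature as an assumption and confirm against the current API reference. A minimal sketch of a scripted approver:

```python
# Hypothetical scripted approver: stands in for a human and approves every plan.
def auto_approve(prompt: str) -> str:
    return "APPROVE"


# Assumes UserProxyAgent exposes an `input_func` parameter (check the API reference).
scripted_user_proxy = UserProxyAgent(
    "UserProxyAgent",
    description="A proxy for the user to approve or disapprove tasks.",
    input_func=auto_approve,
)
```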
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Reasoning Models\n", + "\n", + "So far in the examples, we have used a `gpt-4o` model. Models like `gpt-4o`\n", + "and `gemini-1.5-flash` are great at following instructions, so you can\n", + "have relatively detailed instructions in the selector prompt for the team and the \n", + "system messages for each agent to guide their behavior.\n", + "\n", + "However, if you are using a reasoning model like `o3-mini`, you will need to\n", + "keep the selector prompt and system messages as simple and to the point as possible.\n", + "This is because the reasoning models are already good at coming up with their own \n", + "instructions given the context provided to them.\n", + "\n", + "This also means that we don't need a planning agent to break down the task\n", + "anymore, since the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` that\n", + "uses a reasoning model can do that on its own.\n", + "\n", + "In the following example, we will use `o3-mini` as the model for the\n", + "agents and the team, and we will not use a planning agent.\n", + "Also, we are keeping the selector prompt and system messages as simple as possible." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "model_client = OpenAIChatCompletionClient(model=\"o3-mini\")\n", + "\n", + "web_search_agent = AssistantAgent(\n", + " \"WebSearchAgent\",\n", + " description=\"An agent for searching information on the web.\",\n", + " tools=[search_web_tool],\n", + " model_client=model_client,\n", + " system_message=\"\"\"Use web search tool to find information.\"\"\",\n", + ")\n", + "\n", + "data_analyst_agent = AssistantAgent(\n", + " \"DataAnalystAgent\",\n", + " description=\"An agent for performing calculations.\",\n", + " model_client=model_client,\n", + " tools=[percentage_change_tool],\n", + " system_message=\"\"\"Use tool to perform calculation. 
If you have not seen the data, ask for it.\"\"\",\n", + ")\n", + "\n", + "user_proxy_agent = UserProxyAgent(\n", + " \"UserProxyAgent\",\n", + " description=\"A user to approve or disapprove tasks.\",\n", + ")\n", + "\n", + "selector_prompt = \"\"\"Select an agent to perform task.\n", + "\n", + "{roles}\n", + "\n", + "Current conversation context:\n", + "{history}\n", + "\n", + "Read the above conversation, then select an agent from {participants} to perform the next task.\n", + "When the task is complete, let the user approve or disapprove the task.\n", + "\"\"\"\n", + "\n", + "team = SelectorGroupChat(\n", + " [web_search_agent, data_analyst_agent, user_proxy_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination, # Use the same termination condition as before.\n", + " selector_prompt=selector_prompt,\n", + " allow_repeated_speaker=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', arguments='{\"query\": \"Who was the Miami Heat player with the highest points in the 2006-2007 season Miami Heat statistics Dwyane Wade rebounds percentage change 2007-2008 2008-2009 seasons\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- DataAnalystAgent ----------\n", + "I found that in the 2006–2007 season the player with the highest points was Dwyane Wade (with 1,397 points). 
Could you please provide Dwyane Wade’s total rebounds for the 2007–2008 and the 2008–2009 seasons so I can calculate the percentage change?\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_lppGTILXDvO9waPwKO66ehK6', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 and 2008-2009 seasons for Miami Heat\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_lppGTILXDvO9waPwKO66ehK6', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "---------- DataAnalystAgent ----------\n", + "Could you please provide Dwyane Wade’s total rebounds in the 2008-2009 season?\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_r8DBcbJtQfdtugLtyTrqOvoK', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season Miami Heat\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_r8DBcbJtQfdtugLtyTrqOvoK', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_4jejv1wM7V1osbBCxJze8aQM', arguments='{\"start\": 214, \"end\": 398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_4jejv1wM7V1osbBCxJze8aQM', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- DataAnalystAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest total points (1,397) during the 2006-2007 season. His total rebounds increased by approximately 86% from 214 in the 2007-2008 season to 398 in the 2008-2009 season.\n", + "---------- UserProxyAgent ----------\n", + "Approve. 
TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=103, completion_tokens=384), content=[FunctionCall(id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', arguments='{\"query\": \"Who was the Miami Heat player with the highest points in the 2006-2007 season Miami Heat statistics Dwyane Wade rebounds percentage change 2007-2008 2008-2009 seasons\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=183, completion_tokens=1038), content='I found that in the 2006–2007 season the player with the highest points was Dwyane Wade (with 1,397 points). Could you please provide Dwyane Wade’s total rebounds for the 2007–2008 and the 2008–2009 seasons so I can calculate the percentage change?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=299, completion_tokens=109), content=[FunctionCall(id='call_lppGTILXDvO9waPwKO66ehK6', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 and 2008-2009 seasons for Miami Heat\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_lppGTILXDvO9waPwKO66ehK6', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=291, completion_tokens=224), content='Could you please provide Dwyane Wade’s total rebounds in the 2008-2009 season?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=401, completion_tokens=37), content=[FunctionCall(id='call_r8DBcbJtQfdtugLtyTrqOvoK', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season Miami Heat\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_r8DBcbJtQfdtugLtyTrqOvoK', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of 
total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=353, completion_tokens=158), content=[FunctionCall(id='call_4jejv1wM7V1osbBCxJze8aQM', arguments='{\"start\": 214, \"end\": 398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_4jejv1wM7V1osbBCxJze8aQM', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=394, completion_tokens=138), content='Dwyane Wade was the Miami Heat player with the highest total points (1,397) during the 2006-2007 season. His total rebounds increased by approximately 86% from 214 in the 2007-2008 season to 398 in the 2008-2009 season.', type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='b3b05408-73fc-47d4-b832-16c9f447cd6e', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='Approve. TERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")"
+      ]
+     },
+     "execution_count": 9,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "await Console(team.run_stream(task=task))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "```{tip}\n",
+    "For more guidance on how to prompt reasoning models, see the\n",
+    "Azure AI Services Blog on [Prompt Engineering for OpenAI's O1 and O3-mini Reasoning Models](https://techcommunity.microsoft.com/blog/azure-ai-services-blog/prompt-engineering-for-openai%E2%80%99s-o1-and-o3-mini-reasoning-models/4374010)\n",
+    "```"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": ".venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb
index 9a6b507f1acf..35c9052dee75 100644
--- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb
+++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb
@@ -1,847 +1,847 @@
 {
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Agents\n",
-    "\n",
-    "AutoGen AgentChat provides a set of preset Agents, each with variations in how an agent might respond to messages.\n",
-    "All agents share the following attributes and methods:\n",
-    "\n",
-    "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.name`: The unique name of the agent.\n",
-    "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.description`: The description of the agent in text.\n",
-    "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: Send the agent a sequence of messages that subclass {py:class}`~autogen_agentchat.messages.ChatMessage` and get a 
{py:class}`~autogen_agentchat.base.Response`. **It is important to note that agents are expected to be stateful and this method is expected to be called with new messages, not the complete history**.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of messages that subclass {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage`, followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: Reset the agent to its initial state.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`: convenience methods that call {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` respectively but offer the same interface as [Teams](./teams.ipynb).\n", - "\n", - "See {py:mod}`autogen_agentchat.messages` for more information on AgentChat message types.\n", - "\n", - "\n", - "## Assistant Agent\n", - "\n", - "{py:class}`~autogen_agentchat.agents.AssistantAgent` is a built-in agent that\n", - "uses a language model and has the ability to use tools." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.messages import TextMessage\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_core import CancellationToken\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "# Define a tool that searches the web for information.\n", - "async def web_search(query: str) -> str:\n", - " \"\"\"Find information on the web\"\"\"\n", - " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", - "\n", - "\n", - "# Create an agent that uses the OpenAI GPT-4o model.\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " # api_key=\"YOUR_API_KEY\",\n", - ")\n", - "agent = AssistantAgent(\n", - " name=\"assistant\",\n", - " model_client=model_client,\n", - " tools=[web_search],\n", - " system_message=\"Use tools to solve tasks.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## Getting Responses\n", - "\n", - "We can use the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method to get the agent response to a given message.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=598, completion_tokens=16), content=[FunctionCall(id='call_9UWYM1CgE3ZbnJcSJavNDB79', arguments='{\"query\":\"AutoGen\"}', name='web_search')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_9UWYM1CgE3ZbnJcSJavNDB79', is_error=False)], type='ToolCallExecutionEvent')]\n", - "source='assistant' models_usage=None content='AutoGen is a programming framework for building multi-agent 
applications.' type='ToolCallSummaryMessage'\n" - ] - } - ], - "source": [ - "async def assistant_run() -> None:\n", - " response = await agent.on_messages(\n", - " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - " )\n", - " print(response.inner_messages)\n", - " print(response.chat_message)\n", - "\n", - "\n", - "# Use asyncio.run(assistant_run()) when running in a script.\n", - "await assistant_run()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The call to the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method\n", - "returns a {py:class}`~autogen_agentchat.base.Response`\n", - "that contains the agent's final response in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute,\n", - "as well as a list of inner messages in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` attribute,\n", - "which stores the agent's \"thought process\" that led to the final response.\n", - "\n", - "```{note}\n", - "It is important to note that {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`\n", - "will update the internal state of the agent -- it will add the messages to the agent's\n", - "history. So you should call this method with new messages.\n", - "**You should not repeatedly call this method with the same messages or the complete history.**\n", - "```\n", - "\n", - "```{note}\n", - "Unlike in v0.2 AgentChat, the tools are executed by the same agent directly within\n", - "the same call to {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`.\n", - "By default, the agent will return the result of the tool call as the final response.\n", - "```\n", - "\n", - "You can also call the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method, which is a convenience method that calls {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`. \n", - "It follows the same interface as [Teams](./teams.ipynb) and returns a {py:class}`~autogen_agentchat.base.TaskResult` object." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Multi-Modal Input\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can handle multi-modal input\n", - "by providing the input as a {py:class}`~autogen_agentchat.messages.MultiModalMessage`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from io import BytesIO\n", - "\n", - "import PIL\n", - "import requests\n", - "from autogen_agentchat.messages import MultiModalMessage\n", - "from autogen_core import Image\n", - "\n", - "# Create a multi-modal message with random image and text.\n", - "pil_image = PIL.Image.open(BytesIO(requests.get(\"https://picsum.photos/300/200\").content))\n", - "img = Image(pil_image)\n", - "multi_modal_message = MultiModalMessage(content=[\"Can you describe the content of this image?\", img], source=\"user\")\n", - "img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The image depicts a vintage car, likely from the 1930s or 1940s, with a sleek, classic design. The car seems to be customized or well-maintained, as indicated by its shiny exterior and lowered stance. 
It has a prominent grille and round headlights. There's a license plate on the front with the text \"FARMER BOY.\" The setting appears to be a street with old-style buildings in the background, suggesting a historical or retro theme.\n" - ] - } - ], - "source": [ - "# Use asyncio.run(...) when running in a script.\n", - "response = await agent.on_messages([multi_modal_message], CancellationToken())\n", - "print(response.chat_message.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also use {py:class}`~autogen_agentchat.messages.MultiModalMessage` as a `task`\n", - "input to the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Streaming Messages\n", - "\n", - "We can also stream each message as it is generated by the agent by using the\n", - "{py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method,\n", - "and use {py:class}`~autogen_agentchat.ui.Console` to print the messages\n", - "as they appear to the console." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- assistant ----------\n", - "[FunctionCall(id='call_fSp5iTGVm2FKw5NIvfECSqNd', arguments='{\"query\":\"AutoGen information\"}', name='web_search')]\n", - "[Prompt tokens: 61, Completion tokens: 16]\n", - "---------- assistant ----------\n", - "[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_fSp5iTGVm2FKw5NIvfECSqNd')]\n", - "---------- assistant ----------\n", - "AutoGen is a programming framework designed for building multi-agent applications. 
If you need more detailed information or specific aspects about AutoGen, feel free to ask!\n", - "[Prompt tokens: 93, Completion tokens: 32]\n", - "---------- Summary ----------\n", - "Number of inner messages: 2\n", - "Total prompt tokens: 154\n", - "Total completion tokens: 48\n", - "Duration: 4.30 seconds\n" - ] - } - ], - "source": [ - "async def assistant_run_stream() -> None:\n", - " # Option 1: read each message from the stream (as shown in the previous example).\n", - " # async for message in agent.on_messages_stream(\n", - " # [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " # cancellation_token=CancellationToken(),\n", - " # ):\n", - " # print(message)\n", - "\n", - " # Option 2: use Console to print all messages as they appear.\n", - " await Console(\n", - " agent.on_messages_stream(\n", - " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - " ),\n", - " output_stats=True, # Enable stats printing.\n", - " )\n", - "\n", - "\n", - "# Use asyncio.run(assistant_run_stream()) when running in a script.\n", - "await assistant_run_stream()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method\n", - "returns an asynchronous generator that yields each inner message generated by the agent,\n", - "with the final item being the response message in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute.\n", - "\n", - "From the messages, you can observe that the assistant agent utilized the `web_search` tool to\n", - "gather information and responded based on the search results.\n", - "\n", - "You can also use {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` to get the same streaming behavior as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`. It follows the same interface as [Teams](./teams.ipynb)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using Tools\n", - "\n", - "Large Language Models (LLMs) are typically limited to generating text or code responses. \n", - "However, many complex tasks benefit from the ability to use external tools that perform specific actions,\n", - "such as fetching data from APIs or databases.\n", - "\n", - "To address this limitation, modern LLMs can now accept a list of available tool schemas \n", - "(descriptions of tools and their arguments) and generate a tool call message. 
\n", - "This capability is known as **Tool Calling** or **Function Calling** and \n", - "is becoming a popular pattern in building intelligent agent-based applications.\n", - "Refer to the documentation from [OpenAI](https://platform.openai.com/docs/guides/function-calling) \n", - "and [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) for more information about tool calling in LLMs.\n", - "\n", - "In AgentChat, the {py:class}`~autogen_agentchat.agents.AssistantAgent` can use tools to perform specific actions.\n", - "The `web_search` tool is one such tool that allows the assistant agent to search the web for information.\n", - "A custom tool can be a Python function or a subclass of the {py:class}`~autogen_core.tools.BaseTool`.\n", - "\n", - "```{note}\n", - "For how to use model clients directly with tools, refer to the [Tools](../../core-user-guide/components/tools.ipynb) section\n", - "in the Core User Guide.\n", - "```\n", - "\n", - "By default, when {py:class}`~autogen_agentchat.agents.AssistantAgent` executes a tool,\n", - "it will return the tool's output as a string in {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage` in its response.\n", - "If your tool does not return a well-formed string in natural language, you\n", - "can add a reflection step to have the model summarize the tool's output,\n", - "by setting the `reflect_on_tool_use=True` parameter in the {py:class}`~autogen_agentchat.agents.AssistantAgent` constructor.\n", - "\n", - "### Built-in Tools\n", - "\n", - "AutoGen Extension provides a set of built-in tools that can be used with the Assistant Agent.\n", - "Head over to the [API documentation](../../../reference/index.md) for all the available tools\n", - "under the `autogen_ext.tools` namespace. For example, you can find the following tools:\n", - "\n", - "- {py:mod}`~autogen_ext.tools.graphrag`: Tools for using GraphRAG index.\n", - "- {py:mod}`~autogen_ext.tools.http`: Tools for making HTTP requests.\n", - "- {py:mod}`~autogen_ext.tools.langchain`: Adaptor for using LangChain tools.\n", - "- {py:mod}`~autogen_ext.tools.mcp`: Tools for using Model Chat Protocol (MCP) servers." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Function Tool\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.AssistantAgent` automatically\n", - "converts a Python function into a {py:class}`~autogen_core.tools.FunctionTool`\n", - "which can be used as a tool by the agent and automatically generates the tool schema\n", - "from the function signature and docstring.\n", - "\n", - "The `web_search_func` tool is an example of a function tool.\n", - "The schema is automatically generated." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'name': 'web_search_func',\n", - " 'description': 'Find information on the web',\n", - " 'parameters': {'type': 'object',\n", - " 'properties': {'query': {'description': 'query',\n", - " 'title': 'Query',\n", - " 'type': 'string'}},\n", - " 'required': ['query'],\n", - " 'additionalProperties': False},\n", - " 'strict': False}" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from autogen_core.tools import FunctionTool\n", - "\n", - "\n", - "# Define a tool using a Python function.\n", - "async def web_search_func(query: str) -> str:\n", - " \"\"\"Find information on the web\"\"\"\n", - " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", - "\n", - "\n", - "# This step is automatically performed inside the AssistantAgent if the tool is a Python function.\n", - "web_search_function_tool = FunctionTool(web_search_func, description=\"Find information on the web\")\n", - "# The schema is provided to the model during AssistantAgent's on_messages call.\n", - "web_search_function_tool.schema" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Model Context Protocol Tools\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can also use tools that are\n", - "served from a Model Context Protocol (MCP) server\n", - "using {py:func}`~autogen_ext.tools.mcp.mcp_server_tools`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Seattle, located in Washington state, is the most populous city in the state and a major city in the Pacific Northwest region of the United States. It's known for its vibrant cultural scene, significant economic presence, and rich history. Here are some key points about Seattle from the Wikipedia page:\n", - "\n", - "1. **History and Geography**: Seattle is situated between Puget Sound and Lake Washington, with the Cascade Range to the east and the Olympic Mountains to the west. Its history is deeply rooted in Native American heritage and its development was accelerated with the arrival of settlers in the 19th century. The city was officially incorporated in 1869.\n", - "\n", - "2. **Economy**: Seattle is a major economic hub with a diverse economy anchored by sectors like aerospace, technology, and retail. It's home to influential companies such as Amazon and Starbucks, and has a significant impact on the tech industry due to companies like Microsoft and other technology enterprises in the surrounding area.\n", - "\n", - "3. **Cultural Significance**: Known for its music scene, Seattle was the birthplace of grunge music in the early 1990s. It also boasts significant attractions like the Space Needle, Pike Place Market, and the Seattle Art Museum. \n", - "\n", - "4. **Education and Innovation**: The city hosts important educational institutions, with the University of Washington being a leading research university. Seattle is recognized for fostering innovation and is a leader in environmental sustainability efforts.\n", - "\n", - "5. **Demographics and Diversity**: Seattle is noted for its diverse population, reflected in its rich cultural tapestry. 
It has seen a significant increase in population, leading to urban development and changes in its social landscape.\n", - "\n", - "These points highlight Seattle as a dynamic city with a significant cultural, economic, and educational influence within the United States and beyond.\n" - ] - } - ], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "from autogen_ext.tools.mcp import StdioServerParams, mcp_server_tools\n", - "\n", - "# Get the fetch tool from mcp-server-fetch.\n", - "fetch_mcp_server = StdioServerParams(command=\"uvx\", args=[\"mcp-server-fetch\"])\n", - "tools = await mcp_server_tools(fetch_mcp_server)\n", - "\n", - "# Create an agent that can use the fetch tool.\n", - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "agent = AssistantAgent(name=\"fetcher\", model_client=model_client, tools=tools, reflect_on_tool_use=True) # type: ignore\n", - "\n", - "# Let the agent fetch the content of a URL and summarize it.\n", - "result = await agent.run(task=\"Summarize the content of https://en.wikipedia.org/wiki/Seattle\")\n", - "print(result.messages[-1].content)\n", - "\n", - "# Close the connection to the model client.\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Langchain Tools\n", - "\n", - "You can also use tools from the Langchain library\n", - "by wrapping them in {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- assistant ----------\n", - "[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')]\n", - "[Prompt tokens: 111, Completion tokens: 22]\n", - "---------- assistant ----------\n", - "[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')]\n", - "---------- assistant ----------\n", - "29.69911764705882\n", - "---------- Summary ----------\n", - "Number of inner messages: 2\n", - "Total prompt tokens: 111\n", - "Total completion tokens: 22\n", - "Duration: 0.62 seconds\n" - ] - }, - { - "data": { - "text/plain": [ - "Response(chat_message=ToolCallSummaryMessage(source='assistant', models_usage=None, content='29.69911764705882', type='ToolCallSummaryMessage'), inner_messages=[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=111, completion_tokens=22), content=[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')], type='ToolCallExecutionEvent')])" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import pandas as pd\n", - "from autogen_ext.tools.langchain import LangChainToolAdapter\n", - "from langchain_experimental.tools.python.tool import PythonAstREPLTool\n", - "\n", - "df = pd.read_csv(\"https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv\")\n", - "tool = LangChainToolAdapter(PythonAstREPLTool(locals={\"df\": df}))\n", - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "agent = AssistantAgent(\n", - 
" \"assistant\", tools=[tool], model_client=model_client, system_message=\"Use the `df` variable to access the dataset.\"\n", - ")\n", - "await Console(\n", - " agent.on_messages_stream(\n", - " [TextMessage(content=\"What's the average age of the passengers?\", source=\"user\")], CancellationToken()\n", - " ),\n", - " output_stats=True,\n", - ")\n", - "\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Parallel Tool Calls\n", - "\n", - "Some models support parallel tool calls, which can be useful for tasks that require multiple tools to be called simultaneously.\n", - "By default, if the model client produces multiple tool calls, {py:class}`~autogen_agentchat.agents.AssistantAgent`\n", - "will call the tools in parallel.\n", - "\n", - "You may want to disable parallel tool calls when the tools have side effects that may interfere with each other, or,\n", - "when agent behavior needs to be consistent across different models.\n", - "This should be done at the model client level.\n", - "\n", - "For {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient` and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`,\n", - "set `parallel_tool_calls=False` to disable parallel tool calls." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model_client_no_parallel_tool_call = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " parallel_tool_calls=False, # type: ignore\n", - ")\n", - "agent_no_parallel_tool_call = AssistantAgent(\n", - " name=\"assistant\",\n", - " model_client=model_client_no_parallel_tool_call,\n", - " tools=[web_search],\n", - " system_message=\"Use tools to solve tasks.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Running an Agent in a Loop\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.AssistantAgent` executes one\n", - "step at a time: one model call, followed by one tool call (or parallel tool calls), and then\n", - "an optional reflection.\n", - "\n", - "To run it in a loop, for example, running it until it stops producing\n", - "tool calls, please refer to [Single-Agent Team](./teams.ipynb#single-agent-team)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Structured Output\n", - "\n", - "Structured output allows models to return structured JSON text with pre-defined schema\n", - "provided by the application. Different from JSON-mode, the schema can be provided\n", - "as a [Pydantic BaseModel](https://docs.pydantic.dev/latest/concepts/models/)\n", - "class, which can also be used to validate the output. \n", - "\n", - "```{note}\n", - "Structured output is only available for models that support it. It also\n", - "requires the model client to support structured output as well.\n", - "Currently, the {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient`\n", - "and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`\n", - "support structured output.\n", - "```\n", - "\n", - "Structured output is also useful for incorporating Chain-of-Thought\n", - "reasoning in the agent's responses.\n", - "See the example below for how to use structured output with the assistant agent." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "I am happy.\n", - "---------- assistant ----------\n", - "{\"thoughts\":\"The user explicitly states that they are happy.\",\"response\":\"happy\"}\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='I am happy.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=89, completion_tokens=18), content='{\"thoughts\":\"The user explicitly states that they are happy.\",\"response\":\"happy\"}', type='TextMessage')], stop_reason=None)" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from typing import Literal\n", - "\n", - "from pydantic import BaseModel\n", - "\n", - "\n", - "# The response format for the agent as a Pydantic base model.\n", - "class AgentResponse(BaseModel):\n", - " thoughts: str\n", - " response: Literal[\"happy\", \"sad\", \"neutral\"]\n", - "\n", - "\n", - "# Create an agent that uses the OpenAI GPT-4o model with the custom response format.\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " response_format=AgentResponse, # type: ignore\n", - ")\n", - "agent = AssistantAgent(\n", - " \"assistant\",\n", - " model_client=model_client,\n", - " system_message=\"Categorize the input as happy, sad, or neutral following the JSON format.\",\n", - ")\n", - "\n", - "await Console(agent.run_stream(task=\"I am happy.\"))\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Streaming Tokens\n", - "\n", - "You can stream the tokens generated by the model client by setting `model_client_stream=True`.\n", - "This will cause the agent to yield {py:class}`~autogen_agentchat.messages.ModelClientStreamingChunkEvent` messages\n", - "in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`.\n", - "\n", - "The underlying model API must support streaming tokens for this to work.\n", - "Please check with your model provider to see if this is supported." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' South' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Buenos' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Aires' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Argentina' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' São' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Paulo' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Brazil' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", - "Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in South America are Buenos Aires in Argentina and São Paulo in Brazil.', type='TextMessage'), inner_messages=[])\n" - ] - } - ], - "source": [ - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "\n", - "streaming_assistant = AssistantAgent(\n", - " name=\"assistant\",\n", - " model_client=model_client,\n", - " system_message=\"You are a helpful assistant.\",\n", - " model_client_stream=True, # Enable streaming tokens.\n", - ")\n", - "\n", - "# Use an async function and asyncio.run() in a script.\n", - "async for message in streaming_assistant.on_messages_stream( # type: ignore\n", - " [TextMessage(content=\"Name two cities in South America\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - "):\n", - " print(message)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see the streaming chunks in the output above.\n", - "The chunks are generated by the model client and are yielded by the agent as they are received.\n", - "The final response, the concatenation of all the chunks, is yielded right after the last chunk.\n", - "\n", - "Similarly, {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` will also yield the same streaming chunks,\n", - "followed by a full text message right after the last chunk." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "source='user' models_usage=None content='Name two cities in North America.' 
type='TextMessage'\n", - "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' North' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' New' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' York' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' City' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' the' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' United' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' States' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Toronto' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Canada' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) content='Two cities in North America are New York City in the United States and Toronto in Canada.' type='TextMessage'\n", - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Name two cities in North America.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in North America are New York City in the United States and Toronto in Canada.', type='TextMessage')], stop_reason=None)\n" - ] - } - ], - "source": [ - "async for message in streaming_assistant.run_stream(task=\"Name two cities in North America.\"): # type: ignore\n", - " print(message)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using Model Context\n", - "\n", - "{py:class}`~autogen_agentchat.agents.AssistantAgent` has a `model_context`\n", - "parameter that can be used to pass in a {py:class}`~autogen_core.model_context.ChatCompletionContext`\n", - "object. This allows the agent to use different model contexts, such as\n", - "{py:class}`~autogen_core.model_context.BufferedChatCompletionContext` to\n", - "limit the context sent to the model.\n", - "\n", - "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` uses\n", - "the {py:class}`~autogen_core.model_context.UnboundedChatCompletionContext`\n", - "which sends the full conversation history to the model. To limit the context\n", - "to the last `n` messages, you can use the {py:class}`~autogen_core.model_context.BufferedChatCompletionContext`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_core.model_context import BufferedChatCompletionContext\n", - "\n", - "# Create an agent that uses only the last 5 messages in the context to generate responses.\n", - "agent = AssistantAgent(\n", - " name=\"assistant\",\n", - " model_client=model_client,\n", - " tools=[web_search],\n", - " system_message=\"Use tools to solve tasks.\",\n", - " model_context=BufferedChatCompletionContext(buffer_size=5), # Only use the last 5 messages in the context.\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Other Preset Agents\n", - "\n", - "The following preset agents are available:\n", - "\n", - "- {py:class}`~autogen_agentchat.agents.UserProxyAgent`: An agent that takes user input and returns it as responses.\n", - "- {py:class}`~autogen_agentchat.agents.CodeExecutorAgent`: An agent that can execute code.\n", - "- {py:class}`~autogen_ext.agents.openai.OpenAIAssistantAgent`: An agent that is backed by an OpenAI Assistant, with the ability to use custom tools.\n", - "- {py:class}`~autogen_ext.agents.web_surfer.MultimodalWebSurfer`: A multi-modal agent that can search the web and visit web pages for information.\n", - "- {py:class}`~autogen_ext.agents.file_surfer.FileSurfer`: An agent that can search and browse local files for information.\n", - "- {py:class}`~autogen_ext.agents.video_surfer.VideoSurfer`: An agent that can watch videos for information." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Next Step\n", - "\n", - "Having explored the usage of the {py:class}`~autogen_agentchat.agents.AssistantAgent`, we can now proceed to the next section to learn about the teams feature in AgentChat.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.3" - } + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Agents\n", + "\n", + "AutoGen AgentChat provides a set of preset Agents, each with variations in how an agent might respond to messages.\n", + "All agents share the following attributes and methods:\n", + "\n", + "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.name`: The unique name of the agent.\n", + "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.description`: The description of the agent in text.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: Send the agent a sequence of {py:class}`~autogen_agentchat.messages.ChatMessage` and get a {py:class}`~autogen_agentchat.base.Response`. 
**It is important to note that agents are expected to be stateful and this method is expected to be called with new messages, not the complete history**.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage` followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: Reset the agent to its initial state.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`: convenience methods that call {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` respectively but offer the same interface as [Teams](./teams.ipynb).\n", + "\n", + "See {py:mod}`autogen_agentchat.messages` for more information on AgentChat message types.\n", + "\n", + "\n", + "## Assistant Agent\n", + "\n", + "{py:class}`~autogen_agentchat.agents.AssistantAgent` is a built-in agent that\n", + "uses a language model and has the ability to use tools." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.messages import TextMessage\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_core import CancellationToken\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "# Define a tool that searches the web for information.\n", + "async def web_search(query: str) -> str:\n", + " \"\"\"Find information on the web\"\"\"\n", + " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", + "\n", + "\n", + "# Create an agent that uses the OpenAI GPT-4o model.\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " # api_key=\"YOUR_API_KEY\",\n", + ")\n", + "agent = AssistantAgent(\n", + " name=\"assistant\",\n", + " model_client=model_client,\n", + " tools=[web_search],\n", + " system_message=\"Use tools to solve tasks.\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Getting Responses\n", + "\n", + "We can use the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method to get the agent response to a given message.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=598, completion_tokens=16), content=[FunctionCall(id='call_9UWYM1CgE3ZbnJcSJavNDB79', arguments='{\"query\":\"AutoGen\"}', name='web_search')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_9UWYM1CgE3ZbnJcSJavNDB79', is_error=False)], type='ToolCallExecutionEvent')]\n", + "source='assistant' models_usage=None content='AutoGen is a programming framework for building multi-agent applications.' 
type='ToolCallSummaryMessage'\n" + ] + } + ], + "source": [ + "async def assistant_run() -> None:\n", + " response = await agent.on_messages(\n", + " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", + " cancellation_token=CancellationToken(),\n", + " )\n", + " print(response.inner_messages)\n", + " print(response.chat_message)\n", + "\n", + "\n", + "# Use asyncio.run(assistant_run()) when running in a script.\n", + "await assistant_run()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The call to the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method\n", + "returns a {py:class}`~autogen_agentchat.base.Response`\n", + "that contains the agent's final response in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute,\n", + "as well as a list of inner messages in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` attribute,\n", + "which stores the agent's \"thought process\" that led to the final response.\n", + "\n", + "```{note}\n", + "It is important to note that {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`\n", + "will update the internal state of the agent -- it will add the messages to the agent's\n", + "history. So you should call this method with new messages.\n", + "**You should not repeatedly call this method with the same messages or the complete history.**\n", + "```\n", + "\n", + "```{note}\n", + "Unlike in v0.2 AgentChat, the tools are executed by the same agent directly within\n", + "the same call to {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`.\n", + "By default, the agent will return the result of the tool call as the final response.\n", + "```\n", + "\n", + "You can also call the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method, which is a convenience method that calls {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`. \n", + "It follows the same interface as [Teams](./teams.ipynb) and returns a {py:class}`~autogen_agentchat.base.TaskResult` object." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Multi-Modal Input\n", + "\n", + "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can handle multi-modal input\n", + "by providing the input as a {py:class}`~autogen_agentchat.messages.MultiModalMessage`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from io import BytesIO\n", + "\n", + "import PIL\n", + "import requests\n", + "from autogen_agentchat.messages import MultiModalMessage\n", + "from autogen_core import Image\n", + "\n", + "# Create a multi-modal message with random image and text.\n", + "pil_image = PIL.Image.open(BytesIO(requests.get(\"https://picsum.photos/300/200\").content))\n", + "img = Image(pil_image)\n", + "multi_modal_message = MultiModalMessage(content=[\"Can you describe the content of this image?\", img], source=\"user\")\n", + "img" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The image depicts a vintage car, likely from the 1930s or 1940s, with a sleek, classic design. The car seems to be customized or well-maintained, as indicated by its shiny exterior and lowered stance. 
It has a prominent grille and round headlights. There's a license plate on the front with the text \"FARMER BOY.\" The setting appears to be a street with old-style buildings in the background, suggesting a historical or retro theme.\n" + ] + } + ], + "source": [ + "# Use asyncio.run(...) when running in a script.\n", + "response = await agent.on_messages([multi_modal_message], CancellationToken())\n", + "print(response.chat_message.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also use {py:class}`~autogen_agentchat.messages.MultiModalMessage` as a `task`\n", + "input to the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Streaming Messages\n", + "\n", + "We can also stream each message as it is generated by the agent by using the\n", + "{py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method,\n", + "and use {py:class}`~autogen_agentchat.ui.Console` to print the messages\n", + "as they appear to the console." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- assistant ----------\n", + "[FunctionCall(id='call_fSp5iTGVm2FKw5NIvfECSqNd', arguments='{\"query\":\"AutoGen information\"}', name='web_search')]\n", + "[Prompt tokens: 61, Completion tokens: 16]\n", + "---------- assistant ----------\n", + "[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_fSp5iTGVm2FKw5NIvfECSqNd')]\n", + "---------- assistant ----------\n", + "AutoGen is a programming framework designed for building multi-agent applications. 
If you need more detailed information or specific aspects about AutoGen, feel free to ask!\n", + "[Prompt tokens: 93, Completion tokens: 32]\n", + "---------- Summary ----------\n", + "Number of inner messages: 2\n", + "Total prompt tokens: 154\n", + "Total completion tokens: 48\n", + "Duration: 4.30 seconds\n" + ] + } + ], + "source": [ + "async def assistant_run_stream() -> None:\n", + " # Option 1: read each message from the stream (as shown in the previous example).\n", + " # async for message in agent.on_messages_stream(\n", + " # [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", + " # cancellation_token=CancellationToken(),\n", + " # ):\n", + " # print(message)\n", + "\n", + " # Option 2: use Console to print all messages as they appear.\n", + " await Console(\n", + " agent.on_messages_stream(\n", + " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", + " cancellation_token=CancellationToken(),\n", + " ),\n", + " output_stats=True, # Enable stats printing.\n", + " )\n", + "\n", + "\n", + "# Use asyncio.run(assistant_run_stream()) when running in a script.\n", + "await assistant_run_stream()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method\n", + "returns an asynchronous generator that yields each inner message generated by the agent,\n", + "with the final item being the response message in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute.\n", + "\n", + "From the messages, you can observe that the assistant agent utilized the `web_search` tool to\n", + "gather information and responded based on the search results.\n", + "\n", + "You can also use {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` to get the same streaming behavior as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`. It follows the same interface as [Teams](./teams.ipynb)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Tools\n", + "\n", + "Large Language Models (LLMs) are typically limited to generating text or code responses. \n", + "However, many complex tasks benefit from the ability to use external tools that perform specific actions,\n", + "such as fetching data from APIs or databases.\n", + "\n", + "To address this limitation, modern LLMs can now accept a list of available tool schemas \n", + "(descriptions of tools and their arguments) and generate a tool call message. 
\n", + "This capability is known as **Tool Calling** or **Function Calling** and \n", + "is becoming a popular pattern in building intelligent agent-based applications.\n", + "Refer to the documentation from [OpenAI](https://platform.openai.com/docs/guides/function-calling) \n", + "and [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) for more information about tool calling in LLMs.\n", + "\n", + "In AgentChat, the {py:class}`~autogen_agentchat.agents.AssistantAgent` can use tools to perform specific actions.\n", + "The `web_search` tool is one such tool that allows the assistant agent to search the web for information.\n", + "A custom tool can be a Python function or a subclass of the {py:class}`~autogen_core.tools.BaseTool`.\n", + "\n", + "```{note}\n", + "For how to use model clients directly with tools, refer to the [Tools](../../core-user-guide/components/tools.ipynb) section\n", + "in the Core User Guide.\n", + "```\n", + "\n", + "By default, when {py:class}`~autogen_agentchat.agents.AssistantAgent` executes a tool,\n", + "it will return the tool's output as a string in {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage` in its response.\n", + "If your tool does not return a well-formed string in natural language, you\n", + "can add a reflection step to have the model summarize the tool's output,\n", + "by setting the `reflect_on_tool_use=True` parameter in the {py:class}`~autogen_agentchat.agents.AssistantAgent` constructor.\n", + "\n", + "### Built-in Tools\n", + "\n", + "AutoGen Extension provides a set of built-in tools that can be used with the Assistant Agent.\n", + "Head over to the [API documentation](../../../reference/index.md) for all the available tools\n", + "under the `autogen_ext.tools` namespace. For example, you can find the following tools:\n", + "\n", + "- {py:mod}`~autogen_ext.tools.graphrag`: Tools for using GraphRAG index.\n", + "- {py:mod}`~autogen_ext.tools.http`: Tools for making HTTP requests.\n", + "- {py:mod}`~autogen_ext.tools.langchain`: Adaptor for using LangChain tools.\n", + "- {py:mod}`~autogen_ext.tools.mcp`: Tools for using Model Chat Protocol (MCP) servers." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Function Tool\n", + "\n", + "The {py:class}`~autogen_agentchat.agents.AssistantAgent` automatically\n", + "converts a Python function into a {py:class}`~autogen_core.tools.FunctionTool`\n", + "which can be used as a tool by the agent and automatically generates the tool schema\n", + "from the function signature and docstring.\n", + "\n", + "The `web_search_func` tool is an example of a function tool.\n", + "The schema is automatically generated." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'name': 'web_search_func',\n", + " 'description': 'Find information on the web',\n", + " 'parameters': {'type': 'object',\n", + " 'properties': {'query': {'description': 'query',\n", + " 'title': 'Query',\n", + " 'type': 'string'}},\n", + " 'required': ['query'],\n", + " 'additionalProperties': False},\n", + " 'strict': False}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from autogen_core.tools import FunctionTool\n", + "\n", + "\n", + "# Define a tool using a Python function.\n", + "async def web_search_func(query: str) -> str:\n", + " \"\"\"Find information on the web\"\"\"\n", + " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", + "\n", + "\n", + "# This step is automatically performed inside the AssistantAgent if the tool is a Python function.\n", + "web_search_function_tool = FunctionTool(web_search_func, description=\"Find information on the web\")\n", + "# The schema is provided to the model during AssistantAgent's on_messages call.\n", + "web_search_function_tool.schema" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Model Context Protocol Tools\n", + "\n", + "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can also use tools that are\n", + "served from a Model Context Protocol (MCP) server\n", + "using {py:func}`~autogen_ext.tools.mcp.mcp_server_tools`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Seattle, located in Washington state, is the most populous city in the state and a major city in the Pacific Northwest region of the United States. It's known for its vibrant cultural scene, significant economic presence, and rich history. Here are some key points about Seattle from the Wikipedia page:\n", + "\n", + "1. **History and Geography**: Seattle is situated between Puget Sound and Lake Washington, with the Cascade Range to the east and the Olympic Mountains to the west. Its history is deeply rooted in Native American heritage and its development was accelerated with the arrival of settlers in the 19th century. The city was officially incorporated in 1869.\n", + "\n", + "2. **Economy**: Seattle is a major economic hub with a diverse economy anchored by sectors like aerospace, technology, and retail. It's home to influential companies such as Amazon and Starbucks, and has a significant impact on the tech industry due to companies like Microsoft and other technology enterprises in the surrounding area.\n", + "\n", + "3. **Cultural Significance**: Known for its music scene, Seattle was the birthplace of grunge music in the early 1990s. It also boasts significant attractions like the Space Needle, Pike Place Market, and the Seattle Art Museum. \n", + "\n", + "4. **Education and Innovation**: The city hosts important educational institutions, with the University of Washington being a leading research university. Seattle is recognized for fostering innovation and is a leader in environmental sustainability efforts.\n", + "\n", + "5. **Demographics and Diversity**: Seattle is noted for its diverse population, reflected in its rich cultural tapestry. 
It has seen a significant increase in population, leading to urban development and changes in its social landscape.\n", + "\n", + "These points highlight Seattle as a dynamic city with a significant cultural, economic, and educational influence within the United States and beyond.\n" + ] + } + ], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "from autogen_ext.tools.mcp import StdioServerParams, mcp_server_tools\n", + "\n", + "# Get the fetch tool from mcp-server-fetch.\n", + "fetch_mcp_server = StdioServerParams(command=\"uvx\", args=[\"mcp-server-fetch\"])\n", + "tools = await mcp_server_tools(fetch_mcp_server)\n", + "\n", + "# Create an agent that can use the fetch tool.\n", + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "agent = AssistantAgent(name=\"fetcher\", model_client=model_client, tools=tools, reflect_on_tool_use=True) # type: ignore\n", + "\n", + "# Let the agent fetch the content of a URL and summarize it.\n", + "result = await agent.run(task=\"Summarize the content of https://en.wikipedia.org/wiki/Seattle\")\n", + "print(result.messages[-1].content)\n", + "\n", + "# Close the connection to the model client.\n", + "await model_client.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Langchain Tools\n", + "\n", + "You can also use tools from the Langchain library\n", + "by wrapping them in {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- assistant ----------\n", + "[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')]\n", + "[Prompt tokens: 111, Completion tokens: 22]\n", + "---------- assistant ----------\n", + "[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')]\n", + "---------- assistant ----------\n", + "29.69911764705882\n", + "---------- Summary ----------\n", + "Number of inner messages: 2\n", + "Total prompt tokens: 111\n", + "Total completion tokens: 22\n", + "Duration: 0.62 seconds\n" + ] }, - "nbformat": 4, - "nbformat_minor": 2 -} \ No newline at end of file + { + "data": { + "text/plain": [ + "Response(chat_message=ToolCallSummaryMessage(source='assistant', models_usage=None, content='29.69911764705882', type='ToolCallSummaryMessage'), inner_messages=[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=111, completion_tokens=22), content=[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')], type='ToolCallExecutionEvent')])" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import pandas as pd\n", + "from autogen_ext.tools.langchain import LangChainToolAdapter\n", + "from langchain_experimental.tools.python.tool import PythonAstREPLTool\n", + "\n", + "df = pd.read_csv(\"https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv\")\n", + "tool = LangChainToolAdapter(PythonAstREPLTool(locals={\"df\": df}))\n", + "model_client = 
OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "agent = AssistantAgent(\n", + " \"assistant\", tools=[tool], model_client=model_client, system_message=\"Use the `df` variable to access the dataset.\"\n", + ")\n", + "await Console(\n", + " agent.on_messages_stream(\n", + " [TextMessage(content=\"What's the average age of the passengers?\", source=\"user\")], CancellationToken()\n", + " ),\n", + " output_stats=True,\n", + ")\n", + "\n", + "await model_client.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Parallel Tool Calls\n", + "\n", + "Some models support parallel tool calls, which can be useful for tasks that require multiple tools to be called simultaneously.\n", + "By default, if the model client produces multiple tool calls, {py:class}`~autogen_agentchat.agents.AssistantAgent`\n", + "will call the tools in parallel.\n", + "\n", + "You may want to disable parallel tool calls when the tools have side effects that may interfere with each other, or\n", + "when agent behavior needs to be consistent across different models.\n", + "This should be done at the model client level.\n", + "\n", + "For {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient` and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`,\n", + "set `parallel_tool_calls=False` to disable parallel tool calls." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model_client_no_parallel_tool_call = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " parallel_tool_calls=False, # type: ignore\n", + ")\n", + "agent_no_parallel_tool_call = AssistantAgent(\n", + " name=\"assistant\",\n", + " model_client=model_client_no_parallel_tool_call,\n", + " tools=[web_search],\n", + " system_message=\"Use tools to solve tasks.\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Running an Agent in a Loop\n", + "\n", + "The {py:class}`~autogen_agentchat.agents.AssistantAgent` executes one\n", + "step at a time: one model call, followed by one tool call (or parallel tool calls), and then\n", + "an optional reflection.\n", + "\n", + "To run it in a loop, for example until it stops producing\n", + "tool calls, refer to [Single-Agent Team](./teams.ipynb#single-agent-team)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Structured Output\n", + "\n", + "Structured output allows models to return structured JSON text with a pre-defined schema\n", + "provided by the application. Unlike JSON mode, the schema can be provided\n", + "as a [Pydantic BaseModel](https://docs.pydantic.dev/latest/concepts/models/)\n", + "class, which can also be used to validate the output. \n", + "\n", + "```{note}\n", + "Structured output is only available for models that support it. It also\n", + "requires the model client to support structured output.\n", + "Currently, the {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient`\n", + "and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`\n", + "support structured output.\n", + "```\n", + "\n", + "Structured output is also useful for incorporating Chain-of-Thought\n", + "reasoning in the agent's responses.\n", + "See the example below for how to use structured output with the assistant agent."
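Because the response format is a plain Pydantic class, the agent's final JSON reply can also be validated after the run completes. The snippet below is a minimal sketch, assuming the `AgentResponse` class defined in the example that follows, and assuming the run result is captured, e.g. `result = await Console(agent.run_stream(task="I am happy."))`.

```python
from pydantic import ValidationError

# Sketch: parse the final message of the captured TaskResult back into
# the AgentResponse schema; ValidationError signals a non-conforming reply.
try:
    parsed = AgentResponse.model_validate_json(result.messages[-1].content)
    print(parsed.thoughts)
    print(parsed.response)  # One of "happy", "sad", "neutral".
except ValidationError as error:
    print(f"Reply did not match the schema: {error}")
```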
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "I am happy.\n", + "---------- assistant ----------\n", + "{\"thoughts\":\"The user explicitly states that they are happy.\",\"response\":\"happy\"}\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='I am happy.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=89, completion_tokens=18), content='{\"thoughts\":\"The user explicitly states that they are happy.\",\"response\":\"happy\"}', type='TextMessage')], stop_reason=None)" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from typing import Literal\n", + "\n", + "from pydantic import BaseModel\n", + "\n", + "\n", + "# The response format for the agent as a Pydantic base model.\n", + "class AgentResponse(BaseModel):\n", + " thoughts: str\n", + " response: Literal[\"happy\", \"sad\", \"neutral\"]\n", + "\n", + "\n", + "# Create an agent that uses the OpenAI GPT-4o model with the custom response format.\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " response_format=AgentResponse, # type: ignore\n", + ")\n", + "agent = AssistantAgent(\n", + " \"assistant\",\n", + " model_client=model_client,\n", + " system_message=\"Categorize the input as happy, sad, or neutral following the JSON format.\",\n", + ")\n", + "\n", + "await Console(agent.run_stream(task=\"I am happy.\"))\n", + "await model_client.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Streaming Tokens\n", + "\n", + "You can stream the tokens generated by the model client by setting `model_client_stream=True`.\n", + "This will cause the agent to yield {py:class}`~autogen_agentchat.messages.ModelClientStreamingChunkEvent` messages\n", + "in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`.\n", + "\n", + "The underlying model API must support streaming tokens for this to work.\n", + "Please check with your model provider to see if this is supported." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' South' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Buenos' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Aires' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Argentina' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' São' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Paulo' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Brazil' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", + "Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in South America are Buenos Aires in Argentina and São Paulo in Brazil.', type='TextMessage'), inner_messages=[])\n" + ] + } + ], + "source": [ + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "\n", + "streaming_assistant = AssistantAgent(\n", + " name=\"assistant\",\n", + " model_client=model_client,\n", + " system_message=\"You are a helpful assistant.\",\n", + " model_client_stream=True, # Enable streaming tokens.\n", + ")\n", + "\n", + "# Use an async function and asyncio.run() in a script.\n", + "async for message in streaming_assistant.on_messages_stream( # type: ignore\n", + " [TextMessage(content=\"Name two cities in South America\", source=\"user\")],\n", + " cancellation_token=CancellationToken(),\n", + "):\n", + " print(message)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see the streaming chunks in the output above.\n", + "The chunks are generated by the model client and are yielded by the agent as they are received.\n", + "The final response, the concatenation of all the chunks, is yielded right after the last chunk.\n", + "\n", + "Similarly, {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` will also yield the same streaming chunks,\n", + "followed by a full text message right after the last chunk." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "source='user' models_usage=None content='Name two cities in North America.' 
type='TextMessage'\n", + "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' North' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' New' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' York' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' City' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' the' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' United' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' States' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Toronto' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Canada' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) content='Two cities in North America are New York City in the United States and Toronto in Canada.' type='TextMessage'\n", + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Name two cities in North America.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in North America are New York City in the United States and Toronto in Canada.', type='TextMessage')], stop_reason=None)\n" + ] + } + ], + "source": [ + "async for message in streaming_assistant.run_stream(task=\"Name two cities in North America.\"): # type: ignore\n", + " print(message)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Model Context\n", + "\n", + "{py:class}`~autogen_agentchat.agents.AssistantAgent` has a `model_context`\n", + "parameter that can be used to pass in a {py:class}`~autogen_core.model_context.ChatCompletionContext`\n", + "object. This allows the agent to use different model contexts, such as\n", + "{py:class}`~autogen_core.model_context.BufferedChatCompletionContext` to\n", + "limit the context sent to the model.\n", + "\n", + "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` uses\n", + "the {py:class}`~autogen_core.model_context.UnboundedChatCompletionContext`\n", + "which sends the full conversation history to the model. To limit the context\n", + "to the last `n` messages, you can use the {py:class}`~autogen_core.model_context.BufferedChatCompletionContext`." 
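The truncation behavior can be checked on the context object by itself, before wiring it into an agent as the next cell does. This is a minimal sketch, assuming the `add_message` and `get_messages` methods of the context API:

```python
from autogen_core.model_context import BufferedChatCompletionContext
from autogen_core.models import UserMessage

# Sketch: the buffered context stores the history but returns only the
# last `buffer_size` messages when the model prompt is assembled.
context = BufferedChatCompletionContext(buffer_size=2)
for i in range(5):
    await context.add_message(UserMessage(content=f"message {i}", source="user"))

recent = await context.get_messages()
print(len(recent))  # Expected: 2, the two most recent messages.
```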
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_core.model_context import BufferedChatCompletionContext\n", + "\n", + "# Create an agent that uses only the last 5 messages in the context to generate responses.\n", + "agent = AssistantAgent(\n", + " name=\"assistant\",\n", + " model_client=model_client,\n", + " tools=[web_search],\n", + " system_message=\"Use tools to solve tasks.\",\n", + " model_context=BufferedChatCompletionContext(buffer_size=5), # Only use the last 5 messages in the context.\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Other Preset Agents\n", + "\n", + "The following preset agents are available:\n", + "\n", + "- {py:class}`~autogen_agentchat.agents.UserProxyAgent`: An agent that takes user input and returns it as responses.\n", + "- {py:class}`~autogen_agentchat.agents.CodeExecutorAgent`: An agent that can execute code.\n", + "- {py:class}`~autogen_ext.agents.openai.OpenAIAssistantAgent`: An agent that is backed by an OpenAI Assistant, with the ability to use custom tools.\n", + "- {py:class}`~autogen_ext.agents.web_surfer.MultimodalWebSurfer`: A multi-modal agent that can search the web and visit web pages for information.\n", + "- {py:class}`~autogen_ext.agents.file_surfer.FileSurfer`: An agent that can search and browse local files for information.\n", + "- {py:class}`~autogen_ext.agents.video_surfer.VideoSurfer`: An agent that can watch videos for information." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Step\n", + "\n", + "Having explored the usage of the {py:class}`~autogen_agentchat.agents.AssistantAgent`, we can now proceed to the next section to learn about the teams feature in AgentChat.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb index a9355ab26345..a3f5a25519dd 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/messages.ipynb @@ -1,128 +1,129 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Messages" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In AutoGen AgentChat, _messages_ facilitate communication and information exchange with other agents, orchestrators, and applications. AgentChat supports various message types, each designed for specific purposes." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Types of Messages\n", - "\n", - "At a high level, messages in AgentChat can be categorized into two types: agent-agent messages and an agent's internal events and messages.\n", - "\n", - "### Agent-Agent Messages\n", - "AgentChat supports many message types for agent-to-agent communication. 
They belong to the subclasses of {py:class}`~autogen_agentchat.messages.ChatMessage`. This message type allows both text and multimodal communication and subsumes other message types, such as {py:class}`~autogen_agentchat.messages.TextMessage` or {py:class}`~autogen_agentchat.messages.MultiModalMessage`.\n", - "\n", - "For example, the following code snippet demonstrates how to create a text message, which accepts a string content and a string source:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.messages import TextMessage\n", - "\n", - "text_message = TextMessage(content=\"Hello, world!\", source=\"User\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Similarly, the following code snippet demonstrates how to create a multimodal message, which accepts\n", - "a list of strings or {py:class}`~autogen_core.Image` objects:" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from io import BytesIO\n", - "\n", - "import requests\n", - "from autogen_agentchat.messages import MultiModalMessage\n", - "from autogen_core import Image as AGImage\n", - "from PIL import Image\n", - "\n", - "pil_image = Image.open(BytesIO(requests.get(\"https://picsum.photos/300/200\").content))\n", - "img = AGImage(pil_image)\n", - "multi_modal_message = MultiModalMessage(content=[\"Can you describe the content of this image?\", img], source=\"User\")\n", - "img" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The {py:class}`~autogen_agentchat.messages.TextMessage` and {py:class}`~autogen_agentchat.messages.MultiModalMessage` we have created can be passed to agents directly via the {py:class}`~autogen_agentchat.base.ChatAgent.on_messages` method, or as tasks given to a team {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` method. Messages are also used in the responses of an agent. We will explain these in more detail in [Agents](./agents.ipynb) and [Teams](./teams.ipynb)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Internal Events\n", - "\n", - "AgentChat also supports the concept of `events` - messages that are internal to an agent. These messages are used to communicate events and information on actions _within_ the agent itself, and belong to the subclasses of {py:class}`~autogen_agentchat.messages.BaseAgentEvent`.\n", - "\n", - "Examples of these include {py:class}`~autogen_agentchat.messages.ToolCallRequestEvent`, which indicates that a request was made to call a tool, and {py:class}`~autogen_agentchat.messages.ToolCallExecutionEvent`, which contains the results of tool calls.\n", - "\n", - "Typically, events are created by the agent itself and are contained in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response` returned from {py:class}`~autogen_agentchat.base.ChatAgent.on_messages`. If you are building a custom agent and have events that you want to communicate to other entities (e.g., a UI), you can include these in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response`. 
We will show examples of this in [Custom Agents](../custom-agents.ipynb).\n", - "\n", - "\n", - "You can read about the full set of messages supported in AgentChat in the {py:mod}`~autogen_agentchat.messages` module. \n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.7" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} \ No newline at end of file + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Messages" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In AutoGen AgentChat, _messages_ facilitate communication and information exchange with other agents, orchestrators, and applications. AgentChat supports various message types, each designed for specific purposes." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Types of Messages\n", + "\n", + "At a high level, messages in AgentChat can be categorized into two types: agent-agent messages and an agent's internal events and messages.\n", + "\n", + "### Agent-Agent Messages\n", + "AgentChat supports many message types for agent-to-agent communication. They belong to the union type {py:class}`~autogen_agentchat.messages.ChatMessage`. This message type allows both text and multimodal communication and subsumes other message types, such as {py:class}`~autogen_agentchat.messages.TextMessage` or {py:class}`~autogen_agentchat.messages.MultiModalMessage`.\n", + "\n", + "For example, the following code snippet demonstrates how to create a text message, which accepts a string content and a string source:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.messages import TextMessage\n", + "\n", + "text_message = TextMessage(content=\"Hello, world!\", source=\"User\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Similarly, the following code snippet demonstrates how to create a multimodal message, which accepts\n", + "a list of strings or {py:class}`~autogen_core.Image` objects:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from io import BytesIO\n", + "\n", + "import requests\n", + "from autogen_agentchat.messages import MultiModalMessage\n", + "from autogen_core import Image as AGImage\n", + "from PIL import Image\n", + "\n", + "pil_image = Image.open(BytesIO(requests.get(\"https://picsum.photos/300/200\").content))\n", + "img = AGImage(pil_image)\n", + "multi_modal_message = MultiModalMessage(content=[\"Can you describe the content of this image?\", img], source=\"User\")\n", + "img" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The {py:class}`~autogen_agentchat.messages.TextMessage` and {py:class}`~autogen_agentchat.messages.MultiModalMessage` we have created can be passed to agents directly via the {py:class}`~autogen_agentchat.base.ChatAgent.on_messages` method, or as tasks given to a team {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` method. 
Messages are also used in the responses of an agent. We will explain these in more detail in [Agents](./agents.ipynb) and [Teams](./teams.ipynb)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Internal Events\n", + "\n", + "AgentChat also supports the concept of `events` - messages that are internal to an agent. These messages are used to communicate events and information on actions _within_ the agent itself, and belong to the union type {py:class}`~autogen_agentchat.messages.AgentEvent`.\n", + "\n", + "Examples of these include {py:class}`~autogen_agentchat.messages.ToolCallRequestEvent`, which indicates that a request was made to call a tool, and {py:class}`~autogen_agentchat.messages.ToolCallExecutionEvent`, which contains the results of tool calls.\n", + "\n", + "Typically, events are created by the agent itself and are contained in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response` returned from {py:class}`~autogen_agentchat.base.ChatAgent.on_messages`. If you are building a custom agent and have events that you want to communicate to other entities (e.g., a UI), you can include these in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response`. We will show examples of this in [Custom Agents](../custom-agents.ipynb).\n", + "\n", + "\n", + "You can read about the full set of messages supported in AgentChat in the {py:mod}`~autogen_agentchat.messages` module. \n", + "" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "agnext", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb index 5defef4a0409..8c9701550dad 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb @@ -1,517 +1,517 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Termination \n", - "\n", - "In the previous section, we explored how to define agents, and organize them into teams that can solve tasks. However, a run can go on forever, and in many cases, we need to know _when_ to stop them. 
This is the role of the termination condition.\n", - "\n", - "AgentChat supports several termination conditions by providing a base {py:class}`~autogen_agentchat.base.TerminationCondition` class and several implementations that inherit from it.\n", - "\n", - "A termination condition is a callable that takes a sequence of {py:class}`~autogen_agentchat.messages.ChatMessage` or {py:class}`~autogen_agentchat.messages.AgentEvent` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n", - "Once a termination condition has been reached, it must be reset by calling {py:meth}`~autogen_agentchat.base.TerminationCondition.reset` before it can be used again.\n", - "\n", - "Some important things to note about termination conditions: \n", - "- They are stateful but reset automatically after each run ({py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`) is finished.\n", - "- They can be combined using the AND and OR operators.\n", - "\n", - "```{note}\n", - "For group chat teams (i.e., {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat`, and {py:class}`~autogen_agentchat.teams.Swarm`),\n", - "the termination condition is called after each agent responds.\n", - "While a response may contain multiple inner messages, the team calls its termination condition just once for all the messages from a single response.\n", - "So the condition is called with the \"delta sequence\" of messages since the last time it was called.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Built-In Termination Conditions: \n", - "1. {py:class}`~autogen_agentchat.conditions.MaxMessageTermination`: Stops after a specified number of messages have been produced, including both agent and task messages.\n", - "2. {py:class}`~autogen_agentchat.conditions.TextMentionTermination`: Stops when a specific text string is mentioned in a message (e.g., \"TERMINATE\").\n", - "3. {py:class}`~autogen_agentchat.conditions.TokenUsageTermination`: Stops when a certain number of prompt or completion tokens are used. This requires the agents to report token usage in their messages.\n", - "4. {py:class}`~autogen_agentchat.conditions.TimeoutTermination`: Stops after a specified duration in seconds.\n", - "5. {py:class}`~autogen_agentchat.conditions.HandoffTermination`: Stops when a handoff to a specific target is requested. Handoff messages can be used to build patterns such as {py:class}`~autogen_agentchat.teams.Swarm`. This is useful when you want to pause the run and allow the application or user to provide input when an agent hands off to them.\n", - "6. {py:class}`~autogen_agentchat.conditions.SourceMatchTermination`: Stops after a specific agent responds.\n", - "7. {py:class}`~autogen_agentchat.conditions.ExternalTermination`: Enables programmatic control of termination from outside the run. This is useful for UI integration (e.g., \"Stop\" buttons in chat interfaces).\n", - "8. {py:class}`~autogen_agentchat.conditions.StopMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.StopMessage` is produced by an agent.\n", - "9. {py:class}`~autogen_agentchat.conditions.TextMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.TextMessage` is produced by an agent.\n", - "10. 
{py:class}`~autogen_agentchat.conditions.FunctionCallTermination`: Stops when a {py:class}`~autogen_agentchat.messages.ToolCallExecutionEvent` containing a {py:class}`~autogen_core.models.FunctionExecutionResult` with a matching name is produced by an agent." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Basic Usage\n", - "\n", - "To demonstrate the characteristics of termination conditions, we'll create a team consisting of two agents: a primary agent responsible for text generation and a critic agent that reviews and provides feedback on the generated text." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "model_client = OpenAIChatCompletionClient(\n", - "    model=\"gpt-4o\",\n", - "    temperature=1,\n", - "    # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", - ")\n", - "\n", - "# Create the primary agent.\n", - "primary_agent = AssistantAgent(\n", - "    \"primary\",\n", - "    model_client=model_client,\n", - "    system_message=\"You are a helpful AI assistant.\",\n", - ")\n", - "\n", - "# Create the critic agent.\n", - "critic_agent = AssistantAgent(\n", - "    \"critic\",\n", - "    model_client=model_client,\n", - "    system_message=\"Provide constructive feedback for every message. Respond with 'APPROVE' when your feedback is addressed.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's explore how termination conditions automatically reset after each `run` or `run_stream` call, allowing the team to resume its conversation from where it left off." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a unique, Haiku about the weather in Paris\n", - "---------- primary ----------\n", - "Gentle rain whispers, \n", - "Cobblestones glisten softly— \n", - "Paris dreams in gray.\n", - "[Prompt tokens: 30, Completion tokens: 19]\n", - "---------- critic ----------\n", - "The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. 
Consider revising the first line to fit the structure.\n", - "\n", - "For example:\n", - "Soft rain whispers down, \n", - "Cobblestones glisten softly — \n", - "Paris dreams in gray.\n", - "\n", - "This revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\n", - "[Prompt tokens: 70, Completion tokens: 120]\n", - "---------- Summary ----------\n", - "Number of messages: 3\n", - "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", - "Total prompt tokens: 100\n", - "Total completion tokens: 139\n", - "Duration: 3.34 seconds\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=19), content='Gentle rain whispers, \\nCobblestones glisten softly— \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=70, completion_tokens=120), content=\"The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. Consider revising the first line to fit the structure.\\n\\nFor example:\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.\\n\\nThis revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "max_msg_termination = MaxMessageTermination(max_messages=3)\n", - "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=max_msg_termination)\n", - "\n", - "# Use asyncio.run(...) if you are running this script as a standalone script.\n", - "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The conversation stopped after reaching the maximum message limit. Since the primary agent didn't get to respond to the feedback, let's continue the conversation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- primary ----------\n", - "Thank you for your feedback. Here is the revised Haiku:\n", - "\n", - "Soft rain whispers down, \n", - "Cobblestones glisten softly — \n", - "Paris dreams in gray.\n", - "[Prompt tokens: 181, Completion tokens: 32]\n", - "---------- critic ----------\n", - "The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \n", - "\n", - "APPROVE\n", - "[Prompt tokens: 234, Completion tokens: 54]\n", - "---------- primary ----------\n", - "Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. 
If you have any more requests or need further assistance, feel free to ask!\n", - "[Prompt tokens: 279, Completion tokens: 39]\n", - "---------- Summary ----------\n", - "Number of messages: 3\n", - "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", - "Total prompt tokens: 694\n", - "Total completion tokens: 125\n", - "Duration: 6.43 seconds\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=181, completion_tokens=32), content='Thank you for your feedback. Here is the revised Haiku:\\n\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=234, completion_tokens=54), content='The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \\n\\nAPPROVE'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=279, completion_tokens=39), content=\"Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Use asyncio.run(...) if you are running this script as a standalone script.\n", - "await Console(round_robin_team.run_stream())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The team continued from where it left off, allowing the primary agent to respond to the feedback." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Combining Termination Conditions\n", - "\n", - "Let's show how termination conditions can be combined using the AND (`&`) and OR (`|`) operators to create more complex termination logic. For example, we'll create a team that stops either after 10 messages are generated or when the critic agent approves a message.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a unique, Haiku about the weather in Paris\n", - "---------- primary ----------\n", - "Spring breeze gently hums, \n", - "Cherry blossoms in full bloom— \n", - "Paris wakes to life.\n", - "[Prompt tokens: 467, Completion tokens: 19]\n", - "---------- critic ----------\n", - "The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. 
Excellent work!\n", - "\n", - "APPROVE\n", - "[Prompt tokens: 746, Completion tokens: 93]\n", - "---------- Summary ----------\n", - "Number of messages: 3\n", - "Finish reason: Text 'APPROVE' mentioned\n", - "Total prompt tokens: 1213\n", - "Total completion tokens: 112\n", - "Duration: 2.75 seconds\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=467, completion_tokens=19), content='Spring breeze gently hums, \\nCherry blossoms in full bloom— \\nParis wakes to life.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=746, completion_tokens=93), content='The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\\n\\nAPPROVE')], stop_reason=\"Text 'APPROVE' mentioned\")" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "max_msg_termination = MaxMessageTermination(max_messages=10)\n", - "text_termination = TextMentionTermination(\"APPROVE\")\n", - "combined_termination = max_msg_termination | text_termination\n", - "\n", - "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=combined_termination)\n", - "\n", - "# Use asyncio.run(...) if you are running this script as a standalone script.\n", - "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The conversation stopped after the critic agent approved the message, although it could have also stopped if 10 messages were generated.\n", - "\n", - "Alternatively, if we want to stop the run only when both conditions are met, we can use the AND (`&`) operator." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "combined_termination = max_msg_termination & text_termination" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom Termination Condition\n", - "\n", - "The built-in termination conditions are sufficient for most use cases.\n", - "However, there may be cases where you need to implement a custom termination condition that doesn't fit into the existing ones.\n", - "You can do this by subclassing the {py:class}`~autogen_agentchat.base.TerminationCondition` class.\n", - "\n", - "In this example, we create a custom termination condition that stops the conversation when\n", - "a specific function call is made." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Sequence\n", - "\n", - "from autogen_agentchat.base import TerminatedException, TerminationCondition\n", - "from autogen_agentchat.messages import AgentEvent, ChatMessage, StopMessage, ToolCallExecutionEvent\n", - "from autogen_core import Component\n", - "from pydantic import BaseModel\n", - "from typing_extensions import Self\n", - "\n", - "\n", - "class FunctionCallTerminationConfig(BaseModel):\n", - "    \"\"\"Configuration for the termination condition to allow for serialization\n", - "    and deserialization of the component.\n", - "    \"\"\"\n", - "\n", - "    function_name: str\n", - "\n", - "\n", - "class FunctionCallTermination(TerminationCondition, Component[FunctionCallTerminationConfig]):\n", - "    \"\"\"Terminate the conversation if a FunctionExecutionResult with a specific name is received.\"\"\"\n", - "\n", - "    component_config_schema = FunctionCallTerminationConfig\n", - "    \"\"\"The schema for the component configuration.\"\"\"\n", - "\n", - "    def __init__(self, function_name: str) -> None:\n", - "        self._terminated = False\n", - "        self._function_name = function_name\n", - "\n", - "    @property\n", - "    def terminated(self) -> bool:\n", - "        return self._terminated\n", - "\n", - "    async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None:\n", - "        if self._terminated:\n", - "            raise TerminatedException(\"Termination condition has already been reached\")\n", - "        for message in messages:\n", - "            if isinstance(message, ToolCallExecutionEvent):\n", - "                for execution in message.content:\n", - "                    if execution.name == self._function_name:\n", - "                        self._terminated = True\n", - "                        return StopMessage(\n", - "                            content=f\"Function '{self._function_name}' was executed.\",\n", - "                            source=\"FunctionCallTermination\",\n", - "                        )\n", - "        return None\n", - "\n", - "    async def reset(self) -> None:\n", - "        self._terminated = False\n", - "\n", - "    def _to_config(self) -> FunctionCallTerminationConfig:\n", - "        return FunctionCallTerminationConfig(\n", - "            function_name=self._function_name,\n", - "        )\n", - "\n", - "    @classmethod\n", - "    def _from_config(cls, config: FunctionCallTerminationConfig) -> Self:\n", - "        return cls(\n", - "            function_name=config.function_name,\n", - "        )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's use this new termination condition to stop the conversation when the critic agent approves a message\n", - "using the `approve` function call.\n", - "\n", - "First, we create a simple function that will be called when the critic agent approves a message." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "def approve() -> None:\n", - "    \"\"\"Approve the message when all feedback has been addressed.\"\"\"\n", - "    pass" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then we create the agents. The critic agent is equipped with the `approve` tool." 
- ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "model_client = OpenAIChatCompletionClient(\n", - "    model=\"gpt-4o\",\n", - "    temperature=1,\n", - "    # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", - ")\n", - "\n", - "# Create the primary agent.\n", - "primary_agent = AssistantAgent(\n", - "    \"primary\",\n", - "    model_client=model_client,\n", - "    system_message=\"You are a helpful AI assistant.\",\n", - ")\n", - "\n", - "# Create the critic agent with the approve function as a tool.\n", - "critic_agent = AssistantAgent(\n", - "    \"critic\",\n", - "    model_client=model_client,\n", - "    tools=[approve],  # Register the approve function as a tool.\n", - "    system_message=\"Provide constructive feedback. Use the approve tool to approve when all feedback is addressed.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, we create the termination condition and the team.\n", - "We run the team with the poem-writing task." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a unique, Haiku about the weather in Paris\n", - "---------- primary ----------\n", - "Raindrops gently fall, \n", - "Cobblestones shine in dim light— \n", - "Paris dreams in grey. \n", - "---------- critic ----------\n", - "This Haiku beautifully captures a melancholic yet romantic image of Paris in the rain. The use of sensory imagery like \"Raindrops gently fall\" and \"Cobblestones shine\" effectively paints a vivid picture. It could be interesting to experiment with more distinct seasonal elements of Paris, such as incorporating the Seine River or iconic landmarks in the context of the weather. Overall, it successfully conveys the atmosphere of Paris in subtle, poetic imagery.\n", - "---------- primary ----------\n", - "Thank you for your feedback! I’m glad you enjoyed the imagery. Here’s another Haiku that incorporates iconic Parisian elements:\n", - "\n", - "Eiffel stands in mist, \n", - "Seine's ripple mirrors the sky— \n", - "Spring whispers anew. \n", - "---------- critic ----------\n", - "[FunctionCall(id='call_QEWJZ873EG4UIEpsQHi1HsAu', arguments='{}', name='approve')]\n", - "---------- critic ----------\n", - "[FunctionExecutionResult(content='None', name='approve', call_id='call_QEWJZ873EG4UIEpsQHi1HsAu', is_error=False)]\n", - "---------- critic ----------\n", - "None\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Write a unique, Haiku about the weather in Paris', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=23), metadata={}, content='Raindrops gently fall, \\nCobblestones shine in dim light— \\nParis dreams in grey. ', type='TextMessage'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=99, completion_tokens=90), metadata={}, content='This Haiku beautifully captures a melancholic yet romantic image of Paris in the rain. 
The use of sensory imagery like \"Raindrops gently fall\" and \"Cobblestones shine\" effectively paints a vivid picture. It could be interesting to experiment with more distinct seasonal elements of Paris, such as incorporating the Seine River or iconic landmarks in the context of the weather. Overall, it successfully conveys the atmosphere of Paris in subtle, poetic imagery.', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=152, completion_tokens=48), metadata={}, content=\"Thank you for your feedback! I’m glad you enjoyed the imagery. Here’s another Haiku that incorporates iconic Parisian elements:\\n\\nEiffel stands in mist, \\nSeine's ripple mirrors the sky— \\nSpring whispers anew. \", type='TextMessage'), ToolCallRequestEvent(source='critic', models_usage=RequestUsage(prompt_tokens=246, completion_tokens=11), metadata={}, content=[FunctionCall(id='call_QEWJZ873EG4UIEpsQHi1HsAu', arguments='{}', name='approve')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='critic', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='None', name='approve', call_id='call_QEWJZ873EG4UIEpsQHi1HsAu', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='critic', models_usage=None, metadata={}, content='None', type='ToolCallSummaryMessage')], stop_reason=\"Function 'approve' was executed.\")" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "function_call_termination = FunctionCallTermination(function_name=\"approve\")\n", - "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=function_call_termination)\n", - "\n", - "# Use asyncio.run(...) if you are running this script as a standalone script.\n", - "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see that the conversation stopped when the critic agent approved the message using the `approve` function call." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.3" - } + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Termination \n", + "\n", + "In the previous section, we explored how to define agents, and organize them into teams that can solve tasks. However, a run can go on forever, and in many cases, we need to know _when_ to stop them. 
This is the role of the termination condition.\n", + "\n", + "AgentChat supports several termination conditions by providing a base {py:class}`~autogen_agentchat.base.TerminationCondition` class and several implementations that inherit from it.\n", + "\n", + "A termination condition is a callable that takes a sequence of {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n", + "Once a termination condition has been reached, it must be reset by calling {py:meth}`~autogen_agentchat.base.TerminationCondition.reset` before it can be used again.\n", + "\n", + "Some important things to note about termination conditions: \n", + "- They are stateful but reset automatically after each run ({py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`) is finished.\n", + "- They can be combined using the AND and OR operators.\n", + "\n", + "```{note}\n", + "For group chat teams (i.e., {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat`, and {py:class}`~autogen_agentchat.teams.Swarm`),\n", + "the termination condition is called after each agent responds.\n", + "While a response may contain multiple inner messages, the team calls its termination condition just once for all the messages from a single response.\n", + "So the condition is called with the \"delta sequence\" of messages since the last time it was called.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Built-In Termination Conditions: \n", + "1. {py:class}`~autogen_agentchat.conditions.MaxMessageTermination`: Stops after a specified number of messages have been produced, including both agent and task messages.\n", + "2. {py:class}`~autogen_agentchat.conditions.TextMentionTermination`: Stops when a specific text string is mentioned in a message (e.g., \"TERMINATE\").\n", + "3. {py:class}`~autogen_agentchat.conditions.TokenUsageTermination`: Stops when a certain number of prompt or completion tokens are used. This requires the agents to report token usage in their messages.\n", + "4. {py:class}`~autogen_agentchat.conditions.TimeoutTermination`: Stops after a specified duration in seconds.\n", + "5. {py:class}`~autogen_agentchat.conditions.HandoffTermination`: Stops when a handoff to a specific target is requested. Handoff messages can be used to build patterns such as {py:class}`~autogen_agentchat.teams.Swarm`. This is useful when you want to pause the run and allow the application or user to provide input when an agent hands off to them.\n", + "6. {py:class}`~autogen_agentchat.conditions.SourceMatchTermination`: Stops after a specific agent responds.\n", + "7. {py:class}`~autogen_agentchat.conditions.ExternalTermination`: Enables programmatic control of termination from outside the run. This is useful for UI integration (e.g., \"Stop\" buttons in chat interfaces).\n", + "8. {py:class}`~autogen_agentchat.conditions.StopMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.StopMessage` is produced by an agent.\n", + "9. {py:class}`~autogen_agentchat.conditions.TextMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.TextMessage` is produced by an agent.\n", + "10. 
{py:class}`~autogen_agentchat.conditions.FunctionCallTermination`: Stops when a {py:class}`~autogen_agentchat.messages.ToolCallExecutionEvent` containing a {py:class}`~autogen_core.models.FunctionExecutionResult` with a matching name is produced by an agent." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Basic Usage\n", + "\n", + "To demonstrate the characteristics of termination conditions, we'll create a team consisting of two agents: a primary agent responsible for text generation and a critic agent that reviews and provides feedback on the generated text." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", + "from autogen_agentchat.teams import RoundRobinGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "model_client = OpenAIChatCompletionClient(\n", + "    model=\"gpt-4o\",\n", + "    temperature=1,\n", + "    # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", + ")\n", + "\n", + "# Create the primary agent.\n", + "primary_agent = AssistantAgent(\n", + "    \"primary\",\n", + "    model_client=model_client,\n", + "    system_message=\"You are a helpful AI assistant.\",\n", + ")\n", + "\n", + "# Create the critic agent.\n", + "critic_agent = AssistantAgent(\n", + "    \"critic\",\n", + "    model_client=model_client,\n", + "    system_message=\"Provide constructive feedback for every message. Respond with 'APPROVE' when your feedback is addressed.\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's explore how termination conditions automatically reset after each `run` or `run_stream` call, allowing the team to resume its conversation from where it left off." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write a unique, Haiku about the weather in Paris\n", + "---------- primary ----------\n", + "Gentle rain whispers, \n", + "Cobblestones glisten softly— \n", + "Paris dreams in gray.\n", + "[Prompt tokens: 30, Completion tokens: 19]\n", + "---------- critic ----------\n", + "The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. 
Consider revising the first line to fit the structure.\n", + "\n", + "For example:\n", + "Soft rain whispers down, \n", + "Cobblestones glisten softly — \n", + "Paris dreams in gray.\n", + "\n", + "This revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\n", + "[Prompt tokens: 70, Completion tokens: 120]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", + "Total prompt tokens: 100\n", + "Total completion tokens: 139\n", + "Duration: 3.34 seconds\n" + ] }, - "nbformat": 4, - "nbformat_minor": 2 -} \ No newline at end of file + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=19), content='Gentle rain whispers, \\nCobblestones glisten softly— \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=70, completion_tokens=120), content=\"The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. Consider revising the first line to fit the structure.\\n\\nFor example:\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.\\n\\nThis revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "max_msg_termination = MaxMessageTermination(max_messages=3)\n", + "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=max_msg_termination)\n", + "\n", + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The conversation stopped after reaching the maximum message limit. Since the primary agent didn't get to respond to the feedback, let's continue the conversation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- primary ----------\n", + "Thank you for your feedback. Here is the revised Haiku:\n", + "\n", + "Soft rain whispers down, \n", + "Cobblestones glisten softly — \n", + "Paris dreams in gray.\n", + "[Prompt tokens: 181, Completion tokens: 32]\n", + "---------- critic ----------\n", + "The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \n", + "\n", + "APPROVE\n", + "[Prompt tokens: 234, Completion tokens: 54]\n", + "---------- primary ----------\n", + "Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. 
If you have any more requests or need further assistance, feel free to ask!\n", + "[Prompt tokens: 279, Completion tokens: 39]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", + "Total prompt tokens: 694\n", + "Total completion tokens: 125\n", + "Duration: 6.43 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=181, completion_tokens=32), content='Thank you for your feedback. Here is the revised Haiku:\\n\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=234, completion_tokens=54), content='The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \\n\\nAPPROVE'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=279, completion_tokens=39), content=\"Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The team continued from where it left off, allowing the primary agent to respond to the feedback." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Combining Termination Conditions\n", + "\n", + "Let's show how termination conditions can be combined using the AND (`&`) and OR (`|`) operators to create more complex termination logic. For example, we'll create a team that stops either after 10 messages are generated or when the critic agent approves a message.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write a unique, Haiku about the weather in Paris\n", + "---------- primary ----------\n", + "Spring breeze gently hums, \n", + "Cherry blossoms in full bloom— \n", + "Paris wakes to life.\n", + "[Prompt tokens: 467, Completion tokens: 19]\n", + "---------- critic ----------\n", + "The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. 
Excellent work!\n", + "\n", + "APPROVE\n", + "[Prompt tokens: 746, Completion tokens: 93]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Text 'APPROVE' mentioned\n", + "Total prompt tokens: 1213\n", + "Total completion tokens: 112\n", + "Duration: 2.75 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=467, completion_tokens=19), content='Spring breeze gently hums, \\nCherry blossoms in full bloom— \\nParis wakes to life.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=746, completion_tokens=93), content='The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\\n\\nAPPROVE')], stop_reason=\"Text 'APPROVE' mentioned\")" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "max_msg_termination = MaxMessageTermination(max_messages=10)\n", + "text_termination = TextMentionTermination(\"APPROVE\")\n", + "combined_termination = max_msg_termination | text_termination\n", + "\n", + "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=combined_termination)\n", + "\n", + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The conversation stopped after the critic agent approved the message, although it could have also stopped if 10 messages were generated.\n", + "\n", + "Alternatively, if we want to stop the run only when both conditions are met, we can use the AND (`&`) operator." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "combined_termination = max_msg_termination & text_termination" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom Termination Condition\n", + "\n", + "The built-in termination conditions are sufficient for most use cases.\n", + "However, there may be cases where you need to implement a custom termination condition that doesn't fit into the existing ones.\n", + "You can do this by subclassing the {py:class}`~autogen_agentchat.base.TerminationCondition` class.\n", + "\n", + "In this example, we create a custom termination condition that stops the conversation when\n", + "a specific function call is made." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import Sequence\n", + "\n", + "from autogen_agentchat.base import TerminatedException, TerminationCondition\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage, StopMessage, ToolCallExecutionEvent\n", + "from autogen_core import Component\n", + "from pydantic import BaseModel\n", + "from typing_extensions import Self\n", + "\n", + "\n", + "class FunctionCallTerminationConfig(BaseModel):\n", + "    \"\"\"Configuration for the termination condition to allow for serialization\n", + "    and deserialization of the component.\n", + "    \"\"\"\n", + "\n", + "    function_name: str\n", + "\n", + "\n", + "class FunctionCallTermination(TerminationCondition, Component[FunctionCallTerminationConfig]):\n", + "    \"\"\"Terminate the conversation if a FunctionExecutionResult with a specific name is received.\"\"\"\n", + "\n", + "    component_config_schema = FunctionCallTerminationConfig\n", + "    \"\"\"The schema for the component configuration.\"\"\"\n", + "\n", + "    def __init__(self, function_name: str) -> None:\n", + "        self._terminated = False\n", + "        self._function_name = function_name\n", + "\n", + "    @property\n", + "    def terminated(self) -> bool:\n", + "        return self._terminated\n", + "\n", + "    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:\n", + "        if self._terminated:\n", + "            raise TerminatedException(\"Termination condition has already been reached\")\n", + "        for message in messages:\n", + "            if isinstance(message, ToolCallExecutionEvent):\n", + "                for execution in message.content:\n", + "                    if execution.name == self._function_name:\n", + "                        self._terminated = True\n", + "                        return StopMessage(\n", + "                            content=f\"Function '{self._function_name}' was executed.\",\n", + "                            source=\"FunctionCallTermination\",\n", + "                        )\n", + "        return None\n", + "\n", + "    async def reset(self) -> None:\n", + "        self._terminated = False\n", + "\n", + "    def _to_config(self) -> FunctionCallTerminationConfig:\n", + "        return FunctionCallTerminationConfig(\n", + "            function_name=self._function_name,\n", + "        )\n", + "\n", + "    @classmethod\n", + "    def _from_config(cls, config: FunctionCallTerminationConfig) -> Self:\n", + "        return cls(\n", + "            function_name=config.function_name,\n", + "        )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's use this new termination condition to stop the conversation when the critic agent approves a message\n", + "using the `approve` function call.\n", + "\n", + "First, we create a simple function that will be called when the critic agent approves a message." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def approve() -> None:\n", + "    \"\"\"Approve the message when all feedback has been addressed.\"\"\"\n", + "    pass" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then we create the agents. The critic agent is equipped with the `approve` tool." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.teams import RoundRobinGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "model_client = OpenAIChatCompletionClient(\n", + "    model=\"gpt-4o\",\n", + "    temperature=1,\n", + "    # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", + ")\n", + "\n", + "# Create the primary agent.\n", + "primary_agent = AssistantAgent(\n", + "    \"primary\",\n", + "    model_client=model_client,\n", + "    system_message=\"You are a helpful AI assistant.\",\n", + ")\n", + "\n", + "# Create the critic agent with the approve function as a tool.\n", + "critic_agent = AssistantAgent(\n", + "    \"critic\",\n", + "    model_client=model_client,\n", + "    tools=[approve],  # Register the approve function as a tool.\n", + "    system_message=\"Provide constructive feedback. Use the approve tool to approve when all feedback is addressed.\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, we create the termination condition and the team.\n", + "We run the team with the poem-writing task." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Write a unique, Haiku about the weather in Paris\n", + "---------- primary ----------\n", + "Raindrops gently fall, \n", + "Cobblestones shine in dim light— \n", + "Paris dreams in grey. \n", + "---------- critic ----------\n", + "This Haiku beautifully captures a melancholic yet romantic image of Paris in the rain. The use of sensory imagery like \"Raindrops gently fall\" and \"Cobblestones shine\" effectively paints a vivid picture. It could be interesting to experiment with more distinct seasonal elements of Paris, such as incorporating the Seine River or iconic landmarks in the context of the weather. Overall, it successfully conveys the atmosphere of Paris in subtle, poetic imagery.\n", + "---------- primary ----------\n", + "Thank you for your feedback! I’m glad you enjoyed the imagery. Here’s another Haiku that incorporates iconic Parisian elements:\n", + "\n", + "Eiffel stands in mist, \n", + "Seine's ripple mirrors the sky— \n", + "Spring whispers anew. \n", + "---------- critic ----------\n", + "[FunctionCall(id='call_QEWJZ873EG4UIEpsQHi1HsAu', arguments='{}', name='approve')]\n", + "---------- critic ----------\n", + "[FunctionExecutionResult(content='None', name='approve', call_id='call_QEWJZ873EG4UIEpsQHi1HsAu', is_error=False)]\n", + "---------- critic ----------\n", + "None\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Write a unique, Haiku about the weather in Paris', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=23), metadata={}, content='Raindrops gently fall, \\nCobblestones shine in dim light— \\nParis dreams in grey. ', type='TextMessage'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=99, completion_tokens=90), metadata={}, content='This Haiku beautifully captures a melancholic yet romantic image of Paris in the rain. 
The use of sensory imagery like \"Raindrops gently fall\" and \"Cobblestones shine\" effectively paints a vivid picture. It could be interesting to experiment with more distinct seasonal elements of Paris, such as incorporating the Seine River or iconic landmarks in the context of the weather. Overall, it successfully conveys the atmosphere of Paris in subtle, poetic imagery.', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=152, completion_tokens=48), metadata={}, content=\"Thank you for your feedback! I’m glad you enjoyed the imagery. Here’s another Haiku that incorporates iconic Parisian elements:\\n\\nEiffel stands in mist, \\nSeine's ripple mirrors the sky— \\nSpring whispers anew. \", type='TextMessage'), ToolCallRequestEvent(source='critic', models_usage=RequestUsage(prompt_tokens=246, completion_tokens=11), metadata={}, content=[FunctionCall(id='call_QEWJZ873EG4UIEpsQHi1HsAu', arguments='{}', name='approve')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='critic', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='None', name='approve', call_id='call_QEWJZ873EG4UIEpsQHi1HsAu', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='critic', models_usage=None, metadata={}, content='None', type='ToolCallSummaryMessage')], stop_reason=\"Function 'approve' was executed.\")" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "function_call_termination = FunctionCallTermination(function_name=\"approve\")\n", + "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=function_call_termination)\n", + "\n", + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))\n", + "await model_client.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see that the conversation stopped when the critic agent approved the message using the `approve` function call." 
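+    ,
+    "\n",
+    "Since `FunctionCallTermination` implements the component interface through its `_to_config` and `_from_config` methods, the condition can also be serialized and restored. The following is a rough sketch, assuming the `dump_component` and `load_component` helpers that `autogen_core` components provide:\n",
+    "\n",
+    "```python\n",
+    "# A hedged sketch: round-trip the termination condition through its config.\n",
+    "config = function_call_termination.dump_component()\n",
+    "restored_termination = FunctionCallTermination.load_component(config)\n",
+    "```"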
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py b/python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py index 6603857b3557..451d5826bad7 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py @@ -126,7 +126,7 @@ async def main(): [TextMessage(content="Can you base64 decode the value 'YWJjZGU=', please?", source="user")], CancellationToken(), ) - print(response.chat_message.content_to_text()) + print(response.chat_message.content) asyncio.run(main()) diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/mcp/_factory.py b/python/packages/autogen-ext/src/autogen_ext/tools/mcp/_factory.py index 973ad508f4cc..3eb8634b3698 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/mcp/_factory.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/mcp/_factory.py @@ -105,7 +105,7 @@ async def main() -> None: # Let the agent fetch the content of a URL and summarize it. result = await agent.run(task="Summarize the content of https://en.wikipedia.org/wiki/Seattle") - print(result.messages[-1].content_to_text()) + print(result.messages[-1].content) asyncio.run(main()) diff --git a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py index addb125d6806..1951205e8ed5 100644 --- a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py +++ b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py @@ -9,6 +9,7 @@ Optional, Tuple, TypeVar, + cast, ) from autogen_agentchat.base import Response, TaskResult @@ -55,12 +56,12 @@ def aprint(output: str, end: str = "\n") -> Awaitable[None]: return asyncio.to_thread(print, output, end=end) -def _extract_message_content(message: ChatMessage | AgentEvent) -> Tuple[List[str], List[Image]]: +def _extract_message_content(message: AgentEvent | ChatMessage) -> Tuple[List[str], List[Image]]: if isinstance(message, MultiModalMessage): text_parts = [item for item in message.content if isinstance(item, str)] image_parts = [item for item in message.content if isinstance(item, Image)] else: - text_parts = [message.content_to_text()] + text_parts = [str(message.content)] image_parts = [] return text_parts, image_parts @@ -99,7 +100,7 @@ async def _aprint_message_content( async def RichConsole( - stream: AsyncGenerator[ChatMessage | AgentEvent | T, None], + stream: AsyncGenerator[AgentEvent | ChatMessage | T, None], *, no_inline_images: bool = False, output_stats: bool = False, @@ -116,7 +117,7 @@ async def RichConsole( It will be improved in future releases. Args: - stream (AsyncGenerator[ChatMessage | AgentEvent | TaskResult, None] | AsyncGenerator[ChatMessage | AgentEvent | Response, None]): Message stream to render. + stream (AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None] | AsyncGenerator[AgentEvent | ChatMessage | Response, None]): Message stream to render. 
This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. no_inline_images (bool, optional): If the terminal is iTerm2, images will be rendered inline. Use this to disable that behavior. Defaults to False. output_stats (bool, optional): (Experimental) If True, will output a summary of the messages and inline token usage info. Defaults to False. @@ -189,6 +190,9 @@ async def RichConsole( # TODO: Handle model client streaming chunk events. pass else: + # Cast required for mypy to be happy + message = cast(AgentEvent | ChatMessage, message) # type: ignore + text_parts, image_parts = _extract_message_content(message) # Add usage stats if needed if message.models_usage: diff --git a/python/samples/agentchat_chess_game/main.py b/python/samples/agentchat_chess_game/main.py index 01d25cc6a66f..e12db1d94778 100644 --- a/python/samples/agentchat_chess_game/main.py +++ b/python/samples/agentchat_chess_game/main.py @@ -78,7 +78,8 @@ async def get_ai_move(board: chess.Board, player: AssistantAgent, max_tries: int while count < max_tries: result = await Console(player.run_stream(task=task)) count += 1 - response = result.messages[-1].content_to_text() + response = result.messages[-1].content + assert isinstance(response, str) # Check if the response is a valid UCI move. try: move = chess.Move.from_uci(extract_move(response)) diff --git a/python/samples/agentchat_streamlit/agent.py b/python/samples/agentchat_streamlit/agent.py index 2d03894914a2..cbe588828838 100644 --- a/python/samples/agentchat_streamlit/agent.py +++ b/python/samples/agentchat_streamlit/agent.py @@ -1,6 +1,6 @@ import yaml from autogen_agentchat.agents import AssistantAgent -from autogen_agentchat.messages import TextChatMessage, TextMessage +from autogen_agentchat.messages import TextMessage from autogen_core import CancellationToken from autogen_core.models import ChatCompletionClient @@ -22,5 +22,5 @@ async def chat(self, prompt: str) -> str: [TextMessage(content=prompt, source="user")], CancellationToken(), ) - assert isinstance(response.chat_message, TextChatMessage) + assert isinstance(response.chat_message.content, str) return response.chat_message.content From dc0cd08bb9089e2dfdf482c51c92136cb125cce3 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 11:48:40 -0700 Subject: [PATCH 17/26] swap --- .../agents/_assistant_agent.py | 14 +++++----- .../agents/_base_chat_agent.py | 8 +++--- .../agents/_society_of_mind_agent.py | 4 +-- .../agents/_user_proxy_agent.py | 2 +- .../src/autogen_agentchat/base/_chat_agent.py | 4 +-- .../src/autogen_agentchat/base/_task.py | 4 +-- .../autogen_agentchat/base/_termination.py | 8 +++--- .../conditions/_terminations.py | 20 +++++++------- .../src/autogen_agentchat/messages.py | 8 +++--- .../teams/_group_chat/_base_group_chat.py | 10 +++---- .../_group_chat/_base_group_chat_manager.py | 8 +++--- .../_group_chat/_chat_agent_container.py | 2 +- .../teams/_group_chat/_events.py | 2 +- .../_magentic_one/_magentic_one_group_chat.py | 2 +- .../_magentic_one_orchestrator.py | 6 ++--- .../_group_chat/_round_robin_group_chat.py | 8 +++--- .../teams/_group_chat/_selector_group_chat.py | 26 +++++++++---------- .../teams/_group_chat/_swarm_group_chat.py | 8 +++--- .../src/autogen_agentchat/ui/_console.py | 6 ++--- .../tests/test_group_chat.py | 4 +-- .../tests/test_group_chat_endpoint.py | 2 +- .../agents/openai/_openai_assistant_agent.py | 4 +-- .../web_surfer/_multimodal_web_surfer.py | 4 +--
.../task_centric_memory/utils/apprentice.py | 2 +- .../task_centric_memory/utils/page_logger.py | 2 +- 25 files changed, 84 insertions(+), 84 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index b6a677d94f87..7a6fdff1f6ba 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -720,7 +720,7 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: """ Process the incoming messages with the assistant agent and yield events/responses as they happen. """ @@ -745,7 +745,7 @@ async def on_messages_stream( ) # STEP 2: Update model context with any relevant memory - inner_messages: List[ChatMessage | AgentEvent] = [] + inner_messages: List[AgentEvent | ChatMessage] = [] for event_msg in await self._update_model_context_with_memory( memory=memory, model_context=model_context, @@ -886,7 +886,7 @@ async def _call_llm( async def _process_model_result( cls, model_result: CreateResult, - inner_messages: List[ChatMessage | AgentEvent], + inner_messages: List[AgentEvent | ChatMessage], cancellation_token: CancellationToken, agent_name: str, system_messages: List[SystemMessage], @@ -898,7 +898,7 @@ async def _process_model_result( model_client_stream: bool, reflect_on_tool_use: bool, tool_call_summary_format: str, - ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: """ Handle final or partial responses from model_result, including tool calls, handoffs, and reflection if needed. 
@@ -992,7 +992,7 @@ async def _process_model_result(
     def _check_and_handle_handoff(
         model_result: CreateResult,
         executed_calls_and_results: List[Tuple[FunctionCall, FunctionExecutionResult]],
-        inner_messages: List[ChatMessage | AgentEvent],
+        inner_messages: List[AgentEvent | ChatMessage],
         handoffs: Dict[str, HandoffBase],
         agent_name: str,
     ) -> Optional[Response]:
@@ -1057,7 +1057,7 @@ async def _reflect_on_tool_use_flow(
         model_client_stream: bool,
         model_context: ChatCompletionContext,
         agent_name: str,
-        inner_messages: List[ChatMessage | AgentEvent],
+        inner_messages: List[AgentEvent | ChatMessage],
     ) -> AsyncGenerator[Response | ModelClientStreamingChunkEvent | ThoughtEvent, None]:
         """
         If reflect_on_tool_use=True, we do another inference based on tool results
@@ -1109,7 +1109,7 @@ async def _reflect_on_tool_use_flow(
     @staticmethod
     def _summarize_tool_use(
         executed_calls_and_results: List[Tuple[FunctionCall, FunctionExecutionResult]],
-        inner_messages: List[ChatMessage | AgentEvent],
+        inner_messages: List[AgentEvent | ChatMessage],
         handoffs: Dict[str, HandoffBase],
         tool_call_summary_format: str,
         agent_name: str,
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py
index 8e671bdd8896..375e296c23bb 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py
@@ -82,7 +82,7 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token:
     async def on_messages_stream(
         self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
-    ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
         """Handles incoming messages and returns a stream of messages, and the final item is the response.
The base implementation in :class:`BaseChatAgent` simply calls :meth:`on_messages` and yields @@ -113,7 +113,7 @@ async def run( if cancellation_token is None: cancellation_token = CancellationToken() input_messages: List[ChatMessage] = [] - output_messages: List[ChatMessage | AgentEvent] = [] + output_messages: List[AgentEvent | ChatMessage] = [] if task is None: pass elif isinstance(task, str): @@ -144,13 +144,13 @@ async def run_stream( *, task: str | ChatMessage | Sequence[ChatMessage] | None = None, cancellation_token: CancellationToken | None = None, - ) -> AsyncGenerator[ChatMessage | AgentEvent | TaskResult, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None]: """Run the agent with the given task and return a stream of messages and the final task result as the last item in the stream.""" if cancellation_token is None: cancellation_token = CancellationToken() input_messages: List[ChatMessage] = [] - output_messages: List[ChatMessage | AgentEvent] = [] + output_messages: List[AgentEvent | ChatMessage] = [] if task is None: pass elif isinstance(task, str): diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py index e78c3741db9a..d81a1889a0e6 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py @@ -136,13 +136,13 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: # Prepare the task for the team of agents. task = list(messages) # Run the team of agents. 
        result: TaskResult | None = None
-        inner_messages: List[ChatMessage | AgentEvent] = []
+        inner_messages: List[AgentEvent | ChatMessage] = []
         count = 0
         async for inner_msg in self._team.run_stream(task=task, cancellation_token=cancellation_token):
             if isinstance(inner_msg, TaskResult):
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py
index 7131b1f13abb..d78a5c6e9ca8 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py
@@ -207,7 +207,7 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token:
     async def on_messages_stream(
         self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
-    ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
         """Handle incoming messages by requesting user input."""
         try:
             # Check for handoff first
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py
index 8a7053db21a2..9b594b4df6d4 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py
@@ -16,7 +16,7 @@ class Response:
     chat_message: ChatMessage
     """A chat message produced by the agent as the response."""
-    inner_messages: Sequence[ChatMessage | AgentEvent] | None = None
+    inner_messages: Sequence[AgentEvent | ChatMessage] | None = None
     """Inner messages produced by the agent."""
@@ -55,7 +55,7 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token:
     @abstractmethod
     def on_messages_stream(
         self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
-    ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
         """Handles incoming messages and returns a stream of inner messages, and the final item is the response."""
         ...
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py
index ca3e44dbce0b..90e319ee36c6 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py
@@ -10,7 +10,7 @@ class TaskResult:
     """Result of running a task."""
-    messages: Sequence[ChatMessage | AgentEvent]
+    messages: Sequence[AgentEvent | ChatMessage]
     """Messages produced by the task."""
     stop_reason: str | None = None
@@ -40,7 +40,7 @@ def run_stream(
         *,
         task: str | ChatMessage | Sequence[ChatMessage] | None = None,
         cancellation_token: CancellationToken | None = None,
-    ) -> AsyncGenerator[ChatMessage | AgentEvent | TaskResult, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None]:
         """Run the task and produce a stream of messages and the final result :class:`TaskResult` as the last item in the stream.
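For downstream callers, the reordered union does not change how the stream is consumed; a minimal sketch, assuming only the public `autogen_agentchat` API shown in this series, where `agent` may be any `ChatAgent` implementation::

    from autogen_agentchat.base import ChatAgent, TaskResult
    from autogen_agentchat.messages import ChatMessage


    async def consume_stream(agent: ChatAgent, task: str) -> TaskResult:
        # The stream yields AgentEvent | ChatMessage items and ends with a TaskResult.
        async for item in agent.run_stream(task=task):
            if isinstance(item, TaskResult):
                return item  # Always the last item in the stream.
            if isinstance(item, ChatMessage):
                print(f"[{item.source}] {item.content_to_text()}")
            else:  # An AgentEvent, e.g. a tool call event or streaming chunk.
                print(f"[{item.source}] event: {item.content_to_text()}")
        raise RuntimeError("Stream ended without a TaskResult.")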
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py index 2df6da347629..3ad5872376ae 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py @@ -15,7 +15,7 @@ class TerminatedException(BaseException): ... class TerminationCondition(ABC, ComponentBase[BaseModel]): """A stateful condition that determines when a conversation should be terminated. - A termination condition is a callable that takes a sequence of :class:`ChatMessage | AgentEvent` objects + A termination condition is a callable that takes a sequence of :class:`AgentEvent | ChatMessage` objects since the last time the condition was called, and returns a StopMessage if the conversation should be terminated, or None otherwise. Once a termination condition has been reached, it must be reset before it can be used again. @@ -56,7 +56,7 @@ def terminated(self) -> bool: ... @abstractmethod - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: """Check if the conversation should be terminated based on the messages received since the last time the condition was called. Return a StopMessage if the conversation should be terminated, or None otherwise. @@ -102,7 +102,7 @@ def __init__(self, *conditions: TerminationCondition) -> None: def terminated(self) -> bool: return all(condition.terminated for condition in self._conditions) - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self.terminated: raise TerminatedException("Termination condition has already been reached.") # Check all remaining conditions. 
@@ -153,7 +153,7 @@ def __init__(self, *conditions: TerminationCondition) -> None: def terminated(self) -> bool: return any(condition.terminated for condition in self._conditions) - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self.terminated: raise RuntimeError("Termination condition has already been reached") stop_messages = await asyncio.gather(*[condition(messages) for condition in self._conditions]) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py index 0b4aac79b3cb..e4f98257453f 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py @@ -33,7 +33,7 @@ def __init__(self) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -79,7 +79,7 @@ def __init__(self, max_messages: int, include_agent_event: bool = False) -> None def terminated(self) -> bool: return self._message_count >= self._max_messages - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self.terminated: raise TerminatedException("Termination condition has already been reached") self._message_count += len([m for m in messages if self._include_agent_event or isinstance(m, ChatMessage)]) @@ -127,7 +127,7 @@ def __init__(self, text: str, sources: Sequence[str] | None = None) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -199,7 +199,7 @@ def terminated(self) -> bool: or (self._max_completion_token is not None and self._completion_token_count >= self._max_completion_token) ) - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self.terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -256,7 +256,7 @@ def __init__(self, target: str) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -301,7 +301,7 @@ def __init__(self, timeout_seconds: float) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def 
__call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") @@ -363,7 +363,7 @@ def set(self) -> None: """Set the termination condition to terminated.""" self._setted = True - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") if self._setted: @@ -408,7 +408,7 @@ def __init__(self, sources: List[str]) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") if not messages: @@ -461,7 +461,7 @@ def __init__(self, source: str | None = None) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: @@ -511,7 +511,7 @@ def __init__(self, function_name: str) -> None: def terminated(self) -> bool: return self._terminated - async def __call__(self, messages: Sequence[ChatMessage | AgentEvent]) -> StopMessage | None: + async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self._terminated: raise TerminatedException("Termination condition has already been reached") for message in messages: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index 385166afbb20..aee6365a7ac2 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -339,7 +339,7 @@ class MessageFactory: """ def __init__(self) -> None: - self._message_types: Dict[str, type[ChatMessage | AgentEvent]] = {} + self._message_types: Dict[str, type[AgentEvent | ChatMessage]] = {} # Register all message types. self._message_types[TextMessage.__name__] = TextMessage self._message_types[MultiModalMessage.__name__] = MultiModalMessage @@ -353,14 +353,14 @@ def __init__(self) -> None: self._message_types[ModelClientStreamingChunkEvent.__name__] = ModelClientStreamingChunkEvent self._message_types[ThoughtEvent.__name__] = ThoughtEvent - def is_registered(self, message_type: type[ChatMessage | AgentEvent]) -> bool: + def is_registered(self, message_type: type[AgentEvent | ChatMessage]) -> bool: """Check if a message type is registered with the factory.""" # Get the class name of the message type. class_name = message_type.__name__ # Check if the class name is already registered. 
        return class_name in self._message_types

-    def register(self, message_type: type[ChatMessage | AgentEvent]) -> None:
+    def register(self, message_type: type[AgentEvent | ChatMessage]) -> None:
         """Register a new message type with the factory."""
         if self.is_registered(message_type):
             raise ValueError(f"Message type {message_type} is already registered.")
@@ -372,7 +372,7 @@ def register(self, message_type: type[ChatMessage | AgentEvent]) -> None:
         # Register the message type.
         self._message_types[class_name] = message_type

-    def create(self, data: Mapping[str, Any]) -> ChatMessage | AgentEvent:
+    def create(self, data: Mapping[str, Any]) -> AgentEvent | ChatMessage:
         """Create a message from a dictionary of JSON-serializable data."""
         # Get the type of the message from the dictionary.
         message_type = data.get("type")
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py
index a568b35302ce..9e4f77a3a135 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py
@@ -50,7 +50,7 @@ def __init__(
         termination_condition: TerminationCondition | None = None,
         max_turns: int | None = None,
         runtime: AgentRuntime | None = None,
-        custom_message_types: List[type[ChatMessage | AgentEvent]] | None = None,
+        custom_message_types: List[type[AgentEvent | ChatMessage]] | None = None,
     ):
         if len(participants) == 0:
             raise ValueError("At least one participant is required.")
@@ -90,7 +90,7 @@ def __init__(
         self._output_topic_type = f"output_topic_{self._team_id}"

         # The queue for collecting the output messages.
-        self._output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination] = asyncio.Queue()
+        self._output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination] = asyncio.Queue()

         # Create a runtime for the team.
         if runtime is not None:
@@ -117,7 +117,7 @@ def _create_group_chat_manager_factory(
         participant_topic_types: List[str],
         participant_names: List[str],
         participant_descriptions: List[str],
-        output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination],
+        output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination],
         termination_condition: TerminationCondition | None,
         max_turns: int | None,
         message_factory: MessageFactory,
@@ -299,7 +299,7 @@ async def run_stream(
         *,
         task: str | ChatMessage | Sequence[ChatMessage] | None = None,
         cancellation_token: CancellationToken | None = None,
-    ) -> AsyncGenerator[ChatMessage | AgentEvent | TaskResult, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None]:
         """Run the team and produce a stream of messages and the final result
         of the type :class:`~autogen_agentchat.base.TaskResult` as the last item in the stream. Once the
         team is stopped, the termination condition is reset.
@@ -469,7 +469,7 @@ async def stop_runtime() -> None:
             cancellation_token=cancellation_token,
         )
         # Collect the output messages in order.
-        output_messages: List[ChatMessage | AgentEvent] = []
+        output_messages: List[AgentEvent | ChatMessage] = []
         stop_reason: str | None = None
         # Yield the messages until the queue is empty.
while True: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py index 157b1fb0a57b..59653e5f31a5 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py @@ -39,7 +39,7 @@ def __init__( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], + output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, @@ -67,7 +67,7 @@ def __init__( name: topic_type for name, topic_type in zip(participant_names, participant_topic_types, strict=True) } self._participant_descriptions = participant_descriptions - self._message_thread: List[ChatMessage | AgentEvent] = [] + self._message_thread: List[AgentEvent | ChatMessage] = [] self._output_message_queue = output_message_queue self._termination_condition = termination_condition if max_turns is not None and max_turns <= 0: @@ -141,7 +141,7 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No @event async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: MessageContext) -> None: # Append the message to the message thread and construct the delta. - delta: List[ChatMessage | AgentEvent] = [] + delta: List[AgentEvent | ChatMessage] = [] if message.agent_response.inner_messages is not None: for inner_message in message.agent_response.inner_messages: self._message_thread.append(inner_message) @@ -235,7 +235,7 @@ async def validate_group_state(self, messages: List[ChatMessage] | None) -> None ... @abstractmethod - async def select_speaker(self, thread: List[ChatMessage | AgentEvent]) -> str: + async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: """Select a speaker from the participants and return the topic type of the selected speaker.""" ... diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py index 3df2ae728ca3..d4a2adda8e87 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py @@ -96,7 +96,7 @@ def _buffer_message(self, message: ChatMessage) -> None: # Buffer the message. self._message_buffer.append(message) - async def _log_message(self, message: ChatMessage | AgentEvent) -> None: + async def _log_message(self, message: AgentEvent | ChatMessage) -> None: if not self._message_factory.is_registered(message.__class__): raise ValueError(f"Message type {message.__class__} is not registered.") # Log the message. 
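Because `_log_message` rejects unregistered types, a custom message class has to be registered before a team can carry it. A sketch of the intended flow, using a hypothetical `AuditMessage` type; the exact set of abstract methods required by `ChatMessage` is assumed to be the one defined in this series::

    from autogen_core.models import UserMessage

    from autogen_agentchat.messages import ChatMessage, MessageFactory


    class AuditMessage(ChatMessage):
        """Hypothetical custom chat message carrying a plain-text audit note."""

        content: str

        def content_to_text(self) -> str:
            return self.content

        def content_to_model_text(self) -> str:
            return self.content

        def content_to_model_message(self) -> UserMessage:
            return UserMessage(content=self.content, source=self.source)


    factory = MessageFactory()
    factory.register(AuditMessage)
    assert factory.is_registered(AuditMessage)

    # Teams accept the same class through custom_message_types=[AuditMessage].
    message = AuditMessage(content="policy check passed", source="auditor")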
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py index 046230846189..f705a54c4a50 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_events.py @@ -29,7 +29,7 @@ class GroupChatRequestPublish(BaseModel): class GroupChatMessage(BaseModel): """A message from a group chat.""" - message: ChatMessage | AgentEvent + message: AgentEvent | ChatMessage """The message that was published.""" diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py index dfc639ab1be2..66ff53fe18e9 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py @@ -128,7 +128,7 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], + output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py index 5f8ba8fc6712..eb6d9c979e83 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py @@ -66,7 +66,7 @@ def __init__( model_client: ChatCompletionClient, max_stalls: int, final_answer_prompt: str, - output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], + output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], termination_condition: TerminationCondition | None, ): super().__init__( @@ -184,7 +184,7 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No @event async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: MessageContext) -> None: # type: ignore - delta: List[ChatMessage | AgentEvent] = [] + delta: List[AgentEvent | ChatMessage] = [] if message.agent_response.inner_messages is not None: for inner_message in message.agent_response.inner_messages: delta.append(inner_message) @@ -226,7 +226,7 @@ async def load_state(self, state: Mapping[str, Any]) -> None: self._n_rounds = orchestrator_state.n_rounds self._n_stalls = orchestrator_state.n_stalls - async def select_speaker(self, thread: List[ChatMessage | AgentEvent]) -> str: + async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: """Not used in this orchestrator, we select next speaker in _orchestrate_step.""" return "" diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py 
b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py index 3541b0b1694e..0f06d9aeec9e 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py @@ -24,7 +24,7 @@ def __init__( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], + output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, @@ -67,7 +67,7 @@ async def load_state(self, state: Mapping[str, Any]) -> None: self._current_turn = round_robin_state.current_turn self._next_speaker_index = round_robin_state.next_speaker_index - async def select_speaker(self, thread: List[ChatMessage | AgentEvent]) -> str: + async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: """Select a speaker from the participants in a round-robin fashion.""" current_speaker_index = self._next_speaker_index self._next_speaker_index = (current_speaker_index + 1) % len(self._participant_names) @@ -166,7 +166,7 @@ def __init__( termination_condition: TerminationCondition | None = None, max_turns: int | None = None, runtime: AgentRuntime | None = None, - custom_message_types: List[type[ChatMessage | AgentEvent]] | None = None, + custom_message_types: List[type[AgentEvent | ChatMessage]] | None = None, ) -> None: super().__init__( participants, @@ -186,7 +186,7 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], + output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 7c369fec245d..3517914da3cc 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -24,12 +24,12 @@ trace_logger = logging.getLogger(TRACE_LOGGER_NAME) -SyncSelectorFunc = Callable[[Sequence[ChatMessage | AgentEvent]], str | None] -AsyncSelectorFunc = Callable[[Sequence[ChatMessage | AgentEvent]], Awaitable[str | None]] +SyncSelectorFunc = Callable[[Sequence[AgentEvent | ChatMessage]], str | None] +AsyncSelectorFunc = Callable[[Sequence[AgentEvent | ChatMessage]], Awaitable[str | None]] SelectorFuncType = Union[SyncSelectorFunc | AsyncSelectorFunc] -SyncCandidateFunc = Callable[[Sequence[ChatMessage | AgentEvent]], List[str]] -AsyncCandidateFunc = Callable[[Sequence[ChatMessage | AgentEvent]], Awaitable[List[str]]] +SyncCandidateFunc = Callable[[Sequence[AgentEvent | ChatMessage]], List[str]] +AsyncCandidateFunc = Callable[[Sequence[AgentEvent | ChatMessage]], Awaitable[List[str]]] CandidateFuncType = Union[SyncCandidateFunc | AsyncCandidateFunc] @@ -45,7 +45,7 @@ def __init__( participant_topic_types: List[str], 
participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], + output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, @@ -102,7 +102,7 @@ async def load_state(self, state: Mapping[str, Any]) -> None: self._current_turn = selector_state.current_turn self._previous_speaker = selector_state.previous_speaker - async def select_speaker(self, thread: List[ChatMessage | AgentEvent]) -> str: + async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: """Selects the next speaker in a group chat using a ChatCompletion client, with the selector function as override if it returns a speaker name. @@ -299,11 +299,11 @@ class SelectorGroupChat(BaseGroupChat, Component[SelectorGroupChatConfig]): max_selector_attempts (int, optional): The maximum number of attempts to select a speaker using the model. Defaults to 3. If the model fails to select a speaker after the maximum number of attempts, the previous speaker will be used if available, otherwise the first participant will be used. - selector_func (Callable[[Sequence[ChatMessage | AgentEvent]], str | None], Callable[[Sequence[ChatMessage | AgentEvent]], Awaitable[str | None]], optional): A custom selector + selector_func (Callable[[Sequence[AgentEvent | ChatMessage]], str | None], Callable[[Sequence[AgentEvent | ChatMessage]], Awaitable[str | None]], optional): A custom selector function that takes the conversation history and returns the name of the next speaker. If provided, this function will be used to override the model to select the next speaker. If the function returns None, the model will be used to select the next speaker. - candidate_func (Callable[[Sequence[ChatMessage | AgentEvent]], List[str]], Callable[[Sequence[ChatMessage | AgentEvent]], Awaitable[List[str]]], optional): + candidate_func (Callable[[Sequence[AgentEvent | ChatMessage]], List[str]], Callable[[Sequence[AgentEvent | ChatMessage]], Awaitable[List[str]]], optional): A custom function that takes the conversation history and returns a filtered list of candidates for the next speaker selection using model. If the function returns an empty list or `None`, `SelectorGroupChat` will raise a `ValueError`. This function is only used if `selector_func` is not set. The `allow_repeated_speaker` will be ignored if set. @@ -378,7 +378,7 @@ async def book_trip() -> str: from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.conditions import TextMentionTermination from autogen_agentchat.ui import Console - from autogen_agentchat.messages import ChatMessage | AgentEvent + from autogen_agentchat.messages import AgentEvent | ChatMessage async def main() -> None: @@ -404,7 +404,7 @@ def check_calculation(x: int, y: int, answer: int) -> str: system_message="Check the answer and respond with 'Correct!' 
or 'Incorrect!'", ) - def selector_func(messages: Sequence[ChatMessage | AgentEvent]) -> str | None: + def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None: if len(messages) == 1 or messages[-1].content_to_text() == "Incorrect!": return "Agent1" if messages[-1].source == "Agent1": @@ -448,7 +448,7 @@ def __init__( max_selector_attempts: int = 3, selector_func: Optional[SelectorFuncType] = None, candidate_func: Optional[CandidateFuncType] = None, - custom_message_types: List[type[ChatMessage | AgentEvent]] | None = None, + custom_message_types: List[type[AgentEvent | ChatMessage]] | None = None, ): super().__init__( participants, @@ -477,7 +477,7 @@ def _create_group_chat_manager_factory( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], + output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, @@ -525,7 +525,7 @@ def _from_config(cls, config: SelectorGroupChatConfig) -> Self: selector_prompt=config.selector_prompt, allow_repeated_speaker=config.allow_repeated_speaker, max_selector_attempts=config.max_selector_attempts, - # selector_func=ComponentLoader.load_component(config.selector_func, Callable[[Sequence[ChatMessage | AgentEvent]], str | None]) + # selector_func=ComponentLoader.load_component(config.selector_func, Callable[[Sequence[AgentEvent | ChatMessage]], str | None]) # if config.selector_func # else None, ) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py index d11b68791181..d76ba0fb1e44 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py @@ -23,7 +23,7 @@ def __init__( participant_topic_types: List[str], participant_names: List[str], participant_descriptions: List[str], - output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination], + output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination], termination_condition: TerminationCondition | None, max_turns: int | None, message_factory: MessageFactory, @@ -77,7 +77,7 @@ async def reset(self) -> None: await self._termination_condition.reset() self._current_speaker = self._participant_names[0] - async def select_speaker(self, thread: List[ChatMessage | AgentEvent]) -> str: + async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: """Select a speaker from the participants based on handoff message. 
        Looks for the last handoff message in the thread to determine the next speaker."""
         if len(thread) == 0:
@@ -212,7 +212,7 @@ def __init__(
         termination_condition: TerminationCondition | None = None,
         max_turns: int | None = None,
         runtime: AgentRuntime | None = None,
-        custom_message_types: List[type[ChatMessage | AgentEvent]] | None = None,
+        custom_message_types: List[type[AgentEvent | ChatMessage]] | None = None,
     ) -> None:
         super().__init__(
             participants,
@@ -236,7 +236,7 @@ def _create_group_chat_manager_factory(
         participant_topic_types: List[str],
         participant_names: List[str],
         participant_descriptions: List[str],
-        output_message_queue: asyncio.Queue[ChatMessage | AgentEvent | GroupChatTermination],
+        output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | GroupChatTermination],
         termination_condition: TerminationCondition | None,
         max_turns: int | None,
         message_factory: MessageFactory,
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py
index ced6552ee43a..5be9ce3fcf31 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py
@@ -80,7 +80,7 @@ def aprint(output: str, end: str = "\n", flush: bool = False) -> Awaitable[None]

 async def Console(
-    stream: AsyncGenerator[ChatMessage | AgentEvent | T, None],
+    stream: AsyncGenerator[AgentEvent | ChatMessage | T, None],
     *,
     no_inline_images: bool = False,
     output_stats: bool = False,
@@ -97,7 +97,7 @@ async def Console(
     It will be improved in future releases.

     Args:
-        stream (AsyncGenerator[ChatMessage | AgentEvent | TaskResult, None] | AsyncGenerator[ChatMessage | AgentEvent | Response, None]): Message stream to render.
+        stream (AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None] | AsyncGenerator[AgentEvent | ChatMessage | Response, None]): Message stream to render.
             This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`.
         no_inline_images (bool, optional): If the terminal is iTerm2, images will be rendered inline. Use this to disable that behavior. Defaults to False.
         output_stats (bool, optional): (Experimental) If True, will output a summary of the messages and inline token usage info. Defaults to False.
@@ -170,7 +170,7 @@ async def Console(
                 user_input_manager.notify_event_received(message.request_id)
             else:
                 # Cast required for mypy to be happy
-                message = cast(ChatMessage | AgentEvent, message)  # type: ignore
+                message = cast(AgentEvent | ChatMessage, message)  # type: ignore

                 if not streaming_chunks:
                     # Print message sender.
await aprint(f"{'-' * 10} {message.source} {'-' * 10}", end="\n", flush=True) diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index a44180dbff9e..a9362ef48cb2 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -821,7 +821,7 @@ async def test_selector_group_chat_custom_selector(runtime: AgentRuntime | None) agent3 = _EchoAgent("agent3", description="echo agent 3") agent4 = _EchoAgent("agent4", description="echo agent 4") - def _select_agent(messages: Sequence[ChatMessage | AgentEvent]) -> str | None: + def _select_agent(messages: Sequence[AgentEvent | ChatMessage]) -> str | None: if len(messages) == 0: return "agent1" elif messages[-1].source == "agent1": @@ -862,7 +862,7 @@ async def test_selector_group_chat_custom_candidate_func(runtime: AgentRuntime | agent3 = _EchoAgent("agent3", description="echo agent 3") agent4 = _EchoAgent("agent4", description="echo agent 4") - def _candidate_func(messages: Sequence[ChatMessage | AgentEvent]) -> List[str]: + def _candidate_func(messages: Sequence[AgentEvent | ChatMessage]) -> List[str]: if len(messages) == 0: return ["agent1"] elif messages[-1].source == "agent1": diff --git a/python/packages/autogen-agentchat/tests/test_group_chat_endpoint.py b/python/packages/autogen-agentchat/tests/test_group_chat_endpoint.py index cdbcd1df25d5..dd0c3ba71b7d 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat_endpoint.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat_endpoint.py @@ -33,7 +33,7 @@ async def _test_selector_group_chat(model_client: ChatCompletionClient) -> None: async def _test_selector_group_chat_with_candidate_func(model_client: ChatCompletionClient) -> None: filtered_participants = ["developer", "tester"] - def dummy_candidate_func(thread: Sequence[ChatMessage | AgentEvent]) -> List[str]: + def dummy_candidate_func(thread: Sequence[AgentEvent | ChatMessage]) -> List[str]: # Dummy candidate function that will return # only return developer and reviewer return filtered_participants diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py index b0ba54170611..f806d2bb9632 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py @@ -402,7 +402,7 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: """Handle incoming messages and return a response.""" await self._ensure_initialized() @@ -411,7 +411,7 @@ async def on_messages_stream( await self.handle_incoming_message(message, cancellation_token) # Inner messages for tool calls - inner_messages: List[ChatMessage | AgentEvent] = [] + inner_messages: List[AgentEvent | ChatMessage] = [] # Create and start a run run: Run = await cancellation_token.link_future( diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py index 5be0cfdbaa0d..f5cfcbf1d29f 
100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py @@ -430,11 +430,11 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken - ) -> AsyncGenerator[ChatMessage | AgentEvent | Response, None]: + ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: for chat_message in messages: self._chat_history.append(chat_message.content_to_model_message()) - self.inner_messages: List[ChatMessage | AgentEvent] = [] + self.inner_messages: List[AgentEvent | ChatMessage] = [] self.model_usage: List[RequestUsage] = [] try: content = await self._generate_reply(cancellation_token=cancellation_token) diff --git a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py index 74bcf3d4435b..d4fb0cae3eb7 100644 --- a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py +++ b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py @@ -190,7 +190,7 @@ async def _assign_task_to_assistant_agent(self, task: str) -> Tuple[Any, Any]: # Get the agent's response to the task. task_result: TaskResult = await assistant_agent.run(task=TextMessage(content=task, source="User")) - messages: Sequence[ChatMessage | AgentEvent] = task_result.messages + messages: Sequence[AgentEvent | ChatMessage] = task_result.messages message = messages[-1] response_str = message.content diff --git a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py index a044931492d7..92964dfbec12 100644 --- a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py +++ b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py @@ -343,7 +343,7 @@ def log_model_task( if self.level > self.levels["INFO"]: return None - messages: Sequence[ChatMessage | AgentEvent] = task_result.messages + messages: Sequence[AgentEvent | ChatMessage] = task_result.messages message = messages[-1] response_str = message.content if not isinstance(response_str, str): From c87f58369d49fec6af26b18138ae945a000424ec Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 11:49:31 -0700 Subject: [PATCH 18/26] revert --- .../experimental/task_centric_memory/utils/apprentice.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py index d4fb0cae3eb7..a8104c0ebc44 100644 --- a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py +++ b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py @@ -191,7 +191,7 @@ async def _assign_task_to_assistant_agent(self, task: str) -> Tuple[Any, Any]: # Get the agent's response to the task. 
task_result: TaskResult = await assistant_agent.run(task=TextMessage(content=task, source="User")) messages: Sequence[AgentEvent | ChatMessage] = task_result.messages - message = messages[-1] + message: AgentEvent | ChatMessage = messages[-1] response_str = message.content # Log the model call From c2d994e853437a3bd6a1d8f706fc435065dbedf9 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 12:07:11 -0700 Subject: [PATCH 19/26] clean up --- .../agents/_user_proxy_agent.py | 4 ++-- .../src/autogen_agentchat/base/_chat_agent.py | 3 ++- .../src/autogen_agentchat/base/_termination.py | 2 +- .../conditions/_terminations.py | 17 +++++++++++++---- 4 files changed, 18 insertions(+), 8 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py index d78a5c6e9ca8..3ca0ec890324 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py @@ -82,7 +82,7 @@ async def simple_user_agent(): cancellation_token=CancellationToken(), ) ) - print(f"Your name is {response.chat_message.content_to_text()}") + print(f"Your name is {response.chat_message.content}") Example: Cancellable usage case:: @@ -117,7 +117,7 @@ async def cancellable_user_agent(): ) ) response = await agent_task - print(f"Your name is {response.chat_message.content_to_text()}") + print(f"Your name is {response.chat_message.content}") except Exception as e: print(f"Exception: {e}") except BaseException as e: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py index 9b594b4df6d4..ec885ee7f8cb 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_chat_agent.py @@ -17,7 +17,8 @@ class Response: """A chat message produced by the agent as the response.""" inner_messages: Sequence[AgentEvent | ChatMessage] | None = None - """Inner messages produced by the agent.""" + """Inner messages produced by the agent, they can be :class:`AgentEvent` + or :class:`ChatMessage`.""" class ChatAgent(ABC, TaskRunner, ComponentBase[BaseModel]): diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py index 3ad5872376ae..d8a3adb96818 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py @@ -15,7 +15,7 @@ class TerminatedException(BaseException): ... class TerminationCondition(ABC, ComponentBase[BaseModel]): """A stateful condition that determines when a conversation should be terminated. - A termination condition is a callable that takes a sequence of :class:`AgentEvent | ChatMessage` objects + A termination condition is a callable that takes a sequence of ChatMessage objects since the last time the condition was called, and returns a StopMessage if the conversation should be terminated, or None otherwise. Once a termination condition has been reached, it must be reset before it can be used again. 
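The composition behavior described in that docstring can be exercised directly. A small usage sketch with the built-in conditions from `conditions/_terminations.py`, using the names defined in this series::

    from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination
    from autogen_agentchat.messages import TextMessage


    async def check_termination() -> None:
        # Stop when "TERMINATE" is mentioned or after 10 messages, whichever comes first.
        termination = TextMentionTermination("TERMINATE") | MaxMessageTermination(max_messages=10)

        stop = await termination([TextMessage(content="All done. TERMINATE", source="assistant")])
        assert stop is not None  # A StopMessage describing why the conversation ended.

        # Once reached, the condition must be reset before it can be used again.
        await termination.reset()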
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py index e4f98257453f..7ccddd1f6da4 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py @@ -8,8 +8,10 @@ from ..base import TerminatedException, TerminationCondition from ..messages import ( AgentEvent, + BaseChatMessage, ChatMessage, HandoffMessage, + MultiModalMessage, StopMessage, TextMessage, ToolCallExecutionEvent, @@ -82,7 +84,7 @@ def terminated(self) -> bool: async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None: if self.terminated: raise TerminatedException("Termination condition has already been reached") - self._message_count += len([m for m in messages if self._include_agent_event or isinstance(m, ChatMessage)]) + self._message_count += len([m for m in messages if self._include_agent_event or isinstance(m, BaseChatMessage)]) if self._message_count >= self._max_messages: return StopMessage( content=f"Maximum number of messages {self._max_messages} reached, current message count: {self._message_count}", @@ -108,7 +110,8 @@ class TextMentionTerminationConfig(BaseModel): class TextMentionTermination(TerminationCondition, Component[TextMentionTerminationConfig]): - """Terminate the conversation if a specific text is mentioned in any :class:`~autogen_agentchat.messages.ChatMessage`. + """Terminate the conversation if a specific text is mentioned. + Args: text: The text to look for in the messages. @@ -134,12 +137,18 @@ async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMe if self._sources is not None and message.source not in self._sources: continue - content = message.content_to_text() - if self._termination_text in content: + if isinstance(message.content, str) and self._termination_text in message.content: self._terminated = True return StopMessage( content=f"Text '{self._termination_text}' mentioned", source="TextMentionTermination" ) + elif isinstance(message, MultiModalMessage): + for item in message.content: + if isinstance(item, str) and self._termination_text in item: + self._terminated = True + return StopMessage( + content=f"Text '{self._termination_text}' mentioned", source="TextMentionTermination" + ) return None async def reset(self) -> None: From cb860acc9d13860bd7488903bb0626648e621756 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 12:12:47 -0700 Subject: [PATCH 20/26] update --- .../src/autogen_agentchat/messages.py | 78 ++++++++++++------- 1 file changed, 51 insertions(+), 27 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index aee6365a7ac2..674cbfb61af4 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -15,45 +15,22 @@ class and includes specific fields relevant to the type of message being sent. class BaseMessage(BaseModel, ABC): - """Base class for all message types in AgentChat. + """Base class for all message types in AgentChat. This is an abstract class + with default implementations for serialization and deserialization. .. warning:: If you want to create a new message type, do not inherit from this class. 
- Instead, inherit from :class:`ChatMessage` or :class:`BaseAgentEvent` + Instead, inherit from :class:`ChatMessage` or :class:`AgentEvent` to clarify the purpose of the message type. """ - content: Any - """The content of the message. This can be any type, but it is expected to be - specified in the derived class.""" - - source: str - """The name of the agent that sent this message.""" - - models_usage: RequestUsage | None = None - """The model client usage incurred when producing this message.""" - - metadata: Dict[str, str] = {} - """Additional metadata about the message.""" - - model_config = ConfigDict(arbitrary_types_allowed=True) - @computed_field def type(self) -> str: """The class name of this message.""" return self.__class__.__name__ - @abstractmethod - def content_to_text(self) -> str: - """Convert the content of the message to a string-only representation - that can be rendered in the console and inspected by the user. - - This is not used for creating text-only content for models. - For :class:`ChatMessage` types, use :meth:`content_to_model_text` instead.""" - ... - def dump(self) -> Mapping[str, Any]: """Convert the message to a JSON-serializable dictionary. @@ -88,6 +65,30 @@ class ChatMessage(BaseMessage, ABC): message using models and return a response as another :class:`ChatMessage`. """ + content: Any + """The content of the message. This can be any type, but it is expected to be + specified in the derived class.""" + + source: str + """The name of the agent that sent this message.""" + + models_usage: RequestUsage | None = None + """The model client usage incurred when producing this message.""" + + metadata: Dict[str, str] = {} + """Additional metadata about the message.""" + + model_config = ConfigDict(arbitrary_types_allowed=True) + + @abstractmethod + def content_to_text(self) -> str: + """Convert the content of the message to a string-only representation + that can be rendered in the console and inspected by the user. + + This is not used for creating text-only content for models. + For :class:`ChatMessage` types, use :meth:`content_to_model_text` instead.""" + ... + @abstractmethod def content_to_model_text(self) -> str: """Convert the content of the message to text-only representation. @@ -147,7 +148,29 @@ class AgentEvent(BaseMessage, ABC): a custom rendering of the content. """ - ... + content: Any + """The content of the message. This can be any type, but it is expected to be + specified in the derived class.""" + + source: str + """The name of the agent that sent this message.""" + + models_usage: RequestUsage | None = None + """The model client usage incurred when producing this message.""" + + metadata: Dict[str, str] = {} + """Additional metadata about the message.""" + + model_config = ConfigDict(arbitrary_types_allowed=True) + + @abstractmethod + def content_to_text(self) -> str: + """Convert the content of the message to a string-only representation + that can be rendered in the console and inspected by the user. + + This is not used for creating text-only content for models. + For :class:`ChatMessage` types, use :meth:`content_to_model_text` instead.""" + ... 
StructuredContentType = TypeVar("StructuredContentType", bound=BaseModel, covariant=True) @@ -388,6 +411,7 @@ def create(self, data: Mapping[str, Any]) -> AgentEvent | ChatMessage: assert issubclass(message_class, ChatMessage) or issubclass(message_class, AgentEvent) return message_class.load(data) + # For backward compatibility BaseAgentEvent = AgentEvent BaseChatMessage = ChatMessage From 74d6fcf823d79e4ad0de9254fdba28176d302ad6 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 12:19:31 -0700 Subject: [PATCH 21/26] lint --- .../autogen_agentchat/teams/_group_chat/_selector_group_chat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 3517914da3cc..8bb775d5d443 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -378,7 +378,7 @@ async def book_trip() -> str: from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.conditions import TextMentionTermination from autogen_agentchat.ui import Console - from autogen_agentchat.messages import AgentEvent | ChatMessage + from autogen_agentchat.messages import AgentEvent, ChatMessage async def main() -> None: From 8813396edcaf1fdd2e07c287b55223c54552c793 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 13:26:23 -0700 Subject: [PATCH 22/26] move content to concrete classes --- .../conditions/_terminations.py | 11 ++--------- .../src/autogen_agentchat/messages.py | 8 -------- .../agents/openai/_openai_assistant_agent.py | 2 +- .../task_centric_memory/utils/apprentice.py | 9 ++------- .../task_centric_memory/utils/page_logger.py | 2 +- .../src/autogen_ext/ui/_rich_console.py | 2 +- .../tests/test_openai_assistant_agent.py | 17 +++++++---------- 7 files changed, 14 insertions(+), 37 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py index 7ccddd1f6da4..3a521377ee6a 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py @@ -11,7 +11,6 @@ BaseChatMessage, ChatMessage, HandoffMessage, - MultiModalMessage, StopMessage, TextMessage, ToolCallExecutionEvent, @@ -137,18 +136,12 @@ async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMe if self._sources is not None and message.source not in self._sources: continue - if isinstance(message.content, str) and self._termination_text in message.content: + content = message.content_to_text() + if self._termination_text in content: self._terminated = True return StopMessage( content=f"Text '{self._termination_text}' mentioned", source="TextMentionTermination" ) - elif isinstance(message, MultiModalMessage): - for item in message.content: - if isinstance(item, str) and self._termination_text in item: - self._terminated = True - return StopMessage( - content=f"Text '{self._termination_text}' mentioned", source="TextMentionTermination" - ) return None async def reset(self) -> None: diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py 
b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index 674cbfb61af4..0b9a980d43bc 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -65,10 +65,6 @@ class ChatMessage(BaseMessage, ABC): message using models and return a response as another :class:`ChatMessage`. """ - content: Any - """The content of the message. This can be any type, but it is expected to be - specified in the derived class.""" - source: str """The name of the agent that sent this message.""" @@ -148,10 +144,6 @@ class AgentEvent(BaseMessage, ABC): a custom rendering of the content. """ - content: Any - """The content of the message. This can be any type, but it is expected to be - specified in the derived class.""" - source: str """The name of the agent that sent this message.""" diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py index f806d2bb9632..6aa93e1f3184 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py @@ -532,7 +532,7 @@ async def handle_incoming_message(self, message: ChatMessage, cancellation_token elif isinstance(c, Image): content.append(ImageURLContentBlockParam(image_url=ImageURLParam(url=c.data_uri), type="image_url")) else: - raise ValueError(f"Unsupported content type: {type(c)} in {message.content}") + raise ValueError(f"Unsupported content type: {type(c)} in {message}") await cancellation_token.link_future( asyncio.ensure_future( self._client.beta.threads.messages.create( diff --git a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py index a8104c0ebc44..575250c513ae 100644 --- a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py +++ b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py @@ -192,7 +192,7 @@ async def _assign_task_to_assistant_agent(self, task: str) -> Tuple[Any, Any]: task_result: TaskResult = await assistant_agent.run(task=TextMessage(content=task, source="User")) messages: Sequence[AgentEvent | ChatMessage] = task_result.messages message: AgentEvent | ChatMessage = messages[-1] - response_str = message.content + response_str = message.content_to_text() # Log the model call self.logger.log_model_task( @@ -245,12 +245,7 @@ async def _assign_task_to_magentic_one(self, task: str) -> Tuple[str, str]: response_str_list: List[str] = [] for message in messages: - content = message.content - if isinstance(content, str): - content_str = content - else: - content_str = "Not a string." 
- response_str_list.append(content_str) + response_str_list.append(message.content_to_text()) response_str = "\n".join(response_str_list) self.logger.info("\n----- RESPONSE -----\n\n{}\n".format(response_str)) diff --git a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py index 92964dfbec12..890ca8c34e1e 100644 --- a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py +++ b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py @@ -345,7 +345,7 @@ def log_model_task( messages: Sequence[AgentEvent | ChatMessage] = task_result.messages message = messages[-1] - response_str = message.content + response_str = message.content_to_text() if not isinstance(response_str, str): response_str = "??" diff --git a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py index 1951205e8ed5..d052928057eb 100644 --- a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py +++ b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py @@ -61,7 +61,7 @@ def _extract_message_content(message: AgentEvent | ChatMessage) -> Tuple[List[st text_parts = [item for item in message.content if isinstance(item, str)] image_parts = [item for item in message.content if isinstance(item, Image)] else: - text_parts = [str(message.content)] + text_parts = [message.content_to_text()] image_parts = [] return text_parts, image_parts diff --git a/python/packages/autogen-ext/tests/test_openai_assistant_agent.py b/python/packages/autogen-ext/tests/test_openai_assistant_agent.py index 43bd3447737a..2213d6f6a486 100644 --- a/python/packages/autogen-ext/tests/test_openai_assistant_agent.py +++ b/python/packages/autogen-ext/tests/test_openai_assistant_agent.py @@ -8,7 +8,7 @@ import aiofiles import pytest -from autogen_agentchat.messages import ChatMessage, TextMessage +from autogen_agentchat.messages import ChatMessage, TextMessage, ToolCallRequestEvent from autogen_core import CancellationToken from autogen_core.tools._base import BaseTool, Tool from autogen_ext.agents.openai import OpenAIAssistantAgent @@ -250,8 +250,7 @@ async def fake_async_aiofiles_open(*args: Any, **kwargs: Dict[str, Any]) -> Asyn message = TextMessage(source="user", content="What is the first sentence of the jungle scout book?") response = await agent.on_messages([message], cancellation_token) - assert response.chat_message.content is not None - assert isinstance(response.chat_message.content, str) + assert isinstance(response.chat_message, TextMessage) assert len(response.chat_message.content) > 0 await agent.delete_uploaded_files(cancellation_token) @@ -271,8 +270,7 @@ async def test_code_interpreter( message = TextMessage(source="user", content="I need to solve the equation `3x + 11 = 14`. Can you help me?") response = await agent.on_messages([message], cancellation_token) - assert response.chat_message.content is not None - assert isinstance(response.chat_message.content, str) + assert isinstance(response.chat_message, TextMessage) assert len(response.chat_message.content) > 0 assert "x = 1" in response.chat_message.content.lower() @@ -326,12 +324,11 @@ async def test_quiz_creation( response = await agent.on_messages([message], cancellation_token) # Check that the final response has non-empty inner messages (i.e. tool call events). 
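The test updates that follow continue this migration: rather than asserting on `response.chat_message.content` directly, they narrow the message to a concrete class first and only then touch `content`. A minimal sketch, not part of the patch, of that narrowing pattern; the `summarize` helper is hypothetical:

```python
# Hypothetical helper for illustration only -- not part of this patch.
from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage


def summarize(message: AgentEvent | ChatMessage) -> str:
    if isinstance(message, TextMessage):
        # Narrowed to a concrete class, so `content` is known to be a str.
        return message.content
    # Other message types fall back to the shared text-rendering hook.
    return message.content_to_text()
```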
- assert response.chat_message.content is not None - assert isinstance(response.chat_message.content, str) + assert isinstance(response.chat_message, TextMessage) assert len(response.chat_message.content) > 0 assert isinstance(response.inner_messages, list) # Ensure that at least one inner message has non-empty content. - assert any(hasattr(tool_msg, "content") and tool_msg.content for tool_msg in response.inner_messages) + assert any(isinstance(msg, ToolCallRequestEvent) for msg in response.inner_messages) await agent.delete_assistant(cancellation_token) @@ -357,14 +354,14 @@ async def test_on_reset_behavior(client: AsyncOpenAI, cancellation_token: Cancel message1 = TextMessage(source="user", content="What is my name?") response1 = await agent.on_messages([message1], cancellation_token) - assert isinstance(response1.chat_message.content, str) + assert isinstance(response1.chat_message, TextMessage) assert "john" in response1.chat_message.content.lower() await agent.on_reset(cancellation_token) message2 = TextMessage(source="user", content="What is my name?") response2 = await agent.on_messages([message2], cancellation_token) - assert isinstance(response2.chat_message.content, str) + assert isinstance(response2.chat_message, TextMessage) assert "john" in response2.chat_message.content.lower() await agent.delete_assistant(cancellation_token) From 9510e090ae71cdea1757016ac3eb7fbdd147f912 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 13:44:37 -0700 Subject: [PATCH 23/26] fix notebook --- .../agentchat-user-guide/custom-agents.ipynb | 1472 ++++++------ .../selector-group-chat.ipynb | 2046 ++++++++--------- .../agentchat-user-guide/tracing.ipynb | 7 +- .../tutorial/agents.ipynb | 1688 +++++++------- .../agentchat-user-guide/tutorial/state.ipynb | 712 +++--- .../framework/agent-and-agent-runtime.ipynb | 2 +- 6 files changed, 2963 insertions(+), 2964 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb index 1a526ee6106b..d11e5d1fc15d 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb @@ -1,739 +1,739 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Custom Agents\n", - "\n", - "You may have agents with behaviors that do not fall into a preset. \n", - "In such cases, you can build custom agents.\n", - "\n", - "All agents in AgentChat inherit from {py:class}`~autogen_agentchat.agents.BaseChatAgent` \n", - "class and implement the following abstract methods and attributes:\n", - "\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. 
This method is called when the agent is asked to reset itself.\n", - "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.ChatMessage` message types the agent can produce in its response.\n", - "\n", - "Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. If this method is not implemented, the agent\n", - "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n", - "that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n", - "yields all messages in the response." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## CountDownAgent\n", - "\n", - "In this example, we create a simple agent that counts down from a given number to zero,\n", - "and produces a stream of messages with the current count." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "3...\n", - "2...\n", - "1...\n", - "Done!\n" - ] - } - ], - "source": [ - "from typing import AsyncGenerator, List, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage\n", - "from autogen_core import CancellationToken\n", - "\n", - "\n", - "class CountDownAgent(BaseChatAgent):\n", - " def __init__(self, name: str, count: int = 3):\n", - " super().__init__(name, \"A simple agent that counts down.\")\n", - " self._count = count\n", - "\n", - " @property\n", - " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", - " return (TextMessage,)\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " # Calls the on_messages_stream.\n", - " response: Response | None = None\n", - " async for message in self.on_messages_stream(messages, cancellation_token):\n", - " if isinstance(message, Response):\n", - " response = message\n", - " assert response is not None\n", - " return response\n", - "\n", - " async def on_messages_stream(\n", - " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", - " inner_messages: List[AgentEvent | ChatMessage] = []\n", - " for i in range(self._count, 0, -1):\n", - " msg = TextMessage(content=f\"{i}...\", source=self.name)\n", - " inner_messages.append(msg)\n", - " yield msg\n", - " # The response is returned at the end of the stream.\n", - " # It contains the final message and all the inner messages.\n", - " yield Response(chat_message=TextMessage(content=\"Done!\", source=self.name), inner_messages=inner_messages)\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " pass\n", - "\n", - "\n", - "async def run_countdown_agent() -> None:\n", - " # Create a countdown agent.\n", - " countdown_agent = CountDownAgent(\"countdown\")\n", - "\n", - " # Run the agent with a given task and stream the response.\n", - " async for message in countdown_agent.on_messages_stream([], CancellationToken()):\n", - " if isinstance(message, Response):\n", - " print(message.chat_message.content)\n", - " else:\n", - " print(message.content)\n", - "\n", - 
"\n", - "# Use asyncio.run(run_countdown_agent()) when running in a script.\n", - "await run_countdown_agent()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ArithmeticAgent\n", - "\n", - "In this example, we create an agent class that can perform simple arithmetic operations\n", - "on a given integer. Then, we will use different instances of this agent class\n", - "in a {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", - "to transform a given integer into another integer by applying a sequence of arithmetic operations.\n", - "\n", - "The `ArithmeticAgent` class takes an `operator_func` that takes an integer and returns an integer,\n", - "after applying an arithmetic operation to the integer.\n", - "In its `on_messages` method, it applies the `operator_func` to the integer in the input message,\n", - "and returns a response with the result." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Callable, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.conditions import MaxMessageTermination\n", - "from autogen_agentchat.messages import ChatMessage\n", - "from autogen_agentchat.teams import SelectorGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_core import CancellationToken\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "\n", - "class ArithmeticAgent(BaseChatAgent):\n", - " def __init__(self, name: str, description: str, operator_func: Callable[[int], int]) -> None:\n", - " super().__init__(name, description=description)\n", - " self._operator_func = operator_func\n", - " self._message_history: List[ChatMessage] = []\n", - "\n", - " @property\n", - " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", - " return (TextMessage,)\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " # Update the message history.\n", - " # NOTE: it is possible the messages is an empty list, which means the agent was selected previously.\n", - " self._message_history.extend(messages)\n", - " # Parse the number in the last message.\n", - " assert isinstance(self._message_history[-1], TextMessage)\n", - " number = int(self._message_history[-1].content)\n", - " # Apply the operator function to the number.\n", - " result = self._operator_func(number)\n", - " # Create a new message with the result.\n", - " response_message = TextMessage(content=str(result), source=self.name)\n", - " # Update the message history.\n", - " self._message_history.append(response_message)\n", - " # Return the response.\n", - " return Response(chat_message=response_message)\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " pass" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "The `on_messages` method may be called with an empty list of messages, in which\n", - "case it means the agent was called previously and is now being called again,\n", - "without any new messages from the caller. 
So it is important to keep a history\n", - "of the previous messages received by the agent, and use that history to generate\n", - "the response.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with 5 instances of `ArithmeticAgent`:\n", - "\n", - "- one that adds 1 to the input integer,\n", - "- one that subtracts 1 from the input integer,\n", - "- one that multiplies the input integer by 2,\n", - "- one that divides the input integer by 2 and rounds down to the nearest integer, and\n", - "- one that returns the input integer unchanged.\n", - "\n", - "We then create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with these agents,\n", - "and set the appropriate selector settings:\n", - "\n", - "- allow the same agent to be selected consecutively to allow for repeated operations, and\n", - "- customize the selector prompt to tailor the model's response to the specific task." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Apply the operations to turn the given number into 25.\n", - "---------- user ----------\n", - "10\n", - "---------- multiply_agent ----------\n", - "20\n", - "---------- add_agent ----------\n", - "21\n", - "---------- multiply_agent ----------\n", - "42\n", - "---------- divide_agent ----------\n", - "21\n", - "---------- add_agent ----------\n", - "22\n", - "---------- add_agent ----------\n", - "23\n", - "---------- add_agent ----------\n", - "24\n", - "---------- add_agent ----------\n", - "25\n", - "---------- Summary ----------\n", - "Number of messages: 10\n", - "Finish reason: Maximum number of messages 10 reached, current message count: 10\n", - "Total prompt tokens: 0\n", - "Total completion tokens: 0\n", - "Duration: 2.40 seconds\n" - ] - } - ], - "source": [ - "async def run_number_agents() -> None:\n", - " # Create agents for number operations.\n", - " add_agent = ArithmeticAgent(\"add_agent\", \"Adds 1 to the number.\", lambda x: x + 1)\n", - " multiply_agent = ArithmeticAgent(\"multiply_agent\", \"Multiplies the number by 2.\", lambda x: x * 2)\n", - " subtract_agent = ArithmeticAgent(\"subtract_agent\", \"Subtracts 1 from the number.\", lambda x: x - 1)\n", - " divide_agent = ArithmeticAgent(\"divide_agent\", \"Divides the number by 2 and rounds down.\", lambda x: x // 2)\n", - " identity_agent = ArithmeticAgent(\"identity_agent\", \"Returns the number as is.\", lambda x: x)\n", - "\n", - " # The termination condition is to stop after 10 messages.\n", - " termination_condition = MaxMessageTermination(10)\n", - "\n", - " # Create a selector group chat.\n", - " selector_group_chat = SelectorGroupChat(\n", - " [add_agent, multiply_agent, subtract_agent, divide_agent, identity_agent],\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", - " termination_condition=termination_condition,\n", - " allow_repeated_speaker=True, # Allow the same agent to speak multiple times, necessary for this task.\n", - " selector_prompt=(\n", - " \"Available roles:\\n{roles}\\nTheir job descriptions:\\n{participants}\\n\"\n", - " \"Current conversation history:\\n{history}\\n\"\n", - " \"Please select the most appropriate role for the next message, and only return the role name.\"\n", - " ),\n", - " )\n", - "\n", - " # Run the selector group chat with a given task and stream the response.\n", - " 
task: List[ChatMessage] = [\n", - " TextMessage(content=\"Apply the operations to turn the given number into 25.\", source=\"user\"),\n", - " TextMessage(content=\"10\", source=\"user\"),\n", - " ]\n", - " stream = selector_group_chat.run_stream(task=task)\n", - " await Console(stream)\n", - "\n", - "\n", - "# Use asyncio.run(run_number_agents()) when running in a script.\n", - "await run_number_agents()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "From the output, we can see that the agents have successfully transformed the input integer\n", - "from 10 to 25 by choosing appropriate agents that apply the arithmetic operations in sequence." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using Custom Model Clients in Custom Agents\n", - "\n", - "One of the key features of the {py:class}`~autogen_agentchat.agents.AssistantAgent` preset in AgentChat is that it takes a `model_client` argument and can use it in responding to messages. However, in some cases, you may want your agent to use a custom model client that is not currently supported (see [supported model clients](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/components/model-clients.html)) or custom model behaviours. \n", - "\n", - "You can accomplish this with a custom agent that implements *your custom model client*.\n", - "\n", - "In the example below, we will walk through an example of a custom agent that uses the [Google Gemini SDK](https://github.com/googleapis/python-genai) directly to respond to messages.\n", - "\n", - "> **Note:** You will need to install the [Google Gemini SDK](https://github.com/googleapis/python-genai) to run this example. You can install it using the following command: \n", - "\n", - "```bash\n", - "pip install google-genai\n", - "``` " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# !pip install google-genai\n", - "import os\n", - "from typing import AsyncGenerator, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", - "from autogen_core import CancellationToken\n", - "from autogen_core.model_context import UnboundedChatCompletionContext\n", - "from autogen_core.models import AssistantMessage, RequestUsage, UserMessage\n", - "from google import genai\n", - "from google.genai import types\n", - "\n", - "\n", - "class GeminiAssistantAgent(BaseChatAgent):\n", - " def __init__(\n", - " self,\n", - " name: str,\n", - " description: str = \"An agent that provides assistance with ability to use tools.\",\n", - " model: str = \"gemini-1.5-flash-002\",\n", - " api_key: str = os.environ[\"GEMINI_API_KEY\"],\n", - " system_message: str\n", - " | None = \"You are a helpful assistant that can respond to messages. 
Reply with TERMINATE when the task has been completed.\",\n", - " ):\n", - " super().__init__(name=name, description=description)\n", - " self._model_context = UnboundedChatCompletionContext()\n", - " self._model_client = genai.Client(api_key=api_key)\n", - " self._system_message = system_message\n", - " self._model = model\n", - "\n", - " @property\n", - " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", - " return (TextMessage,)\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " final_response = None\n", - " async for message in self.on_messages_stream(messages, cancellation_token):\n", - " if isinstance(message, Response):\n", - " final_response = message\n", - "\n", - " if final_response is None:\n", - " raise AssertionError(\"The stream should have returned the final result.\")\n", - "\n", - " return final_response\n", - "\n", - " async def on_messages_stream(\n", - " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", - " # Add messages to the model context\n", - " for msg in messages:\n", - " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", - "\n", - " # Get conversation history\n", - " history = [\n", - " (msg.source if hasattr(msg, \"source\") else \"system\")\n", - " + \": \"\n", - " + (msg.content if isinstance(msg.content, str) else \"\")\n", - " + \"\\n\"\n", - " for msg in await self._model_context.get_messages()\n", - " ]\n", - " # Generate response using Gemini\n", - " response = self._model_client.models.generate_content(\n", - " model=self._model,\n", - " contents=f\"History: {history}\\nGiven the history, please provide a response\",\n", - " config=types.GenerateContentConfig(\n", - " system_instruction=self._system_message,\n", - " temperature=0.3,\n", - " ),\n", - " )\n", - "\n", - " # Create usage metadata\n", - " usage = RequestUsage(\n", - " prompt_tokens=response.usage_metadata.prompt_token_count,\n", - " completion_tokens=response.usage_metadata.candidates_token_count,\n", - " )\n", - "\n", - " # Add response to model context\n", - " await self._model_context.add_message(AssistantMessage(content=response.text, source=self.name))\n", - "\n", - " # Yield the final response\n", - " yield Response(\n", - " chat_message=TextMessage(content=response.text, source=self.name, models_usage=usage),\n", - " inner_messages=[],\n", - " )\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " \"\"\"Reset the assistant by clearing the model context.\"\"\"\n", - " await self._model_context.clear()" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "What is the capital of New York?\n", - "---------- gemini_assistant ----------\n", - "Albany\n", - "TERMINATE\n", - "\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Custom Agents\n", + "\n", + "You may have agents with behaviors that do not fall into a preset. 
\n",
 + "In such cases, you can build custom agents.\n",
 + "\n",
 + "All agents in AgentChat inherit from {py:class}`~autogen_agentchat.agents.BaseChatAgent` \n",
 + "class and implement the following abstract methods and attributes:\n",
 + "\n",
 + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n",
 + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n",
 + "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.ChatMessage` message types the agent can produce in its response.\n",
 + "\n",
 + "Optionally, you can implement the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. If this method is not implemented, the agent\n",
 + "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n",
 + "that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n",
 + "yields all messages in the response."
 + ]
 + },
 + {
 + "cell_type": "markdown",
 + "metadata": {},
 + "source": [
 + "## CountDownAgent\n",
 + "\n",
 + "In this example, we create a simple agent that counts down from a given number to zero,\n",
 + "and produces a stream of messages with the current count."
 + ]
 + },
 + {
 + "cell_type": "code",
 + "execution_count": 1,
 + "metadata": {},
 + "outputs": [
 + {
 + "name": "stdout",
 + "output_type": "stream",
 + "text": [
 + "3...\n",
 + "2...\n",
 + "1...\n",
 + "Done!\n"
 + ]
 + }
 + ],
 + "source": [
 + "from typing import AsyncGenerator, List, Sequence\n",
 + "\n",
 + "from autogen_agentchat.agents import BaseChatAgent\n",
 + "from autogen_agentchat.base import Response\n",
 + "from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage\n",
 + "from autogen_core import CancellationToken\n",
 + "\n",
 + "\n",
 + "class CountDownAgent(BaseChatAgent):\n",
 + " def __init__(self, name: str, count: int = 3):\n",
 + " super().__init__(name, \"A simple agent that counts down.\")\n",
 + " self._count = count\n",
 + "\n",
 + " @property\n",
 + " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n",
 + " return (TextMessage,)\n",
 + "\n",
 + " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n",
 + " # Calls the on_messages_stream.\n",
 + " response: Response | None = None\n",
 + " async for message in self.on_messages_stream(messages, cancellation_token):\n",
 + " if isinstance(message, Response):\n",
 + " response = message\n",
 + " assert response is not None\n",
 + " return response\n",
 + "\n",
 + " async def on_messages_stream(\n",
 + " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n",
 + " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n",
 + " inner_messages: List[AgentEvent | ChatMessage] = []\n",
 + " for i in range(self._count, 0, -1):\n",
 + " msg = TextMessage(content=f\"{i}...\", source=self.name)\n",
 + " inner_messages.append(msg)\n",
 + " yield msg\n",
 + " # The response is returned at the end of the stream.\n",
 + " # It contains the 
final message and all the inner messages.\n", + " yield Response(chat_message=TextMessage(content=\"Done!\", source=self.name), inner_messages=inner_messages)\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " pass\n", + "\n", + "\n", + "async def run_countdown_agent() -> None:\n", + " # Create a countdown agent.\n", + " countdown_agent = CountDownAgent(\"countdown\")\n", + "\n", + " # Run the agent with a given task and stream the response.\n", + " async for message in countdown_agent.on_messages_stream([], CancellationToken()):\n", + " if isinstance(message, Response):\n", + " print(message.chat_message)\n", + " else:\n", + " print(message)\n", + "\n", + "\n", + "# Use asyncio.run(run_countdown_agent()) when running in a script.\n", + "await run_countdown_agent()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ArithmeticAgent\n", + "\n", + "In this example, we create an agent class that can perform simple arithmetic operations\n", + "on a given integer. Then, we will use different instances of this agent class\n", + "in a {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", + "to transform a given integer into another integer by applying a sequence of arithmetic operations.\n", + "\n", + "The `ArithmeticAgent` class takes an `operator_func` that takes an integer and returns an integer,\n", + "after applying an arithmetic operation to the integer.\n", + "In its `on_messages` method, it applies the `operator_func` to the integer in the input message,\n", + "and returns a response with the result." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import Callable, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.conditions import MaxMessageTermination\n", + "from autogen_agentchat.messages import ChatMessage\n", + "from autogen_agentchat.teams import SelectorGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_core import CancellationToken\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "\n", + "class ArithmeticAgent(BaseChatAgent):\n", + " def __init__(self, name: str, description: str, operator_func: Callable[[int], int]) -> None:\n", + " super().__init__(name, description=description)\n", + " self._operator_func = operator_func\n", + " self._message_history: List[ChatMessage] = []\n", + "\n", + " @property\n", + " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", + " return (TextMessage,)\n", + "\n", + " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " # Update the message history.\n", + " # NOTE: it is possible the messages is an empty list, which means the agent was selected previously.\n", + " self._message_history.extend(messages)\n", + " # Parse the number in the last message.\n", + " assert isinstance(self._message_history[-1], TextMessage)\n", + " number = int(self._message_history[-1].content)\n", + " # Apply the operator function to the number.\n", + " result = self._operator_func(number)\n", + " # Create a new message with the result.\n", + " response_message = TextMessage(content=str(result), source=self.name)\n", + " # Update the message history.\n", + " self._message_history.append(response_message)\n", + " # Return the response.\n", + " return 
Response(chat_message=response_message)\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " pass" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "The `on_messages` method may be called with an empty list of messages, in which\n", + "case it means the agent was called previously and is now being called again,\n", + "without any new messages from the caller. So it is important to keep a history\n", + "of the previous messages received by the agent, and use that history to generate\n", + "the response.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with 5 instances of `ArithmeticAgent`:\n", + "\n", + "- one that adds 1 to the input integer,\n", + "- one that subtracts 1 from the input integer,\n", + "- one that multiplies the input integer by 2,\n", + "- one that divides the input integer by 2 and rounds down to the nearest integer, and\n", + "- one that returns the input integer unchanged.\n", + "\n", + "We then create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with these agents,\n", + "and set the appropriate selector settings:\n", + "\n", + "- allow the same agent to be selected consecutively to allow for repeated operations, and\n", + "- customize the selector prompt to tailor the model's response to the specific task." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Apply the operations to turn the given number into 25.\n", + "---------- user ----------\n", + "10\n", + "---------- multiply_agent ----------\n", + "20\n", + "---------- add_agent ----------\n", + "21\n", + "---------- multiply_agent ----------\n", + "42\n", + "---------- divide_agent ----------\n", + "21\n", + "---------- add_agent ----------\n", + "22\n", + "---------- add_agent ----------\n", + "23\n", + "---------- add_agent ----------\n", + "24\n", + "---------- add_agent ----------\n", + "25\n", + "---------- Summary ----------\n", + "Number of messages: 10\n", + "Finish reason: Maximum number of messages 10 reached, current message count: 10\n", + "Total prompt tokens: 0\n", + "Total completion tokens: 0\n", + "Duration: 2.40 seconds\n" + ] + } + ], + "source": [ + "async def run_number_agents() -> None:\n", + " # Create agents for number operations.\n", + " add_agent = ArithmeticAgent(\"add_agent\", \"Adds 1 to the number.\", lambda x: x + 1)\n", + " multiply_agent = ArithmeticAgent(\"multiply_agent\", \"Multiplies the number by 2.\", lambda x: x * 2)\n", + " subtract_agent = ArithmeticAgent(\"subtract_agent\", \"Subtracts 1 from the number.\", lambda x: x - 1)\n", + " divide_agent = ArithmeticAgent(\"divide_agent\", \"Divides the number by 2 and rounds down.\", lambda x: x // 2)\n", + " identity_agent = ArithmeticAgent(\"identity_agent\", \"Returns the number as is.\", lambda x: x)\n", + "\n", + " # The termination condition is to stop after 10 messages.\n", + " termination_condition = MaxMessageTermination(10)\n", + "\n", + " # Create a selector group chat.\n", + " selector_group_chat = SelectorGroupChat(\n", + " [add_agent, multiply_agent, subtract_agent, divide_agent, identity_agent],\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n", + " termination_condition=termination_condition,\n", + " allow_repeated_speaker=True, # Allow the same 
agent to speak multiple times, necessary for this task.\n", + " selector_prompt=(\n", + " \"Available roles:\\n{roles}\\nTheir job descriptions:\\n{participants}\\n\"\n", + " \"Current conversation history:\\n{history}\\n\"\n", + " \"Please select the most appropriate role for the next message, and only return the role name.\"\n", + " ),\n", + " )\n", + "\n", + " # Run the selector group chat with a given task and stream the response.\n", + " task: List[ChatMessage] = [\n", + " TextMessage(content=\"Apply the operations to turn the given number into 25.\", source=\"user\"),\n", + " TextMessage(content=\"10\", source=\"user\"),\n", + " ]\n", + " stream = selector_group_chat.run_stream(task=task)\n", + " await Console(stream)\n", + "\n", + "\n", + "# Use asyncio.run(run_number_agents()) when running in a script.\n", + "await run_number_agents()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "From the output, we can see that the agents have successfully transformed the input integer\n", + "from 10 to 25 by choosing appropriate agents that apply the arithmetic operations in sequence." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Custom Model Clients in Custom Agents\n", + "\n", + "One of the key features of the {py:class}`~autogen_agentchat.agents.AssistantAgent` preset in AgentChat is that it takes a `model_client` argument and can use it in responding to messages. However, in some cases, you may want your agent to use a custom model client that is not currently supported (see [supported model clients](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/components/model-clients.html)) or custom model behaviours. \n", + "\n", + "You can accomplish this with a custom agent that implements *your custom model client*.\n", + "\n", + "In the example below, we will walk through an example of a custom agent that uses the [Google Gemini SDK](https://github.com/googleapis/python-genai) directly to respond to messages.\n", + "\n", + "> **Note:** You will need to install the [Google Gemini SDK](https://github.com/googleapis/python-genai) to run this example. You can install it using the following command: \n", + "\n", + "```bash\n", + "pip install google-genai\n", + "``` " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# !pip install google-genai\n", + "import os\n", + "from typing import AsyncGenerator, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", + "from autogen_core import CancellationToken\n", + "from autogen_core.model_context import UnboundedChatCompletionContext\n", + "from autogen_core.models import AssistantMessage, RequestUsage, UserMessage\n", + "from google import genai\n", + "from google.genai import types\n", + "\n", + "\n", + "class GeminiAssistantAgent(BaseChatAgent):\n", + " def __init__(\n", + " self,\n", + " name: str,\n", + " description: str = \"An agent that provides assistance with ability to use tools.\",\n", + " model: str = \"gemini-1.5-flash-002\",\n", + " api_key: str = os.environ[\"GEMINI_API_KEY\"],\n", + " system_message: str\n", + " | None = \"You are a helpful assistant that can respond to messages. 
Reply with TERMINATE when the task has been completed.\",\n", + " ):\n", + " super().__init__(name=name, description=description)\n", + " self._model_context = UnboundedChatCompletionContext()\n", + " self._model_client = genai.Client(api_key=api_key)\n", + " self._system_message = system_message\n", + " self._model = model\n", + "\n", + " @property\n", + " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", + " return (TextMessage,)\n", + "\n", + " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " final_response = None\n", + " async for message in self.on_messages_stream(messages, cancellation_token):\n", + " if isinstance(message, Response):\n", + " final_response = message\n", + "\n", + " if final_response is None:\n", + " raise AssertionError(\"The stream should have returned the final result.\")\n", + "\n", + " return final_response\n", + "\n", + " async def on_messages_stream(\n", + " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", + " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", + " # Add messages to the model context\n", + " for msg in messages:\n", + " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", + "\n", + " # Get conversation history\n", + " history = [\n", + " (msg.source if hasattr(msg, \"source\") else \"system\")\n", + " + \": \"\n", + " + (msg.content if isinstance(msg.content, str) else \"\")\n", + " + \"\\n\"\n", + " for msg in await self._model_context.get_messages()\n", + " ]\n", + " # Generate response using Gemini\n", + " response = self._model_client.models.generate_content(\n", + " model=self._model,\n", + " contents=f\"History: {history}\\nGiven the history, please provide a response\",\n", + " config=types.GenerateContentConfig(\n", + " system_instruction=self._system_message,\n", + " temperature=0.3,\n", + " ),\n", + " )\n", + "\n", + " # Create usage metadata\n", + " usage = RequestUsage(\n", + " prompt_tokens=response.usage_metadata.prompt_token_count,\n", + " completion_tokens=response.usage_metadata.candidates_token_count,\n", + " )\n", + "\n", + " # Add response to model context\n", + " await self._model_context.add_message(AssistantMessage(content=response.text, source=self.name))\n", + "\n", + " # Yield the final response\n", + " yield Response(\n", + " chat_message=TextMessage(content=response.text, source=self.name, models_usage=usage),\n", + " inner_messages=[],\n", + " )\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " \"\"\"Reset the assistant by clearing the model context.\"\"\"\n", + " await self._model_context.clear()" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "What is the capital of New York?\n", + "---------- gemini_assistant ----------\n", + "Albany\n", + "TERMINATE\n", + "\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the capital of New York?', type='TextMessage'), TextMessage(source='gemini_assistant', models_usage=RequestUsage(prompt_tokens=46, completion_tokens=5), content='Albany\\nTERMINATE\\n', type='TextMessage')], stop_reason=None)" + ] + }, + "execution_count": 38, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "gemini_assistant = 
GeminiAssistantAgent(\"gemini_assistant\")\n",
 + "await Console(gemini_assistant.run_stream(task=\"What is the capital of New York?\"))"
 + ]
 + },
 + {
 + "cell_type": "markdown",
 + "metadata": {},
 + "source": [
 + "In the example above, we have chosen to provide `model`, `api_key` and `system_message` as arguments - you can choose to provide any other arguments that are required by the model client you are using or that fit your application design. \n",
 + "\n",
 + "Now, let us explore how to use this custom agent as part of a team in AgentChat."
 + ]
 + },
 + {
 + "cell_type": "code",
 + "execution_count": 39,
 + "metadata": {},
 + "outputs": [
 + {
 + "name": "stdout",
 + "output_type": "stream",
 + "text": [
 + "---------- user ----------\n",
 + "Write a Haiku poem with 4 lines about the fall season.\n",
 + "---------- primary ----------\n",
 + "Crimson leaves cascade, \n",
 + "Whispering winds sing of change, \n",
 + "Chill wraps the fading, \n",
 + "Nature's quilt, rich and warm.\n",
 + "---------- gemini_critic ----------\n",
 + "The poem is good, but it has four lines instead of three. A haiku must have three lines with a 5-7-5 syllable structure. The content is evocative of autumn, but the form is incorrect. Please revise to adhere to the haiku's syllable structure.\n",
 + "\n",
 + "---------- primary ----------\n",
 + "Thank you for your feedback! Here’s a revised haiku that follows the 5-7-5 syllable structure:\n",
 + "\n",
 + "Crimson leaves drift down, \n",
 + "Chill winds whisper through the gold, \n",
 + "Autumn’s breath is near.\n",
 + "---------- gemini_critic ----------\n",
 + "The revised haiku is much improved. It correctly follows the 5-7-5 syllable structure and maintains the evocative imagery of autumn. APPROVE\n",
 + "\n"
 + ]
 + },
 + {
 + "data": {
 + "text/plain": [
 + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a Haiku poem with 4 lines about the fall season.', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=33, completion_tokens=31), content=\"Crimson leaves cascade, \\nWhispering winds sing of change, \\nChill wraps the fading, \\nNature's quilt, rich and warm.\", type='TextMessage'), TextMessage(source='gemini_critic', models_usage=RequestUsage(prompt_tokens=86, completion_tokens=60), content=\"The poem is good, but it has four lines instead of three. A haiku must have three lines with a 5-7-5 syllable structure. The content is evocative of autumn, but the form is incorrect. Please revise to adhere to the haiku's syllable structure.\\n\", type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=141, completion_tokens=49), content='Thank you for your feedback! Here’s a revised haiku that follows the 5-7-5 syllable structure:\\n\\nCrimson leaves drift down, \\nChill winds whisper through the gold, \\nAutumn’s breath is near.', type='TextMessage'), TextMessage(source='gemini_critic', models_usage=RequestUsage(prompt_tokens=211, completion_tokens=32), content='The revised haiku is much improved. It correctly follows the 5-7-5 syllable structure and maintains the evocative imagery of autumn. 
APPROVE\\n', type='TextMessage')], stop_reason=\"Text 'APPROVE' mentioned\")"
 + ]
 + },
 + "execution_count": 39,
 + "metadata": {},
 + "output_type": "execute_result"
 + }
 + ],
 + "source": [
 + "from autogen_agentchat.agents import AssistantAgent\n",
 + "from autogen_agentchat.conditions import TextMentionTermination\n",
 + "from autogen_agentchat.teams import RoundRobinGroupChat\n",
 + "from autogen_agentchat.ui import Console\n",
 + "\n",
 + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n",
 + "\n",
 + "# Create the primary agent.\n",
 + "primary_agent = AssistantAgent(\n",
 + " \"primary\",\n",
 + " model_client=model_client,\n",
 + " system_message=\"You are a helpful AI assistant.\",\n",
 + ")\n",
 + "\n",
 + "# Create a critic agent based on our new GeminiAssistantAgent.\n",
 + "gemini_critic_agent = GeminiAssistantAgent(\n",
 + " \"gemini_critic\",\n",
 + " system_message=\"Provide constructive feedback. Respond with 'APPROVE' when your feedback has been addressed.\",\n",
 + ")\n",
 + "\n",
 + "\n",
 + "# Define a termination condition that stops the task if the critic approves or after 10 messages.\n",
 + "termination = TextMentionTermination(\"APPROVE\") | MaxMessageTermination(10)\n",
 + "\n",
 + "# Create a team with the primary and critic agents.\n",
 + "team = RoundRobinGroupChat([primary_agent, gemini_critic_agent], termination_condition=termination)\n",
 + "\n",
 + "await Console(team.run_stream(task=\"Write a Haiku poem with 4 lines about the fall season.\"))\n",
 + "await model_client.close()"
 + ]
 + },
 + {
 + "cell_type": "markdown",
 + "metadata": {},
 + "source": [
 + "In the section above, we showed several important concepts:\n",
 + "- We have developed a custom agent that uses the Google Gemini SDK to respond to messages. \n",
 + "- We show that this custom agent can be used as part of the broader AgentChat ecosystem - in this case as a participant in a {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` as long as it inherits from {py:class}`~autogen_agentchat.agents.BaseChatAgent`.\n"
 + ]
 + },
 + {
 + "cell_type": "markdown",
 + "metadata": {},
 + "source": [
 + "## Making the Custom Agent Declarative \n",
 + "\n",
 + "Autogen provides a [Component](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/component-config.html) interface for making the configuration of components serializable to a declarative format. This is useful for saving and loading configurations, and for sharing configurations with others. \n",
 + "\n",
 + "We accomplish this by inheriting from the `Component` class and implementing the `_from_config` and `_to_config` methods.\n",
 + "The declarative class can be serialized to a JSON format using the `dump_component` method, and deserialized from a JSON format using the `load_component` method."
+ ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from typing import AsyncGenerator, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", + "from autogen_core import CancellationToken, Component\n", + "from pydantic import BaseModel\n", + "from typing_extensions import Self\n", + "\n", + "\n", + "class GeminiAssistantAgentConfig(BaseModel):\n", + " name: str\n", + " description: str = \"An agent that provides assistance with ability to use tools.\"\n", + " model: str = \"gemini-1.5-flash-002\"\n", + " system_message: str | None = None\n", + "\n", + "\n", + "class GeminiAssistantAgent(BaseChatAgent, Component[GeminiAssistantAgentConfig]): # type: ignore[no-redef]\n", + " component_config_schema = GeminiAssistantAgentConfig\n", + " # component_provider_override = \"mypackage.agents.GeminiAssistantAgent\"\n", + "\n", + " def __init__(\n", + " self,\n", + " name: str,\n", + " description: str = \"An agent that provides assistance with ability to use tools.\",\n", + " model: str = \"gemini-1.5-flash-002\",\n", + " api_key: str = os.environ[\"GEMINI_API_KEY\"],\n", + " system_message: str\n", + " | None = \"You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.\",\n", + " ):\n", + " super().__init__(name=name, description=description)\n", + " self._model_context = UnboundedChatCompletionContext()\n", + " self._model_client = genai.Client(api_key=api_key)\n", + " self._system_message = system_message\n", + " self._model = model\n", + "\n", + " @property\n", + " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", + " return (TextMessage,)\n", + "\n", + " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " final_response = None\n", + " async for message in self.on_messages_stream(messages, cancellation_token):\n", + " if isinstance(message, Response):\n", + " final_response = message\n", + "\n", + " if final_response is None:\n", + " raise AssertionError(\"The stream should have returned the final result.\")\n", + "\n", + " return final_response\n", + "\n", + " async def on_messages_stream(\n", + " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", + " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", + " # Add messages to the model context\n", + " for msg in messages:\n", + " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", + "\n", + " # Get conversation history\n", + " history = [\n", + " (msg.source if hasattr(msg, \"source\") else \"system\")\n", + " + \": \"\n", + " + (msg.content if isinstance(msg.content, str) else \"\")\n", + " + \"\\n\"\n", + " for msg in await self._model_context.get_messages()\n", + " ]\n", + "\n", + " # Generate response using Gemini\n", + " response = self._model_client.models.generate_content(\n", + " model=self._model,\n", + " contents=f\"History: {history}\\nGiven the history, please provide a response\",\n", + " config=types.GenerateContentConfig(\n", + " system_instruction=self._system_message,\n", + " temperature=0.3,\n", + " ),\n", + " )\n", + "\n", + " # Create usage metadata\n", + " usage = RequestUsage(\n", + " prompt_tokens=response.usage_metadata.prompt_token_count,\n", + " 
completion_tokens=response.usage_metadata.candidates_token_count,\n",
 + " )\n",
 + "\n",
 + " # Add response to model context\n",
 + " await self._model_context.add_message(AssistantMessage(content=response.text, source=self.name))\n",
 + "\n",
 + " # Yield the final response\n",
 + " yield Response(\n",
 + " chat_message=TextMessage(content=response.text, source=self.name, models_usage=usage),\n",
 + " inner_messages=[],\n",
 + " )\n",
 + "\n",
 + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n",
 + " \"\"\"Reset the assistant by clearing the model context.\"\"\"\n",
 + " await self._model_context.clear()\n",
 + "\n",
 + " @classmethod\n",
 + " def _from_config(cls, config: GeminiAssistantAgentConfig) -> Self:\n",
 + " return cls(\n",
 + " name=config.name, description=config.description, model=config.model, system_message=config.system_message\n",
 + " )\n",
 + "\n",
 + " def _to_config(self) -> GeminiAssistantAgentConfig:\n",
 + " return GeminiAssistantAgentConfig(\n",
 + " name=self.name,\n",
 + " description=self.description,\n",
 + " model=self._model,\n",
 + " system_message=self._system_message,\n",
 + " )"
 + ]
 + },
 + {
 + "cell_type": "markdown",
 + "metadata": {},
 + "source": [
 + "Now that we have the required methods implemented, we can dump the custom agent to a JSON format and then load the agent back from that JSON format.\n",
 + " \n",
 + " > Note: You should set the `component_provider_override` class variable to the full path of the module containing the custom agent class, e.g., `mypackage.agents.GeminiAssistantAgent`. This is used by the `load_component` method to determine how to instantiate the class. \n",
 + " "
 + ]
 + },
 + {
 + "cell_type": "code",
 + "execution_count": 41,
 + "metadata": {},
 + "outputs": [
 + {
 + "name": "stdout",
 + "output_type": "stream",
 + "text": [
 + "{\n",
 + " \"provider\": \"__main__.GeminiAssistantAgent\",\n",
 + " \"component_type\": \"agent\",\n",
 + " \"version\": 1,\n",
 + " \"component_version\": 1,\n",
 + " \"description\": null,\n",
 + " \"label\": \"GeminiAssistantAgent\",\n",
 + " \"config\": {\n",
 + " \"name\": \"gemini_assistant\",\n",
 + " \"description\": \"An agent that provides assistance with ability to use tools.\",\n",
 + " \"model\": \"gemini-1.5-flash-002\",\n",
 + " \"system_message\": \"You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.\"\n",
 + " }\n",
 + "}\n",
 + "<__main__.GeminiAssistantAgent object at 0x11a5c5a90>\n"
 + ]
 + }
 + ],
 + "source": [
 + "gemini_assistant = GeminiAssistantAgent(\"gemini_assistant\")\n",
 + "config = gemini_assistant.dump_component()\n",
 + "print(config.model_dump_json(indent=2))\n",
 + "loaded_agent = GeminiAssistantAgent.load_component(config)\n",
 + "print(loaded_agent)"
 + ]
 + },
 + {
 + "cell_type": "markdown",
 + "metadata": {},
 + "source": [
 + "## Next Steps \n",
 + "\n",
 + "So far, we have seen how to create custom agents, add custom model clients to agents, and make custom agents declarative. There are a few ways in which this basic sample can be extended:\n",
 + "\n",
 + "- Extend the Gemini model client to handle function calling similar to the {py:class}`~autogen_agentchat.agents.AssistantAgent` class. https://ai.google.dev/gemini-api/docs/function-calling \n",
 + "- Implement a package with a custom agent and experiment with using its declarative format in a tool like [AutoGen Studio](https://microsoft.github.io/autogen/stable/user-guide/autogenstudio-user-guide/index.html)."
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the capital of New York?', type='TextMessage'), TextMessage(source='gemini_assistant', models_usage=RequestUsage(prompt_tokens=46, completion_tokens=5), content='Albany\\nTERMINATE\\n', type='TextMessage')], stop_reason=None)" - ] - }, - "execution_count": 38, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "gemini_assistant = GeminiAssistantAgent(\"gemini_assistant\")\n", - "await Console(gemini_assistant.run_stream(task=\"What is the capital of New York?\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In the example above, we have chosen to provide `model`, `api_key` and `system_message` as arguments - you can choose to provide any other arguments that are required by the model client you are using or fits with your application design. \n", - "\n", - "Now, let us explore how to use this custom agent as part of a team in AgentChat." - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a Haiku poem with 4 lines about the fall season.\n", - "---------- primary ----------\n", - "Crimson leaves cascade, \n", - "Whispering winds sing of change, \n", - "Chill wraps the fading, \n", - "Nature's quilt, rich and warm.\n", - "---------- gemini_critic ----------\n", - "The poem is good, but it has four lines instead of three. A haiku must have three lines with a 5-7-5 syllable structure. The content is evocative of autumn, but the form is incorrect. Please revise to adhere to the haiku's syllable structure.\n", - "\n", - "---------- primary ----------\n", - "Thank you for your feedback! Here’s a revised haiku that follows the 5-7-5 syllable structure:\n", - "\n", - "Crimson leaves drift down, \n", - "Chill winds whisper through the gold, \n", - "Autumn’s breath is near.\n", - "---------- gemini_critic ----------\n", - "The revised haiku is much improved. It correctly follows the 5-7-5 syllable structure and maintains the evocative imagery of autumn. APPROVE\n", - "\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a Haiku poem with 4 lines about the fall season.', type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=33, completion_tokens=31), content=\"Crimson leaves cascade, \\nWhispering winds sing of change, \\nChill wraps the fading, \\nNature's quilt, rich and warm.\", type='TextMessage'), TextMessage(source='gemini_critic', models_usage=RequestUsage(prompt_tokens=86, completion_tokens=60), content=\"The poem is good, but it has four lines instead of three. A haiku must have three lines with a 5-7-5 syllable structure. The content is evocative of autumn, but the form is incorrect. 
Please revise to adhere to the haiku's syllable structure.\\n\", type='TextMessage'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=141, completion_tokens=49), content='Thank you for your feedback! Here’s a revised haiku that follows the 5-7-5 syllable structure:\\n\\nCrimson leaves drift down, \\nChill winds whisper through the gold, \\nAutumn’s breath is near.', type='TextMessage'), TextMessage(source='gemini_critic', models_usage=RequestUsage(prompt_tokens=211, completion_tokens=32), content='The revised haiku is much improved. It correctly follows the 5-7-5 syllable structure and maintains the evocative imagery of autumn. APPROVE\\n', type='TextMessage')], stop_reason=\"Text 'APPROVE' mentioned\")" - ] - }, - "execution_count": 39, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import TextMentionTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "\n", - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n", - "\n", - "# Create the primary agent.\n", - "primary_agent = AssistantAgent(\n", - " \"primary\",\n", - " model_client=model_client,\n", - " system_message=\"You are a helpful AI assistant.\",\n", - ")\n", - "\n", - "# Create a critic agent based on our new GeminiAssistantAgent.\n", - "gemini_critic_agent = GeminiAssistantAgent(\n", - " \"gemini_critic\",\n", - " system_message=\"Provide constructive feedback. Respond with 'APPROVE' to when your feedbacks are addressed.\",\n", - ")\n", - "\n", - "\n", - "# Define a termination condition that stops the task if the critic approves or after 10 messages.\n", - "termination = TextMentionTermination(\"APPROVE\") | MaxMessageTermination(10)\n", - "\n", - "# Create a team with the primary and critic agents.\n", - "team = RoundRobinGroupChat([primary_agent, gemini_critic_agent], termination_condition=termination)\n", - "\n", - "await Console(team.run_stream(task=\"Write a Haiku poem with 4 lines about the fall season.\"))\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In section above, we show several very important concepts:\n", - "- We have developed a custom agent that uses the Google Gemini SDK to respond to messages. \n", - "- We show that this custom agent can be used as part of the broader AgentChat ecosystem - in this case as a participant in a {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` as long as it inherits from {py:class}`~autogen_agentchat.agents.BaseChatAgent`.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Making the Custom Agent Declarative \n", - "\n", - "Autogen provides a [Component](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/component-config.html) interface for making the configuration of components serializable to a declarative format. This is useful for saving and loading configurations, and for sharing configurations with others. \n", - "\n", - "We accomplish this by inheriting from the `Component` class and implementing the `_from_config` and `_to_config` methods.\n", - "The declarative class can be serialized to a JSON format using the `dump_component` method, and deserialized from a JSON format using the `load_component` method." 
- ] - }, - { - "cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "from typing import AsyncGenerator, Sequence\n", - "\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", - "from autogen_core import CancellationToken, Component\n", - "from pydantic import BaseModel\n", - "from typing_extensions import Self\n", - "\n", - "\n", - "class GeminiAssistantAgentConfig(BaseModel):\n", - " name: str\n", - " description: str = \"An agent that provides assistance with ability to use tools.\"\n", - " model: str = \"gemini-1.5-flash-002\"\n", - " system_message: str | None = None\n", - "\n", - "\n", - "class GeminiAssistantAgent(BaseChatAgent, Component[GeminiAssistantAgentConfig]): # type: ignore[no-redef]\n", - " component_config_schema = GeminiAssistantAgentConfig\n", - " # component_provider_override = \"mypackage.agents.GeminiAssistantAgent\"\n", - "\n", - " def __init__(\n", - " self,\n", - " name: str,\n", - " description: str = \"An agent that provides assistance with ability to use tools.\",\n", - " model: str = \"gemini-1.5-flash-002\",\n", - " api_key: str = os.environ[\"GEMINI_API_KEY\"],\n", - " system_message: str\n", - " | None = \"You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.\",\n", - " ):\n", - " super().__init__(name=name, description=description)\n", - " self._model_context = UnboundedChatCompletionContext()\n", - " self._model_client = genai.Client(api_key=api_key)\n", - " self._system_message = system_message\n", - " self._model = model\n", - "\n", - " @property\n", - " def produced_message_types(self) -> Sequence[type[ChatMessage]]:\n", - " return (TextMessage,)\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " final_response = None\n", - " async for message in self.on_messages_stream(messages, cancellation_token):\n", - " if isinstance(message, Response):\n", - " final_response = message\n", - "\n", - " if final_response is None:\n", - " raise AssertionError(\"The stream should have returned the final result.\")\n", - "\n", - " return final_response\n", - "\n", - " async def on_messages_stream(\n", - " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", - " # Add messages to the model context\n", - " for msg in messages:\n", - " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", - "\n", - " # Get conversation history\n", - " history = [\n", - " (msg.source if hasattr(msg, \"source\") else \"system\")\n", - " + \": \"\n", - " + (msg.content if isinstance(msg.content, str) else \"\")\n", - " + \"\\n\"\n", - " for msg in await self._model_context.get_messages()\n", - " ]\n", - "\n", - " # Generate response using Gemini\n", - " response = self._model_client.models.generate_content(\n", - " model=self._model,\n", - " contents=f\"History: {history}\\nGiven the history, please provide a response\",\n", - " config=types.GenerateContentConfig(\n", - " system_instruction=self._system_message,\n", - " temperature=0.3,\n", - " ),\n", - " )\n", - "\n", - " # Create usage metadata\n", - " usage = RequestUsage(\n", - " prompt_tokens=response.usage_metadata.prompt_token_count,\n", - " 
completion_tokens=response.usage_metadata.candidates_token_count,\n", - " )\n", - "\n", - " # Add response to model context\n", - " await self._model_context.add_message(AssistantMessage(content=response.text, source=self.name))\n", - "\n", - " # Yield the final response\n", - " yield Response(\n", - " chat_message=TextMessage(content=response.text, source=self.name, models_usage=usage),\n", - " inner_messages=[],\n", - " )\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " \"\"\"Reset the assistant by clearing the model context.\"\"\"\n", - " await self._model_context.clear()\n", - "\n", - " @classmethod\n", - " def _from_config(cls, config: GeminiAssistantAgentConfig) -> Self:\n", - " return cls(\n", - " name=config.name, description=config.description, model=config.model, system_message=config.system_message\n", - " )\n", - "\n", - " def _to_config(self) -> GeminiAssistantAgentConfig:\n", - " return GeminiAssistantAgentConfig(\n", - " name=self.name,\n", - " description=self.description,\n", - " model=self._model,\n", - " system_message=self._system_message,\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we have the required methods implemented, we can now load and dump the custom agent to and from a JSON format, and then load the agent from the JSON format.\n", - " \n", - " > Note: You should set the `component_provider_override` class variable to the full path of the module containing the custom agent class e.g., (`mypackage.agents.GeminiAssistantAgent`). This is used by `load_component` method to determine how to instantiate the class. \n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " \"provider\": \"__main__.GeminiAssistantAgent\",\n", - " \"component_type\": \"agent\",\n", - " \"version\": 1,\n", - " \"component_version\": 1,\n", - " \"description\": null,\n", - " \"label\": \"GeminiAssistantAgent\",\n", - " \"config\": {\n", - " \"name\": \"gemini_assistant\",\n", - " \"description\": \"An agent that provides assistance with ability to use tools.\",\n", - " \"model\": \"gemini-1.5-flash-002\",\n", - " \"system_message\": \"You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.\"\n", - " }\n", - "}\n", - "<__main__.GeminiAssistantAgent object at 0x11a5c5a90>\n" - ] - } - ], - "source": [ - "gemini_assistant = GeminiAssistantAgent(\"gemini_assistant\")\n", - "config = gemini_assistant.dump_component()\n", - "print(config.model_dump_json(indent=2))\n", - "loaded_agent = GeminiAssistantAgent.load_component(config)\n", - "print(loaded_agent)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Next Steps \n", - "\n", - "So far, we have seen how to create custom agents, add custom model clients to agents, and make custom agents declarative. There are a few ways in which this basic sample can be extended:\n", - "\n", - "- Extend the Gemini model client to handle function calling similar to the {py:class}`~autogen_agentchat.agents.AssistantAgent` class. https://ai.google.dev/gemini-api/docs/function-calling \n", - "- Implement a package with a custom agent and experiment with using its declarative format in a tool like [AutoGen Studio](https://microsoft.github.io/autogen/stable/user-guide/autogenstudio-user-guide/index.html)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb index 522c26b2098c..fa3e44b94428 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb @@ -1,1026 +1,1026 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Selector Group Chat" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` implements a team where participants take turns broadcasting messages to all other members. A generative model (e.g., an LLM) selects the next speaker based on the shared context, enabling dynamic, context-aware collaboration.\n", - "\n", - "Key features include:\n", - "\n", - "- Model-based speaker selection\n", - "- Configurable participant roles and descriptions\n", - "- Prevention of consecutive turns by the same speaker (optional)\n", - "- Customizable selection prompting\n", - "- Customizable selection function to override the default model-based selection\n", - "- Customizable candidate function to narrow-down the set of agents for selection using model\n", - "\n", - "```{note}\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a high-level API. For more control and customization, refer to the [Group Chat Pattern](../core-user-guide/design-patterns/group-chat.ipynb) in the Core API documentation to implement your own group chat logic.\n", - "```\n", - "\n", - "## How Does it Work?\n", - "\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a group chat similar to {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", - "but with a model-based next speaker selection mechanism.\n", - "When the team receives a task through {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream`,\n", - "the following steps are executed:\n", - "\n", - "1. The team analyzes the current conversation context, including the conversation history and participants' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes, to determine the next speaker using a model. By default, the team will not select the same speak consecutively unless it is the only agent available. This can be changed by setting `allow_repeated_speaker=True`. You can also override the model by providing a custom selection function.\n", - "2. The team prompts the selected speaker agent to provide a response, which is then **broadcasted** to all other participants.\n", - "3. The termination condition is checked to determine if the conversation should end, if not, the process repeats from step 1.\n", - "4. 
When the conversation ends, the team returns the {py:class}`~autogen_agentchat.base.TaskResult` containing the conversation history from this task.\n", - "\n", - "Once the team finishes the task, the conversation context is kept within the team and all participants, so the next task can continue from the previous conversation context.\n", - "You can reset the conversation context by calling {py:meth}`~autogen_agentchat.teams.BaseGroupChat.reset`.\n", - "\n", - "In this section, we will demonstrate how to use {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with a simple example for a web search and data analysis task." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Example: Web Search/Analysis" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import List, Sequence\n", - "\n", - "from autogen_agentchat.agents import AssistantAgent, UserProxyAgent\n", - "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", - "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", - "from autogen_agentchat.teams import SelectorGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Agents\n", - "\n", - "![Selector Group Chat](selector-group-chat.svg)\n", - "\n", - "This system uses three specialized agents:\n", - "\n", - "- **Planning Agent**: The strategic coordinator that breaks down complex tasks into manageable subtasks. \n", - "- **Web Search Agent**: An information retrieval specialist that interfaces with the `search_web_tool`.\n", - "- **Data Analyst Agent**: An agent specialist in performing calculations equipped with `percentage_change_tool`. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The tools `search_web_tool` and `percentage_change_tool` are external tools that the agents can use to perform their tasks." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "# Note: This example uses mock tools instead of real APIs for demonstration purposes\n", - "def search_web_tool(query: str) -> str:\n", - " if \"2006-2007\" in query:\n", - " return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \"\"\"\n", - " elif \"2007-2008\" in query:\n", - " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n", - " elif \"2008-2009\" in query:\n", - " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n", - " return \"No data found.\"\n", - "\n", - "\n", - "def percentage_change_tool(start: float, end: float) -> float:\n", - " return ((end - start) / start) * 100" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's create the specialized agents using the {py:class}`~autogen_agentchat.agents.AssistantAgent` class.\n", - "It is important to note that the agents' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes are used by the model to determine the next speaker,\n", - "so it is recommended to provide meaningful names and descriptions." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "\n", - "planning_agent = AssistantAgent(\n", - " \"PlanningAgent\",\n", - " description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n", - " model_client=model_client,\n", - " system_message=\"\"\"\n", - " You are a planning agent.\n", - " Your job is to break down complex tasks into smaller, manageable subtasks.\n", - " Your team members are:\n", - " WebSearchAgent: Searches for information\n", - " DataAnalystAgent: Performs calculations\n", - "\n", - " You only plan and delegate tasks - you do not execute them yourself.\n", - "\n", - " When assigning tasks, use this format:\n", - " 1. : \n", - "\n", - " After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n", - " \"\"\",\n", - ")\n", - "\n", - "web_search_agent = AssistantAgent(\n", - " \"WebSearchAgent\",\n", - " description=\"An agent for searching information on the web.\",\n", - " tools=[search_web_tool],\n", - " model_client=model_client,\n", - " system_message=\"\"\"\n", - " You are a web search agent.\n", - " Your only tool is search_tool - use it to find information.\n", - " You make only one search call at a time.\n", - " Once you have the results, you never do calculations based on them.\n", - " \"\"\",\n", - ")\n", - "\n", - "data_analyst_agent = AssistantAgent(\n", - " \"DataAnalystAgent\",\n", - " description=\"An agent for performing calculations.\",\n", - " model_client=model_client,\n", - " tools=[percentage_change_tool],\n", - " system_message=\"\"\"\n", - " You are a data analyst.\n", - " Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n", - " If you have not seen the data, ask for it.\n", - " \"\"\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` returns the\n", - "tool output as the response. If your tool does not return a well-formed\n", - "string in natural language format, you may want to add a reflection step\n", - "within the agent by setting `reflect_on_tool_use=True` when creating the agent.\n", - "This will allow the agent to reflect on the tool output and provide a natural\n", - "language response.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Workflow\n", - "\n", - "1. The task is received by the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` which, based on agent descriptions, selects the most appropriate agent to handle the initial task (typically the Planning Agent).\n", - "\n", - "2. The **Planning Agent** analyzes the task and breaks it down into subtasks, assigning each to the most appropriate agent using the format:\n", - " ` : `\n", - "\n", - "3. Based on the conversation context and agent descriptions, the {py:class}`~autogen_agent.teams.SelectorGroupChat` manager dynamically selects the next agent to handle their assigned subtask.\n", - "\n", - "4. The **Web Search Agent** performs searches one at a time, storing results in the shared conversation history.\n", - "\n", - "5. The **Data Analyst** processes the gathered information using available calculation tools when selected.\n", - "\n", - "6. 
The workflow continues with agents being dynamically selected until either:\n", - " - The Planning Agent determines all subtasks are complete and sends \"TERMINATE\"\n", - " - An alternative termination condition is met (e.g., a maximum number of messages)\n", - "\n", - "When defining your agents, make sure to include a helpful {py:attr}`~autogen_agentchat.base.ChatAgent.description` since this is used to decide which agent to select next." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Termination Conditions\n", - "\n", - "Let's use two termination conditions:\n", - "{py:class}`~autogen_agentchat.conditions.TextMentionTermination` to end the conversation when the Planning Agent sends \"TERMINATE\",\n", - "and {py:class}`~autogen_agentchat.conditions.MaxMessageTermination` to limit the conversation to 25 messages to avoid infinite loop." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "text_mention_termination = TextMentionTermination(\"TERMINATE\")\n", - "max_messages_termination = MaxMessageTermination(max_messages=25)\n", - "termination = text_mention_termination | max_messages_termination" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Selector Prompt\n", - "\n", - "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` uses a model to select\n", - "the next speaker based on the conversation context.\n", - "We will use a custom selector prompt to properly align with the workflow." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "selector_prompt = \"\"\"Select an agent to perform task.\n", - "\n", - "{roles}\n", - "\n", - "Current conversation context:\n", - "{history}\n", - "\n", - "Read the above conversation, then select an agent from {participants} to perform the next task.\n", - "Make sure the planner agent has assigned tasks before other agents start working.\n", - "Only select one agent.\n", - "\"\"\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{tip}\n", - "Try not to overload the model with too much instruction in the selector prompt.\n", - "\n", - "What is too much? It depends on the capabilities of the model you are using.\n", - "For GPT-4o and equivalents, you can use a selector prompt with a condition for when each speaker should be selected.\n", - "For smaller models such as Phi-4, you should keep the selector prompt as simple as possible\n", - "such as the one used in this example.\n", - "\n", - "Generally, if you find yourself writing multiple conditions for each agent,\n", - "it is a sign that you should consider using a custom selection function,\n", - "or breaking down the task into smaller, sequential tasks to be handled by\n", - "separate agents or teams.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Running the Team\n", - "\n", - "Let's create the team with the agents, termination conditions, and custom selector prompt." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination,\n", - " selector_prompt=selector_prompt,\n", - " allow_repeated_speaker=True, # Allow an agent to speak multiple turns in a row.\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we run the team with a task to find information about an NBA player." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", - "---------- PlanningAgent ----------\n", - "To complete this task, we need to perform the following subtasks:\n", - "\n", - "1. Find out which Miami Heat player had the highest points in the 2006-2007 season.\n", - "2. Gather data on this player's total rebounds for the 2007-2008 season.\n", - "3. Gather data on this player's total rebounds for the 2008-2009 season.\n", - "4. Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", - "\n", - "I'll assign these tasks accordingly:\n", - "\n", - "1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", - "2. WebSearchAgent: Find the total rebounds for this player in the 2007-2008 NBA season.\n", - "3. WebSearchAgent: Find the total rebounds for this player in the 2008-2009 NBA season.\n", - "4. 
DataAnalystAgent: Calculate the percentage change in total rebounds from the 2007-2008 season to the 2008-2009 season for this player.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_89tUNHaAM0kKQYPJLleGUKK7', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_89tUNHaAM0kKQYPJLleGUKK7', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- WebSearchAgent ----------\n", - "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\n", - "\n", - "Next, I will search for Dwyane Wade's total rebounds for the 2007-2008 season.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_RC55TkSjG3JXRuVOTPrcE1RL', arguments='{\"query\":\"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_RC55TkSjG3JXRuVOTPrcE1RL', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_pBXoABrErDow0rZjw3tjOZol', arguments='{\"query\":\"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pBXoABrErDow0rZjw3tjOZol', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_qMxxXtcJsiK8KFSSCx3zm0is', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_qMxxXtcJsiK8KFSSCx3zm0is', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "The player with the highest points for the Miami Heat in the 2006-2007 NBA season was Dwyane Wade, who scored 1,397 points. The percentage change in Dwyane Wade's total rebounds from 214 in the 2007-2008 season to 398 in the 2008-2009 season is approximately 85.98%.\n", - "\n", - "TERMINATE\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Selector Group Chat" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` implements a team where participants take turns broadcasting messages to all other members. 
A generative model (e.g., an LLM) selects the next speaker based on the shared context, enabling dynamic, context-aware collaboration.\n", + "\n", + "Key features include:\n", + "\n", + "- Model-based speaker selection\n", + "- Configurable participant roles and descriptions\n", + "- Prevention of consecutive turns by the same speaker (optional)\n", + "- Customizable selection prompting\n", + "- Customizable selection function to override the default model-based selection\n", + "- Customizable candidate function to narrow down the set of agents for selection using a model (a rough sketch appears at the end of this introduction)\n", + "\n", + "```{note}\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a high-level API. For more control and customization, refer to the [Group Chat Pattern](../core-user-guide/design-patterns/group-chat.ipynb) in the Core API documentation to implement your own group chat logic.\n", + "```\n", + "\n", + "## How Does it Work?\n", + "\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a group chat similar to {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", + "but with a model-based next speaker selection mechanism.\n", + "When the team receives a task through {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream`,\n", + "the following steps are executed:\n", + "\n", + "1. The team analyzes the current conversation context, including the conversation history and participants' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes, to determine the next speaker using a model. By default, the team will not select the same speaker consecutively unless it is the only agent available. This can be changed by setting `allow_repeated_speaker=True`. You can also override the model by providing a custom selection function.\n", + "2. The team prompts the selected speaker agent to provide a response, which is then **broadcast** to all other participants.\n", + "3. The termination condition is checked to determine if the conversation should end; if not, the process repeats from step 1.\n", + "4. When the conversation ends, the team returns the {py:class}`~autogen_agentchat.base.TaskResult` containing the conversation history from this task.\n", + "\n", + "Once the team finishes the task, the conversation context is kept within the team and all participants, so the next task can continue from the previous conversation context.\n", + "You can reset the conversation context by calling {py:meth}`~autogen_agentchat.teams.BaseGroupChat.reset`.\n", + "\n", + "In this section, we will demonstrate how to use {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with a simple example for a web search and data analysis task."
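The candidate function is not demonstrated in this notebook. As a rough sketch, assuming `SelectorGroupChat` accepts a `candidate_func` argument that receives the message history and returns the names of the agents eligible for selection (the agent names below match the team built later in this section):

```python
from typing import List, Sequence

from autogen_agentchat.messages import AgentEvent, ChatMessage


def candidate_func(messages: Sequence[AgentEvent | ChatMessage]) -> List[str]:
    # Until the web search agent has produced any results, only the planner
    # and the searcher are candidates; afterwards, all agents are eligible.
    if not any(message.source == "WebSearchAgent" for message in messages):
        return ["PlanningAgent", "WebSearchAgent"]
    return ["PlanningAgent", "WebSearchAgent", "DataAnalystAgent"]
```

The model still makes the final choice, but only among the returned candidates.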
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example: Web Search/Analysis" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import List, Sequence\n", + "\n", + "from autogen_agentchat.agents import AssistantAgent, UserProxyAgent\n", + "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", + "from autogen_agentchat.messages import AgentEvent, ChatMessage\n", + "from autogen_agentchat.teams import SelectorGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Agents\n", + "\n", + "![Selector Group Chat](selector-group-chat.svg)\n", + "\n", + "This system uses three specialized agents:\n", + "\n", + "- **Planning Agent**: The strategic coordinator that breaks down complex tasks into manageable subtasks. \n", + "- **Web Search Agent**: An information retrieval specialist that interfaces with the `search_web_tool`.\n", + "- **Data Analyst Agent**: A specialist in performing calculations, equipped with the `percentage_change_tool`. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The tools `search_web_tool` and `percentage_change_tool` are external tools that the agents can use to perform their tasks." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# Note: This example uses mock tools instead of real APIs for demonstration purposes\n", + "def search_web_tool(query: str) -> str:\n", + "    if \"2006-2007\" in query:\n", + "        return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + "        Udonis Haslem: 844 points\n", + "        Dwayne Wade: 1397 points\n", + "        James Posey: 550 points\n", + "        ...\n", + "        \"\"\"\n", + "    elif \"2007-2008\" in query:\n", + "        return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n", + "    elif \"2008-2009\" in query:\n", + "        return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n", + "    return \"No data found.\"\n", + "\n", + "\n", + "def percentage_change_tool(start: float, end: float) -> float:\n", + "    return ((end - start) / start) * 100" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create the specialized agents using the {py:class}`~autogen_agentchat.agents.AssistantAgent` class.\n", + "It is important to note that the agents' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes are used by the model to determine the next speaker,\n", + "so it is recommended to provide meaningful names and descriptions."
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "\n", + "planning_agent = AssistantAgent(\n", + "    \"PlanningAgent\",\n", + "    description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n", + "    model_client=model_client,\n", + "    system_message=\"\"\"\n", + "    You are a planning agent.\n", + "    Your job is to break down complex tasks into smaller, manageable subtasks.\n", + "    Your team members are:\n", + "        WebSearchAgent: Searches for information\n", + "        DataAnalystAgent: Performs calculations\n", + "\n", + "    You only plan and delegate tasks - you do not execute them yourself.\n", + "\n", + "    When assigning tasks, use this format:\n", + "    1. <agent> : <task>\n", + "\n", + "    After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n", + "    \"\"\",\n", + ")\n", + "\n", + "web_search_agent = AssistantAgent(\n", + "    \"WebSearchAgent\",\n", + "    description=\"An agent for searching information on the web.\",\n", + "    tools=[search_web_tool],\n", + "    model_client=model_client,\n", + "    system_message=\"\"\"\n", + "    You are a web search agent.\n", + "    Your only tool is search_web_tool - use it to find information.\n", + "    You make only one search call at a time.\n", + "    Once you have the results, you never do calculations based on them.\n", + "    \"\"\",\n", + ")\n", + "\n", + "data_analyst_agent = AssistantAgent(\n", + "    \"DataAnalystAgent\",\n", + "    description=\"An agent for performing calculations.\",\n", + "    model_client=model_client,\n", + "    tools=[percentage_change_tool],\n", + "    system_message=\"\"\"\n", + "    You are a data analyst.\n", + "    Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n", + "    If you have not seen the data, ask for it.\n", + "    \"\"\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` returns the\n", + "tool output as the response. If your tool does not return a well-formed\n", + "string in natural language format, you may want to add a reflection step\n", + "within the agent by setting `reflect_on_tool_use=True` when creating the agent.\n", + "This will allow the agent to reflect on the tool output and provide a natural\n", + "language response.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Workflow\n", + "\n", + "1. The task is received by the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` which, based on agent descriptions, selects the most appropriate agent to handle the initial task (typically the Planning Agent).\n", + "\n", + "2. The **Planning Agent** analyzes the task and breaks it down into subtasks, assigning each to the most appropriate agent using the format:\n", + "   `<agent> : <task>`\n", + "\n", + "3. Based on the conversation context and agent descriptions, the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` manager dynamically selects the next agent to handle their assigned subtask.\n", + "\n", + "4. The **Web Search Agent** performs searches one at a time, storing results in the shared conversation history.\n", + "\n", + "5. The **Data Analyst** processes the gathered information using available calculation tools when selected.\n", + "\n", + "6. 
The workflow continues with agents being dynamically selected until either:\n", + "   - The Planning Agent determines all subtasks are complete and sends \"TERMINATE\"\n", + "   - An alternative termination condition is met (e.g., a maximum number of messages)\n", + "\n", + "When defining your agents, make sure to include a helpful {py:attr}`~autogen_agentchat.base.ChatAgent.description` since this is used to decide which agent to select next." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Termination Conditions\n", + "\n", + "Let's use two termination conditions:\n", + "{py:class}`~autogen_agentchat.conditions.TextMentionTermination` to end the conversation when the Planning Agent sends \"TERMINATE\",\n", + "and {py:class}`~autogen_agentchat.conditions.MaxMessageTermination` to limit the conversation to 25 messages to avoid an infinite loop." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "text_mention_termination = TextMentionTermination(\"TERMINATE\")\n", + "max_messages_termination = MaxMessageTermination(max_messages=25)\n", + "termination = text_mention_termination | max_messages_termination" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Selector Prompt\n", + "\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` uses a model to select\n", + "the next speaker based on the conversation context.\n", + "We will use a custom selector prompt to properly align with the workflow." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "selector_prompt = \"\"\"Select an agent to perform task.\n", + "\n", + "{roles}\n", + "\n", + "Current conversation context:\n", + "{history}\n", + "\n", + "Read the above conversation, then select an agent from {participants} to perform the next task.\n", + "Make sure the planner agent has assigned tasks before other agents start working.\n", + "Only select one agent.\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{tip}\n", + "Try not to overload the model with too much instruction in the selector prompt.\n", + "\n", + "What is too much? It depends on the capabilities of the model you are using.\n", + "For GPT-4o and equivalents, you can use a selector prompt with a condition for when each speaker should be selected.\n", + "For smaller models such as Phi-4, you should keep the selector prompt as simple as possible\n", + "such as the one used in this example.\n", + "\n", + "Generally, if you find yourself writing multiple conditions for each agent,\n", + "it is a sign that you should consider using a custom selection function,\n", + "or breaking down the task into smaller, sequential tasks to be handled by\n", + "separate agents or teams.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Running the Team\n", + "\n", + "Let's create the team with the agents, termination conditions, and custom selector prompt."
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination,\n", + " selector_prompt=selector_prompt,\n", + " allow_repeated_speaker=True, # Allow an agent to speak multiple turns in a row.\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we run the team with a task to find information about an NBA player." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To complete this task, we need to perform the following subtasks:\n", + "\n", + "1. Find out which Miami Heat player had the highest points in the 2006-2007 season.\n", + "2. Gather data on this player's total rebounds for the 2007-2008 season.\n", + "3. Gather data on this player's total rebounds for the 2008-2009 season.\n", + "4. Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", + "\n", + "I'll assign these tasks accordingly:\n", + "\n", + "1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", + "2. WebSearchAgent: Find the total rebounds for this player in the 2007-2008 NBA season.\n", + "3. WebSearchAgent: Find the total rebounds for this player in the 2008-2009 NBA season.\n", + "4. 
DataAnalystAgent: Calculate the percentage change in total rebounds from the 2007-2008 season to the 2008-2009 season for this player.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_89tUNHaAM0kKQYPJLleGUKK7', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_89tUNHaAM0kKQYPJLleGUKK7', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- WebSearchAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\n", + "\n", + "Next, I will search for Dwyane Wade's total rebounds for the 2007-2008 season.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_RC55TkSjG3JXRuVOTPrcE1RL', arguments='{\"query\":\"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_RC55TkSjG3JXRuVOTPrcE1RL', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_pBXoABrErDow0rZjw3tjOZol', arguments='{\"query\":\"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pBXoABrErDow0rZjw3tjOZol', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_qMxxXtcJsiK8KFSSCx3zm0is', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_qMxxXtcJsiK8KFSSCx3zm0is', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "The player with the highest points for the Miami Heat in the 2006-2007 NBA season was Dwyane Wade, who scored 1,397 points. 
The percentage change in Dwyane Wade's total rebounds from 214 in the 2007-2008 season to 398 in the 2008-2009 season is approximately 85.98%.\n", + "\n", + "TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=220), metadata={}, content=\"To complete this task, we need to perform the following subtasks:\\n\\n1. Find out which Miami Heat player had the highest points in the 2006-2007 season.\\n2. Gather data on this player's total rebounds for the 2007-2008 season.\\n3. Gather data on this player's total rebounds for the 2008-2009 season.\\n4. Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nI'll assign these tasks accordingly:\\n\\n1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Find the total rebounds for this player in the 2007-2008 NBA season.\\n3. WebSearchAgent: Find the total rebounds for this player in the 2008-2009 NBA season.\\n4. DataAnalystAgent: Calculate the percentage change in total rebounds from the 2007-2008 season to the 2008-2009 season for this player.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=368, completion_tokens=27), metadata={}, content=[FunctionCall(id='call_89tUNHaAM0kKQYPJLleGUKK7', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_89tUNHaAM0kKQYPJLleGUKK7', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), ThoughtEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=\"The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\\n\\nNext, I will search for Dwyane Wade's total rebounds for the 2007-2008 season.\", type='ThoughtEvent'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=460, completion_tokens=83), metadata={}, content=[FunctionCall(id='call_RC55TkSjG3JXRuVOTPrcE1RL', arguments='{\"query\":\"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_RC55TkSjG3JXRuVOTPrcE1RL', is_error=False)], type='ToolCallExecutionEvent'), 
ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=585, completion_tokens=28), metadata={}, content=[FunctionCall(id='call_pBXoABrErDow0rZjw3tjOZol', arguments='{\"query\":\"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pBXoABrErDow0rZjw3tjOZol', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=496, completion_tokens=21), metadata={}, content=[FunctionCall(id='call_qMxxXtcJsiK8KFSSCx3zm0is', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_qMxxXtcJsiK8KFSSCx3zm0is', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=528, completion_tokens=80), metadata={}, content=\"The player with the highest points for the Miami Heat in the 2006-2007 NBA season was Dwyane Wade, who scored 1,397 points. The percentage change in Dwyane Wade's total rebounds from 214 in the 2007-2008 season to 398 in the 2008-2009 season is approximately 85.98%.\\n\\nTERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Use asyncio.run(...) if you are running this in a script.\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, after the Web Search Agent conducts the necessary searches and the Data Analyst Agent completes the necessary calculations, we find that Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, and the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons is 85.98%!"
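The reported figure is easy to verify by hand, using the same formula that `percentage_change_tool` implements:

```python
# Sanity check of the reported figure, using the same formula
# as percentage_change_tool: ((end - start) / start) * 100.
start, end = 214, 398
print(((end - start) / start) * 100)  # 85.98130841121495, i.e. ~85.98%
```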
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom Selector Function" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Oftentimes we want better control over the selection process.\n", + "To this end, we can set the `selector_func` argument with a custom selector function to override the default model-based selection.\n", + "This allows us to implement more complex selection logic and state-based transitions.\n", + "\n", + "For instance, we may want the Planning Agent to speak immediately after any specialized agent to check on progress.\n", + "\n", + "```{note}\n", + "Returning `None` from the custom selector function will use the default model-based selection.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To answer this question, we need to follow these steps: \n", + "\n", + "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", + "2. Retrieve the total rebounds of that player for the 2007-2008 and 2008-2009 seasons.\n", + "3. Calculate the percentage change in his total rebounds between the two seasons.\n", + "\n", + "Let's delegate these tasks:\n", + "\n", + "1. WebSearchAgent: Find the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", + "2. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2007-2008 NBA season.\n", + "3. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2008-2009 NBA season.\n", + "4. DataAnalystAgent: Calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for the player found.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_Pz82ndNLSV4cH0Sg6g7ArP4L', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_Pz82ndNLSV4cH0Sg6g7ArP4L')]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- PlanningAgent ----------\n", + "Great! Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season. Now, let's continue with the next tasks:\n", + "\n", + "2. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2007-2008 NBA season.\n", + "3. 
WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2008-2009 NBA season.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_3qv9so2DXFZIHtzqDIfXoFID', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Vh7zzzWUeiUAvaYjP0If0k1k', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_3qv9so2DXFZIHtzqDIfXoFID'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Vh7zzzWUeiUAvaYjP0If0k1k')]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- PlanningAgent ----------\n", + "Now let's calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for Dwyane Wade.\n", + "\n", + "4. DataAnalystAgent: Calculate the percentage change in total rebounds for Dwyane Wade between the 2007-2008 and 2008-2009 seasons.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_FXnPSr6JVGfAWs3StIizbt2V', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_FXnPSr6JVGfAWs3StIizbt2V')]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1397 points. The percentage change in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) is approximately 86.0%.\n", + "\n", + "TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=192), content=\"To answer this question, we need to follow these steps: \\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Retrieve the total rebounds of that player for the 2007-2008 and 2008-2009 seasons.\\n3. Calculate the percentage change in his total rebounds between the two seasons.\\n\\nLet's delegate these tasks:\\n\\n1. WebSearchAgent: Find the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2007-2008 NBA season.\\n3. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2008-2009 NBA season.\\n4. 
DataAnalystAgent: Calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for the player found.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=340, completion_tokens=27), content=[FunctionCall(id='call_Pz82ndNLSV4cH0Sg6g7ArP4L', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_Pz82ndNLSV4cH0Sg6g7ArP4L')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=420, completion_tokens=87), content=\"Great! Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season. Now, let's continue with the next tasks:\\n\\n2. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2007-2008 NBA season.\\n3. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2008-2009 NBA season.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=525, completion_tokens=71), content=[FunctionCall(id='call_3qv9so2DXFZIHtzqDIfXoFID', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Vh7zzzWUeiUAvaYjP0If0k1k', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_3qv9so2DXFZIHtzqDIfXoFID'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Vh7zzzWUeiUAvaYjP0If0k1k')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=569, completion_tokens=68), content=\"Now let's calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for Dwyane Wade.\\n\\n4. 
DataAnalystAgent: Calculate the percentage change in total rebounds for Dwyane Wade between the 2007-2008 and 2008-2009 seasons.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=627, completion_tokens=21), content=[FunctionCall(id='call_FXnPSr6JVGfAWs3StIizbt2V', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_FXnPSr6JVGfAWs3StIizbt2V')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=659, completion_tokens=76), content='Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1397 points. The percentage change in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) is approximately 86.0%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:\n", + " if messages[-1].source != planning_agent.name:\n", + " return planning_agent.name\n", + " return None\n", + "\n", + "\n", + "# Reset the previous team and run the chat again with the selector function.\n", + "await team.reset()\n", + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination,\n", + " selector_prompt=selector_prompt,\n", + " allow_repeated_speaker=True,\n", + " selector_func=selector_func,\n", + ")\n", + "\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see from the conversation log that the Planning Agent always speaks immediately after the specialized agents.\n", + "\n", + "```{tip}\n", + "Each participant agent only makes one step (executing tools, generating a response, etc.)\n", + "on each turn. 
\n", + "If you want an {py:class}`~autogen_agentchat.agents.AssistantAgent` to repeat\n", + "until it stop returning a {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage`\n", + "when it has finished running all the tools it needs to run, you can do so by\n", + "checking the last message and returning the agent if it is a\n", + "{py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage`.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom Candidate Function" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "One more possible requirement might be to automatically select the next speaker from a filtered list of agents.\n", + "For this, we can set `candidate_func` parameter with a custom candidate function to filter down the list of potential agents for speaker selection for each turn of groupchat.\n", + "\n", + "This allow us to restrict speaker selection to a specific set of agents after a given agent.\n", + "\n", + "\n", + "```{note}\n", + "The `candidate_func` is only valid if `selector_func` is not set.\n", + "Returning `None` or an empty list `[]` from the custom candidate function will raise a `ValueError`.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To answer this question, we'll break it down into two main subtasks:\n", + "\n", + "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", + "2. Calculate the percentage change in that player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", + "\n", + "Let's assign these tasks:\n", + "\n", + "1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", + "2. WebSearchAgent: Find the total rebound statistics for that identified player for both the 2007-2008 and 2008-2009 NBA seasons.\n", + "3. 
DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons once the data is retrieved.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_WtR5KTfEIxs3jIO25gjAw7dF', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_WtR5KTfEIxs3jIO25gjAw7dF', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_9HA3DEacUl4WuG2G2PtRkXAO', arguments='{\"start\": 432, \"end\": 527}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='21.99074074074074', name='percentage_change_tool', call_id='call_9HA3DEacUl4WuG2G2PtRkXAO', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "21.99074074074074\n", + "---------- PlanningAgent ----------\n", + "It seems we've missed some context there, so let's assign the subtasks again for clarity:\n", + "\n", + "Based on the search results, Dwyane Wade had the highest points for the Miami Heat in the 2006-2007 season with 1397 points.\n", + "\n", + "Now, let's find the necessary rebound statistics:\n", + "\n", + "2. WebSearchAgent: Find Dwyane Wade's total rebound statistics for both the 2007-2008 and 2008-2009 NBA seasons.\n", + "3. DataAnalystAgent: Once the data is retrieved, calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_3i1wTDSjkGg6Ev8YKYWkZK55', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_NRAs6jHxXRi8zsvpW5WlHAaU', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_3i1wTDSjkGg6Ev8YKYWkZK55', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_NRAs6jHxXRi8zsvpW5WlHAaU', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- PlanningAgent ----------\n", + "The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\n", + "\n", + "Now, let's calculate the percentage change.\n", + "\n", + "3. 
DataAnalystAgent: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_XECA7ezz7VIKbf8IbZYSCSpI', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_XECA7ezz7VIKbf8IbZYSCSpI', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1397 points. The percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\n", + "\n", + "TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=169), metadata={}, content=\"To answer this question, we'll break it down into two main subtasks:\\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Calculate the percentage change in that player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's assign these tasks:\\n\\n1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Find the total rebound statistics for that identified player for both the 2007-2008 and 2008-2009 NBA seasons.\\n3. 
DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons once the data is retrieved.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=324, completion_tokens=28), metadata={}, content=[FunctionCall(id='call_WtR5KTfEIxs3jIO25gjAw7dF', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_WtR5KTfEIxs3jIO25gjAw7dF', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=390, completion_tokens=37), metadata={}, content=[FunctionCall(id='call_9HA3DEacUl4WuG2G2PtRkXAO', arguments='{\"start\": 432, \"end\": 527}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='21.99074074074074', name='percentage_change_tool', call_id='call_9HA3DEacUl4WuG2G2PtRkXAO', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='21.99074074074074', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=413, completion_tokens=137), metadata={}, content=\"It seems we've missed some context there, so let's assign the subtasks again for clarity:\\n\\nBased on the search results, Dwyane Wade had the highest points for the Miami Heat in the 2006-2007 season with 1397 points.\\n\\nNow, let's find the necessary rebound statistics:\\n\\n2. WebSearchAgent: Find Dwyane Wade's total rebound statistics for both the 2007-2008 and 2008-2009 NBA seasons.\\n3. 
DataAnalystAgent: Once the data is retrieved, calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=576, completion_tokens=73), metadata={}, content=[FunctionCall(id='call_3i1wTDSjkGg6Ev8YKYWkZK55', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_NRAs6jHxXRi8zsvpW5WlHAaU', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_3i1wTDSjkGg6Ev8YKYWkZK55', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_NRAs6jHxXRi8zsvpW5WlHAaU', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=612, completion_tokens=84), metadata={}, content=\"The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\\n\\nNow, let's calculate the percentage change.\\n\\n3. DataAnalystAgent: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=720, completion_tokens=21), metadata={}, content=[FunctionCall(id='call_XECA7ezz7VIKbf8IbZYSCSpI', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_XECA7ezz7VIKbf8IbZYSCSpI', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=718, completion_tokens=63), metadata={}, content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1397 points. 
The percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "def candidate_func(messages: Sequence[AgentEvent | ChatMessage]) -> List[str]:\n", + " # keep planning_agent as the first one to plan out the tasks\n", + " if messages[-1].source == \"user\":\n", + " return [planning_agent.name]\n", + "\n", + " # if the previous agent is planning_agent and it explicitly asks for web_search_agent\n", + " # or data_analyst_agent or both (in case of re-planning or re-assignment of tasks)\n", + " # then return those specific agents\n", + " last_message = messages[-1]\n", + " if last_message.source == planning_agent.name:\n", + " participants = []\n", + " if web_search_agent.name in last_message.content_to_text():\n", + " participants.append(web_search_agent.name)\n", + " if data_analyst_agent.name in last_message.content_to_text():\n", + " participants.append(data_analyst_agent.name)\n", + " if participants:\n", + " return participants # SelectorGroupChat will select from these candidates.\n", + "\n", + " # we can assume that the task is finished once the web_search_agent\n", + " # and data_analyst_agent have taken their turns, so we send\n", + " # in planning_agent to terminate the chat\n", + " previous_set_of_agents = set(message.source for message in messages)\n", + " if web_search_agent.name in previous_set_of_agents and data_analyst_agent.name in previous_set_of_agents:\n", + " return [planning_agent.name]\n", + "\n", + " # if no conditions are met, return all the agents\n", + " return [planning_agent.name, web_search_agent.name, data_analyst_agent.name]\n", + "\n", + "\n", + "# Reset the previous team and run the chat again with the candidate function.\n", + "await team.reset()\n", + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination,\n", + " candidate_func=candidate_func,\n", + ")\n", + "\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see from the conversation log that the Planning Agent returns to the conversation once the Web Search Agent and the Data Analyst Agent have taken their turns. It finds that the task was not finished as expected, so it calls the WebSearchAgent again to get the rebound values and then calls the DataAnalystAgent to get the percentage change." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## User Feedback\n", + "\n", + "We can add {py:class}`~autogen_agentchat.agents.UserProxyAgent` to the team to\n", + "provide user feedback during a run.\n", + "See [Human-in-the-Loop](./tutorial/human-in-the-loop.ipynb) for more details\n", + "about {py:class}`~autogen_agentchat.agents.UserProxyAgent`.\n", + "\n", + "To use the {py:class}`~autogen_agentchat.agents.UserProxyAgent` in the \n", + "web search example, we simply add it to the team and update the selector function\n", + "to always check for user feedback after the planning agent speaks.\n", + "If the user responds with `\"APPROVE\"`, the conversation continues; otherwise,\n", + "the planning agent tries again until the user approves.\n",
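+ "\n", + "The heart of that check is a small fragment of the selector function (shown in full in the next cell); a minimal sketch:\n", + "\n", + "```python\n", + "# Inside the selector function: route on the user's reply.\n", + "if messages[-1].source == user_proxy_agent.name:\n", + "    if \"APPROVE\" not in messages[-1].content.upper():\n", + "        return planning_agent.name  # Rejected: let the planner revise its plan.\n", + "    return None  # Approved: fall back to the default model-based selection.\n", + "```"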
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- PlanningAgent ----------\n", + "To address the user's query, we will need to perform the following tasks:\n", + "\n", + "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", + "2. Find the total rebounds for that player in the 2007-2008 season.\n", + "3. Find the total rebounds for that player in the 2008-2009 season.\n", + "4. Calculate the percentage change in the total rebounds between the 2007-2008 and 2008-2009 seasons.\n", + "\n", + "Let's assign these tasks:\n", + "\n", + "1. **WebSearchAgent**: Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", + " \n", + "(Task 2 and 3 depend on the result of Task 1. We'll proceed with Tasks 2 and 3 once Task 1 is complete.)\n", + "---------- UserProxyAgent ----------\n", + "approve\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_0prr3fUnG5CtisUG7QeygW0w', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_0prr3fUnG5CtisUG7QeygW0w')]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- PlanningAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points.\n", + "\n", + "Next, we need to find Dwyane Wade's total rebounds for the 2007-2008 and 2008-2009 seasons:\n", + "\n", + "2. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2007-2008 season.\n", + "3. 
**WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2008-2009 season.\n", + "---------- UserProxyAgent ----------\n", + "approve\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_fBZe80NaBfruOVGwRWbhXyRm', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_cURYibna4fGxySiL7IYt0c3s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_fBZe80NaBfruOVGwRWbhXyRm'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_cURYibna4fGxySiL7IYt0c3s')]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- PlanningAgent ----------\n", + "Now that we have Dwyane Wade's total rebounds for both seasons, we can calculate the percentage change:\n", + "\n", + "4. **DataAnalystAgent**: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds).\n", + "---------- UserProxyAgent ----------\n", + "approve\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_z3uog7t2x0z1Suzl5hACF9hY', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_z3uog7t2x0z1Suzl5hACF9hY')]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season, which is a percentage change of approximately 85.98%.\n", + "\n", + "TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=166), content=\"To address the user's query, we will need to perform the following tasks:\\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Find the total rebounds for that player in the 2007-2008 season.\\n3. Find the total rebounds for that player in the 2008-2009 season.\\n4. Calculate the percentage change in the total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's assign these tasks:\\n\\n1. **WebSearchAgent**: Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n \\n(Task 2 and 3 depend on the result of Task 1. 
We'll proceed with Tasks 2 and 3 once Task 1 is complete.)\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='2a433f88-f886-4b39-a078-ea1acdcb2f9d', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=323, completion_tokens=28), content=[FunctionCall(id='call_0prr3fUnG5CtisUG7QeygW0w', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_0prr3fUnG5CtisUG7QeygW0w')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=403, completion_tokens=112), content=\"Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points.\\n\\nNext, we need to find Dwyane Wade's total rebounds for the 2007-2008 and 2008-2009 seasons:\\n\\n2. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2007-2008 season.\\n3. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2008-2009 season.\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='23dd4570-2391-41e9-aeea-86598499792c', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=543, completion_tokens=73), content=[FunctionCall(id='call_fBZe80NaBfruOVGwRWbhXyRm', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_cURYibna4fGxySiL7IYt0c3s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_fBZe80NaBfruOVGwRWbhXyRm'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_cURYibna4fGxySiL7IYt0c3s')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=586, completion_tokens=70), content=\"Now that we have Dwyane Wade's total rebounds for both seasons, we can calculate the percentage change:\\n\\n4. 
**DataAnalystAgent**: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds).\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='e849d193-4ab3-4558-8560-7dbc062a0aee', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=655, completion_tokens=21), content=[FunctionCall(id='call_z3uog7t2x0z1Suzl5hACF9hY', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_z3uog7t2x0z1Suzl5hACF9hY')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=687, completion_tokens=74), content='Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season, which is a percentage change of approximately 85.98%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "user_proxy_agent = UserProxyAgent(\"UserProxyAgent\", description=\"A proxy for the user to approve or disapprove tasks.\")\n", + "\n", + "\n", + "def selector_func_with_user_proxy(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:\n", + " if messages[-1].source != planning_agent.name and messages[-1].source != user_proxy_agent.name:\n", + " # Planning agent should be the first to engage when given a new task, or to check progress.\n", + " return planning_agent.name\n", + " if messages[-1].source == planning_agent.name:\n", + " if messages[-2].source == user_proxy_agent.name and \"APPROVE\" in messages[-1].content.upper(): # type: ignore\n", + " # User has approved the plan, proceed to the next agent.\n", + " return None\n", + " # Use the user proxy agent to get the user's approval to proceed.\n", + " return user_proxy_agent.name\n", + " if messages[-1].source == user_proxy_agent.name:\n", + " # If the user does not approve, return to the planning agent.\n", + " if \"APPROVE\" not in messages[-1].content.upper(): # type: ignore\n", + " return planning_agent.name\n", + " return None\n", + "\n", + "\n", + "# Reset the previous team and run the chat again with the user proxy agent and the new selector function.\n", + "await team.reset()\n", + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent, user_proxy_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination,\n", + " selector_prompt=selector_prompt,\n", + " selector_func=selector_func_with_user_proxy,\n", + " allow_repeated_speaker=True,\n", + ")\n", + "\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, the user's feedback is incorporated into the conversation flow,\n", + "and the user can approve or reject the planning agent's decisions.\n",
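+ "\n", + "As a quick sanity check, the final figure follows directly from the rebound totals retrieved during the run:\n", + "\n", + "$$\\frac{398 - 214}{214} \\times 100 \\approx 85.98\\%$$"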
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Reasoning Models\n", + "\n", + "So far in the examples, we have used a `gpt-4o` model. Models like `gpt-4o`\n", + "and `gemini-1.5-flash` are great at following instructions, so you can\n", + "have relatively detailed instructions in the selector prompt for the team and the \n", + "system messages for each agent to guide their behavior.\n", + "\n", + "However, if you are using a reasoning model like `o3-mini`, you will need to\n", + "keep the selector prompt and system messages as simple and to the point as possible.\n", + "This is because the reasoning models are already good at coming up with their own \n", + "instructions given the context provided to them.\n", + "\n", + "This also means that we don't need a planning agent to break down the task\n", + "anymore, since the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` that\n", + "uses a reasoning model can do that on its own.\n", + "\n", + "In the following example, we will use `o3-mini` as the model for the\n", + "agents and the team, and we will not use a planning agent.\n", + "Also, we are keeping the selector prompt and system messages as simple as possible." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "model_client = OpenAIChatCompletionClient(model=\"o3-mini\")\n", + "\n", + "web_search_agent = AssistantAgent(\n", + " \"WebSearchAgent\",\n", + " description=\"An agent for searching information on the web.\",\n", + " tools=[search_web_tool],\n", + " model_client=model_client,\n", + " system_message=\"\"\"Use web search tool to find information.\"\"\",\n", + ")\n", + "\n", + "data_analyst_agent = AssistantAgent(\n", + " \"DataAnalystAgent\",\n", + " description=\"An agent for performing calculations.\",\n", + " model_client=model_client,\n", + " tools=[percentage_change_tool],\n", + " system_message=\"\"\"Use tool to perform calculation. 
If you have not seen the data, ask for it.\"\"\",\n", + ")\n", + "\n", + "user_proxy_agent = UserProxyAgent(\n", + " \"UserProxyAgent\",\n", + " description=\"A user to approve or disapprove tasks.\",\n", + ")\n", + "\n", + "selector_prompt = \"\"\"Select an agent to perform task.\n", + "\n", + "{roles}\n", + "\n", + "Current conversation context:\n", + "{history}\n", + "\n", + "Read the above conversation, then select an agent from {participants} to perform the next task.\n", + "When the task is complete, let the user approve or disapprove the task.\n", + "\"\"\"\n", + "\n", + "team = SelectorGroupChat(\n", + " [web_search_agent, data_analyst_agent, user_proxy_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination, # Use the same termination condition as before.\n", + " selector_prompt=selector_prompt,\n", + " allow_repeated_speaker=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', arguments='{\"query\": \"Who was the Miami Heat player with the highest points in the 2006-2007 season Miami Heat statistics Dwyane Wade rebounds percentage change 2007-2008 2008-2009 seasons\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- DataAnalystAgent ----------\n", + "I found that in the 2006–2007 season the player with the highest points was Dwyane Wade (with 1,397 points). 
Could you please provide Dwyane Wade’s total rebounds for the 2007–2008 and the 2008–2009 seasons so I can calculate the percentage change?\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_lppGTILXDvO9waPwKO66ehK6', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 and 2008-2009 seasons for Miami Heat\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_lppGTILXDvO9waPwKO66ehK6', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "---------- DataAnalystAgent ----------\n", + "Could you please provide Dwyane Wade’s total rebounds in the 2008-2009 season?\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_r8DBcbJtQfdtugLtyTrqOvoK', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season Miami Heat\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_r8DBcbJtQfdtugLtyTrqOvoK', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_4jejv1wM7V1osbBCxJze8aQM', arguments='{\"start\": 214, \"end\": 398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_4jejv1wM7V1osbBCxJze8aQM', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- DataAnalystAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest total points (1,397) during the 2006-2007 season. His total rebounds increased by approximately 86% from 214 in the 2007-2008 season to 398 in the 2008-2009 season.\n", + "---------- UserProxyAgent ----------\n", + "Approve. 
TERMINATE\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=103, completion_tokens=384), content=[FunctionCall(id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', arguments='{\"query\": \"Who was the Miami Heat player with the highest points in the 2006-2007 season Miami Heat statistics Dwyane Wade rebounds percentage change 2007-2008 2008-2009 seasons\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=183, completion_tokens=1038), content='I found that in the 2006–2007 season the player with the highest points was Dwyane Wade (with 1,397 points). Could you please provide Dwyane Wade’s total rebounds for the 2007–2008 and the 2008–2009 seasons so I can calculate the percentage change?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=299, completion_tokens=109), content=[FunctionCall(id='call_lppGTILXDvO9waPwKO66ehK6', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 and 2008-2009 seasons for Miami Heat\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_lppGTILXDvO9waPwKO66ehK6', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=291, completion_tokens=224), content='Could you please provide Dwyane Wade’s total rebounds in the 2008-2009 season?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=401, completion_tokens=37), content=[FunctionCall(id='call_r8DBcbJtQfdtugLtyTrqOvoK', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season Miami Heat\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_r8DBcbJtQfdtugLtyTrqOvoK', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of 
total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=353, completion_tokens=158), content=[FunctionCall(id='call_4jejv1wM7V1osbBCxJze8aQM', arguments='{\"start\": 214, \"end\": 398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_4jejv1wM7V1osbBCxJze8aQM', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=394, completion_tokens=138), content='Dwyane Wade was the Miami Heat player with the highest total points (1,397) during the 2006-2007 season. His total rebounds increased by approximately 86% from 214 in the 2007-2008 season to 398 in the 2008-2009 season.', type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='b3b05408-73fc-47d4-b832-16c9f447cd6e', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='Approve. TERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{tip}\n", + "For more guidance on how to prompt reasoning models, see the\n", + "Azure AI Services Blog on [Prompt Engineering for OpenAI's O1 and O3-mini Reasoning Models](https://techcommunity.microsoft.com/blog/azure-ai-services-blog/prompt-engineering-for-openai%E2%80%99s-o1-and-o3-mini-reasoning-models/4374010)\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=220), metadata={}, content=\"To complete this task, we need to perform the following subtasks:\\n\\n1. Find out which Miami Heat player had the highest points in the 2006-2007 season.\\n2. Gather data on this player's total rebounds for the 2007-2008 season.\\n3. Gather data on this player's total rebounds for the 2008-2009 season.\\n4. Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nI'll assign these tasks accordingly:\\n\\n1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Find the total rebounds for this player in the 2007-2008 NBA season.\\n3. 
WebSearchAgent: Find the total rebounds for this player in the 2008-2009 NBA season.\\n4. DataAnalystAgent: Calculate the percentage change in total rebounds from the 2007-2008 season to the 2008-2009 season for this player.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=368, completion_tokens=27), metadata={}, content=[FunctionCall(id='call_89tUNHaAM0kKQYPJLleGUKK7', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_89tUNHaAM0kKQYPJLleGUKK7', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), ThoughtEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=\"The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\\n\\nNext, I will search for Dwyane Wade's total rebounds for the 2007-2008 season.\", type='ThoughtEvent'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=460, completion_tokens=83), metadata={}, content=[FunctionCall(id='call_RC55TkSjG3JXRuVOTPrcE1RL', arguments='{\"query\":\"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_RC55TkSjG3JXRuVOTPrcE1RL', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=585, completion_tokens=28), metadata={}, content=[FunctionCall(id='call_pBXoABrErDow0rZjw3tjOZol', arguments='{\"query\":\"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pBXoABrErDow0rZjw3tjOZol', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=496, completion_tokens=21), metadata={}, content=[FunctionCall(id='call_qMxxXtcJsiK8KFSSCx3zm0is', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], 
type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_qMxxXtcJsiK8KFSSCx3zm0is', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=528, completion_tokens=80), metadata={}, content=\"The player with the highest points for the Miami Heat in the 2006-2007 NBA season was Dwyane Wade, who scored 1,397 points. The percentage change in Dwyane Wade's total rebounds from 214 in the 2007-2008 season to 398 in the 2008-2009 season is approximately 85.98%.\\n\\nTERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Use asyncio.run(...) if you are running this in a script.\n", - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, after the Web Search Agent conducts the necessary searches and the Data Analyst Agent completes the necessary calculations, we find that Dwayne Wade was the Miami Heat player with the highest points in the 2006-2007 season, and the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons is 85.98%!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom Selector Function" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Often times we want better control over the selection process.\n", - "To this end, we can set the `selector_func` argument with a custom selector function to override the default model-based selection.\n", - "This allows us to implement more complex selection logic and state-based transitions.\n", - "\n", - "For instance, we want the Planning Agent to speak immediately after any specialized agent to check the progress.\n", - "\n", - "```{note}\n", - "Returning `None` from the custom selector function will use the default model-based selection.\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", - "---------- PlanningAgent ----------\n", - "To answer this question, we need to follow these steps: \n", - "\n", - "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", - "2. Retrieve the total rebounds of that player for the 2007-2008 and 2008-2009 seasons.\n", - "3. Calculate the percentage change in his total rebounds between the two seasons.\n", - "\n", - "Let's delegate these tasks:\n", - "\n", - "1. WebSearchAgent: Find the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", - "2. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2007-2008 NBA season.\n", - "3. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2008-2009 NBA season.\n", - "4. 
DataAnalystAgent: Calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for the player found.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_Pz82ndNLSV4cH0Sg6g7ArP4L', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_Pz82ndNLSV4cH0Sg6g7ArP4L')]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- PlanningAgent ----------\n", - "Great! Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season. Now, let's continue with the next tasks:\n", - "\n", - "2. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2007-2008 NBA season.\n", - "3. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2008-2009 NBA season.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_3qv9so2DXFZIHtzqDIfXoFID', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Vh7zzzWUeiUAvaYjP0If0k1k', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_3qv9so2DXFZIHtzqDIfXoFID'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Vh7zzzWUeiUAvaYjP0If0k1k')]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- PlanningAgent ----------\n", - "Now let's calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for Dwyane Wade.\n", - "\n", - "4. DataAnalystAgent: Calculate the percentage change in total rebounds for Dwyane Wade between the 2007-2008 and 2008-2009 seasons.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_FXnPSr6JVGfAWs3StIizbt2V', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', call_id='call_FXnPSr6JVGfAWs3StIizbt2V')]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1397 points. 
The percentage change in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) is approximately 86.0%.\n", - "\n", - "TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=192), content=\"To answer this question, we need to follow these steps: \\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Retrieve the total rebounds of that player for the 2007-2008 and 2008-2009 seasons.\\n3. Calculate the percentage change in his total rebounds between the two seasons.\\n\\nLet's delegate these tasks:\\n\\n1. WebSearchAgent: Find the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2007-2008 NBA season.\\n3. WebSearchAgent: Retrieve the total rebounds for the identified player during the 2008-2009 NBA season.\\n4. DataAnalystAgent: Calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for the player found.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=340, completion_tokens=27), content=[FunctionCall(id='call_Pz82ndNLSV4cH0Sg6g7ArP4L', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_Pz82ndNLSV4cH0Sg6g7ArP4L')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=420, completion_tokens=87), content=\"Great! Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season. Now, let's continue with the next tasks:\\n\\n2. WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2007-2008 NBA season.\\n3. 
WebSearchAgent: Retrieve the total rebounds for Dwyane Wade during the 2008-2009 NBA season.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=525, completion_tokens=71), content=[FunctionCall(id='call_3qv9so2DXFZIHtzqDIfXoFID', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Vh7zzzWUeiUAvaYjP0If0k1k', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_3qv9so2DXFZIHtzqDIfXoFID'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Vh7zzzWUeiUAvaYjP0If0k1k')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=569, completion_tokens=68), content=\"Now let's calculate the percentage change in total rebounds between the 2007-2008 and 2008-2009 seasons for Dwyane Wade.\\n\\n4. DataAnalystAgent: Calculate the percentage change in total rebounds for Dwyane Wade between the 2007-2008 and 2008-2009 seasons.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=627, completion_tokens=21), content=[FunctionCall(id='call_FXnPSr6JVGfAWs3StIizbt2V', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_FXnPSr6JVGfAWs3StIizbt2V')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=659, completion_tokens=76), content='Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring a total of 1397 points. 
The percentage change in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) is approximately 86.0%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:\n", - " # Have the Planning Agent speak immediately after any other agent to check progress.\n", - " if messages[-1].source != planning_agent.name:\n", - " return planning_agent.name\n", - " return None\n", - "\n", - "\n", - "# Reset the previous team and run the chat again with the selector function.\n", - "await team.reset()\n", - "team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination,\n", - " selector_prompt=selector_prompt,\n", - " allow_repeated_speaker=True,\n", - " selector_func=selector_func,\n", - ")\n", - "\n", - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see from the conversation log that the Planning Agent always speaks immediately after the specialized agents.\n", - "\n", - "```{tip}\n", - "Each participant agent only takes one step (executing tools, generating a response, etc.)\n", - "on each turn.\n", - "If you want an {py:class}`~autogen_agentchat.agents.AssistantAgent` to keep taking turns\n", - "until it stops returning a {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage`,\n", - "which indicates that it has finished running all the tools it needs to run,\n", - "you can do so by checking the last message and returning the agent\n", - "if it is a {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage`.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom Candidate Function" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "One more possible requirement might be to automatically select the next speaker from a filtered list of agents.\n", - "For this, we can set the `candidate_func` parameter with a custom candidate function to filter down the list of potential agents for speaker selection on each turn of the group chat.\n", - "\n", - "This allows us to restrict speaker selection to a specific set of agents after a given agent.\n", - "\n", - "\n", - "```{note}\n", - "The `candidate_func` is only valid if `selector_func` is not set.\n", - "Returning `None` or an empty list `[]` from the custom candidate function will raise a `ValueError`.\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", - "---------- PlanningAgent ----------\n", - "To answer this question, we'll break it down into two main subtasks:\n", - "\n", - "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", - "2. Calculate the percentage change in that player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", - "\n", - "Let's assign these tasks:\n", - "\n", - "1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", - "2. 
WebSearchAgent: Find the total rebound statistics for that identified player for both the 2007-2008 and 2008-2009 NBA seasons.\n", - "3. DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons once the data is retrieved.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_WtR5KTfEIxs3jIO25gjAw7dF', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_WtR5KTfEIxs3jIO25gjAw7dF', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_9HA3DEacUl4WuG2G2PtRkXAO', arguments='{\"start\": 432, \"end\": 527}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='21.99074074074074', name='percentage_change_tool', call_id='call_9HA3DEacUl4WuG2G2PtRkXAO', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "21.99074074074074\n", - "---------- PlanningAgent ----------\n", - "It seems we've missed some context there, so let's assign the subtasks again for clarity:\n", - "\n", - "Based on the search results, Dwyane Wade had the highest points for the Miami Heat in the 2006-2007 season with 1397 points.\n", - "\n", - "Now, let's find the necessary rebound statistics:\n", - "\n", - "2. WebSearchAgent: Find Dwyane Wade's total rebound statistics for both the 2007-2008 and 2008-2009 NBA seasons.\n", - "3. DataAnalystAgent: Once the data is retrieved, calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_3i1wTDSjkGg6Ev8YKYWkZK55', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_NRAs6jHxXRi8zsvpW5WlHAaU', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_3i1wTDSjkGg6Ev8YKYWkZK55', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_NRAs6jHxXRi8zsvpW5WlHAaU', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- PlanningAgent ----------\n", - "The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\n", - "\n", - "Now, let's calculate the percentage change.\n", - "\n", - "3. 
DataAnalystAgent: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_XECA7ezz7VIKbf8IbZYSCSpI', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_XECA7ezz7VIKbf8IbZYSCSpI', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1397 points. The percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\n", - "\n", - "TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=169), metadata={}, content=\"To answer this question, we'll break it down into two main subtasks:\\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Calculate the percentage change in that player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's assign these tasks:\\n\\n1. WebSearchAgent: Search for the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. WebSearchAgent: Find the total rebound statistics for that identified player for both the 2007-2008 and 2008-2009 NBA seasons.\\n3. 
DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons once the data is retrieved.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=324, completion_tokens=28), metadata={}, content=[FunctionCall(id='call_WtR5KTfEIxs3jIO25gjAw7dF', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_WtR5KTfEIxs3jIO25gjAw7dF', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=390, completion_tokens=37), metadata={}, content=[FunctionCall(id='call_9HA3DEacUl4WuG2G2PtRkXAO', arguments='{\"start\": 432, \"end\": 527}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='21.99074074074074', name='percentage_change_tool', call_id='call_9HA3DEacUl4WuG2G2PtRkXAO', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='21.99074074074074', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=413, completion_tokens=137), metadata={}, content=\"It seems we've missed some context there, so let's assign the subtasks again for clarity:\\n\\nBased on the search results, Dwyane Wade had the highest points for the Miami Heat in the 2006-2007 season with 1397 points.\\n\\nNow, let's find the necessary rebound statistics:\\n\\n2. WebSearchAgent: Find Dwyane Wade's total rebound statistics for both the 2007-2008 and 2008-2009 NBA seasons.\\n3. 
DataAnalystAgent: Once the data is retrieved, calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons.\", type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=576, completion_tokens=73), metadata={}, content=[FunctionCall(id='call_3i1wTDSjkGg6Ev8YKYWkZK55', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_NRAs6jHxXRi8zsvpW5WlHAaU', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_3i1wTDSjkGg6Ev8YKYWkZK55', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_NRAs6jHxXRi8zsvpW5WlHAaU', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, metadata={}, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=612, completion_tokens=84), metadata={}, content=\"The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\\n\\nNow, let's calculate the percentage change.\\n\\n3. DataAnalystAgent: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season to the 2008-2009 season.\", type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=720, completion_tokens=21), metadata={}, content=[FunctionCall(id='call_XECA7ezz7VIKbf8IbZYSCSpI', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_XECA7ezz7VIKbf8IbZYSCSpI', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, metadata={}, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=718, completion_tokens=63), metadata={}, content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1397 points. 
The percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons was approximately 85.98%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "def candidate_func(messages: Sequence[AgentEvent | ChatMessage]) -> List[str]:\n", - " # Keep planning_agent as the first speaker to plan out the tasks.\n", - " if messages[-1].source == \"user\":\n", - " return [planning_agent.name]\n", - "\n", - " # If the previous speaker is planning_agent and it explicitly asks for web_search_agent\n", - " # or data_analyst_agent or both (in case of re-planning or re-assignment of tasks),\n", - " # then return those specific agents.\n", - " last_message = messages[-1]\n", - " if last_message.source == planning_agent.name:\n", - " participants = []\n", - " if web_search_agent.name in last_message.content:\n", - " participants.append(web_search_agent.name)\n", - " if data_analyst_agent.name in last_message.content:\n", - " participants.append(data_analyst_agent.name)\n", - " if participants:\n", - " return participants # SelectorGroupChat will select from these candidates.\n", - "\n", - " # We can assume that the task is finished once both web_search_agent\n", - " # and data_analyst_agent have taken their turns, so we send\n", - " # in planning_agent to terminate the chat.\n", - " previous_set_of_agents = set(message.source for message in messages)\n", - " if web_search_agent.name in previous_set_of_agents and data_analyst_agent.name in previous_set_of_agents:\n", - " return [planning_agent.name]\n", - "\n", - " # If no conditions are met, return all the agents.\n", - " return [planning_agent.name, web_search_agent.name, data_analyst_agent.name]\n", - "\n", - "\n", - "# Reset the previous team and run the chat again with the candidate function.\n", - "await team.reset()\n", - "team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination,\n", - " candidate_func=candidate_func,\n", - ")\n", - "\n", - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see from the conversation log that the Planning Agent returns to the conversation once the Web Search Agent and the Data Analyst Agent have taken their turns. It finds that the task was not finished as expected, so it calls the WebSearchAgent again to get the rebound values and then calls the DataAnalystAgent to calculate the percentage change." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## User Feedback\n", - "\n", - "We can add {py:class}`~autogen_agentchat.agents.UserProxyAgent` to the team to\n", - "provide user feedback during a run.\n", - "See [Human-in-the-Loop](./tutorial/human-in-the-loop.ipynb) for more details\n", - "about {py:class}`~autogen_agentchat.agents.UserProxyAgent`.\n", - "\n", - "To use the {py:class}`~autogen_agentchat.agents.UserProxyAgent` in the \n", - "web search example, we simply add it to the team and update the selector function\n", - "to always check for user feedback after the planning agent speaks.\n", - "If the user responds with `\"APPROVE\"`, the conversation continues; otherwise,\n", - "the planning agent tries again, until the user approves."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- PlanningAgent ----------\n", - "To address the user's query, we will need to perform the following tasks:\n", - "\n", - "1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", - "2. Find the total rebounds for that player in the 2007-2008 season.\n", - "3. Find the total rebounds for that player in the 2008-2009 season.\n", - "4. Calculate the percentage change in the total rebounds between the 2007-2008 and 2008-2009 seasons.\n", - "\n", - "Let's assign these tasks:\n", - "\n", - "1. **WebSearchAgent**: Identify the Miami Heat player with the highest points in the 2006-2007 season.\n", - " \n", - "(Task 2 and 3 depend on the result of Task 1. We'll proceed with Tasks 2 and 3 once Task 1 is complete.)\n", - "---------- UserProxyAgent ----------\n", - "approve\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_0prr3fUnG5CtisUG7QeygW0w', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_0prr3fUnG5CtisUG7QeygW0w')]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- PlanningAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points.\n", - "\n", - "Next, we need to find Dwyane Wade's total rebounds for the 2007-2008 and 2008-2009 seasons:\n", - "\n", - "2. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2007-2008 season.\n", - "3. 
**WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2008-2009 season.\n", - "---------- UserProxyAgent ----------\n", - "approve\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_fBZe80NaBfruOVGwRWbhXyRm', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_cURYibna4fGxySiL7IYt0c3s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_fBZe80NaBfruOVGwRWbhXyRm'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_cURYibna4fGxySiL7IYt0c3s')]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- PlanningAgent ----------\n", - "Now that we have Dwyane Wade's total rebounds for both seasons, we can calculate the percentage change:\n", - "\n", - "4. **DataAnalystAgent**: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds).\n", - "---------- UserProxyAgent ----------\n", - "approve\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_z3uog7t2x0z1Suzl5hACF9hY', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', call_id='call_z3uog7t2x0z1Suzl5hACF9hY')]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season, which is a percentage change of approximately 85.98%.\n", - "\n", - "TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=161, completion_tokens=166), content=\"To address the user's query, we will need to perform the following tasks:\\n\\n1. Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n2. Find the total rebounds for that player in the 2007-2008 season.\\n3. Find the total rebounds for that player in the 2008-2009 season.\\n4. Calculate the percentage change in the total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's assign these tasks:\\n\\n1. **WebSearchAgent**: Identify the Miami Heat player with the highest points in the 2006-2007 season.\\n \\n(Task 2 and 3 depend on the result of Task 1. 
We'll proceed with Tasks 2 and 3 once Task 1 is complete.)\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='2a433f88-f886-4b39-a078-ea1acdcb2f9d', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=323, completion_tokens=28), content=[FunctionCall(id='call_0prr3fUnG5CtisUG7QeygW0w', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_0prr3fUnG5CtisUG7QeygW0w')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=403, completion_tokens=112), content=\"Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points.\\n\\nNext, we need to find Dwyane Wade's total rebounds for the 2007-2008 and 2008-2009 seasons:\\n\\n2. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2007-2008 season.\\n3. **WebSearchAgent**: Find Dwyane Wade's total rebounds for the 2008-2009 season.\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='23dd4570-2391-41e9-aeea-86598499792c', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=543, completion_tokens=73), content=[FunctionCall(id='call_fBZe80NaBfruOVGwRWbhXyRm', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 NBA season\"}', name='search_web_tool'), FunctionCall(id='call_cURYibna4fGxySiL7IYt0c3s', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 NBA season\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_fBZe80NaBfruOVGwRWbhXyRm'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_cURYibna4fGxySiL7IYt0c3s')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\\nThe number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=586, completion_tokens=70), content=\"Now that we have Dwyane Wade's total rebounds for both seasons, we can calculate the percentage change:\\n\\n4. 
**DataAnalystAgent**: Calculate the percentage change in Dwyane Wade's total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds).\", type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='e849d193-4ab3-4558-8560-7dbc062a0aee', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='approve', type='TextMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=655, completion_tokens=21), content=[FunctionCall(id='call_z3uog7t2x0z1Suzl5hACF9hY', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_z3uog7t2x0z1Suzl5hACF9hY')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=687, completion_tokens=74), content='Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season, which is a percentage change of approximately 85.98%.\\n\\nTERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "user_proxy_agent = UserProxyAgent(\"UserProxyAgent\", description=\"A proxy for the user to approve or disapprove tasks.\")\n", - "\n", - "\n", - "def selector_func_with_user_proxy(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:\n", - " if messages[-1].source != planning_agent.name and messages[-1].source != user_proxy_agent.name:\n", - " # Planning agent should be the first to engage when given a new task, or check progress.\n", - " return planning_agent.name\n", - " if messages[-1].source == planning_agent.name:\n", - " if messages[-2].source == user_proxy_agent.name and \"APPROVE\" in messages[-1].content.upper(): # type: ignore\n", - " # User has approved the plan, proceed to the next agent.\n", - " return None\n", - " # Use the user proxy agent to get the user's approval to proceed.\n", - " return user_proxy_agent.name\n", - " if messages[-1].source == user_proxy_agent.name:\n", - " # If the user does not approve, return to the planning agent.\n", - " if \"APPROVE\" not in messages[-1].content.upper(): # type: ignore\n", - " return planning_agent.name\n", - " return None\n", - "\n", - "\n", - "# Reset the previous agents and run the chat again with the user proxy agent and selector function.\n", - "await team.reset()\n", - "team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent, user_proxy_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination,\n", - " selector_prompt=selector_prompt,\n", - " selector_func=selector_func_with_user_proxy,\n", - " allow_repeated_speaker=True,\n", - ")\n", - "\n", - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, the user's feedback is incorporated into the conversation flow,\n", - "and the user can approve or reject the planning agent's decisions." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using Reasoning Models\n", - "\n", - "So far in the examples, we have used a `gpt-4o` model. Models like `gpt-4o`\n", - "and `gemini-1.5-flash` are great at following instructions, so you can\n", - "have relatively detailed instructions in the selector prompt for the team and the \n", - "system messages for each agent to guide their behavior.\n", - "\n", - "However, if you are using a reasoning model like `o3-mini`, you will need to\n", - "keep the selector prompt and system messages as simple and to the point as possible.\n", - "This is because the reasoning models are already good at coming up with their own \n", - "instructions given the context provided to them.\n", - "\n", - "This also means that we don't need a planning agent to break down the task\n", - "anymore, since the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` that\n", - "uses a reasoning model can do that on its own.\n", - "\n", - "In the following example, we will use `o3-mini` as the model for the\n", - "agents and the team, and we will not use a planning agent.\n", - "Also, we are keeping the selector prompt and system messages as simple as possible." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "model_client = OpenAIChatCompletionClient(model=\"o3-mini\")\n", - "\n", - "web_search_agent = AssistantAgent(\n", - " \"WebSearchAgent\",\n", - " description=\"An agent for searching information on the web.\",\n", - " tools=[search_web_tool],\n", - " model_client=model_client,\n", - " system_message=\"\"\"Use web search tool to find information.\"\"\",\n", - ")\n", - "\n", - "data_analyst_agent = AssistantAgent(\n", - " \"DataAnalystAgent\",\n", - " description=\"An agent for performing calculations.\",\n", - " model_client=model_client,\n", - " tools=[percentage_change_tool],\n", - " system_message=\"\"\"Use tool to perform calculation. 
If you have not seen the data, ask for it.\"\"\",\n", - ")\n", - "\n", - "user_proxy_agent = UserProxyAgent(\n", - " \"UserProxyAgent\",\n", - " description=\"A user to approve or disapprove tasks.\",\n", - ")\n", - "\n", - "selector_prompt = \"\"\"Select an agent to perform task.\n", - "\n", - "{roles}\n", - "\n", - "Current conversation context:\n", - "{history}\n", - "\n", - "Read the above conversation, then select an agent from {participants} to perform the next task.\n", - "When the task is complete, let the user approve or disapprove the task.\n", - "\"\"\"\n", - "\n", - "team = SelectorGroupChat(\n", - " [web_search_agent, data_analyst_agent, user_proxy_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination, # Use the same termination condition as before.\n", - " selector_prompt=selector_prompt,\n", - " allow_repeated_speaker=True,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', arguments='{\"query\": \"Who was the Miami Heat player with the highest points in the 2006-2007 season Miami Heat statistics Dwyane Wade rebounds percentage change 2007-2008 2008-2009 seasons\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- DataAnalystAgent ----------\n", - "I found that in the 2006–2007 season the player with the highest points was Dwyane Wade (with 1,397 points). 
Could you please provide Dwyane Wade’s total rebounds for the 2007–2008 and the 2008–2009 seasons so I can calculate the percentage change?\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_lppGTILXDvO9waPwKO66ehK6', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 and 2008-2009 seasons for Miami Heat\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_lppGTILXDvO9waPwKO66ehK6', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "---------- DataAnalystAgent ----------\n", - "Could you please provide Dwyane Wade’s total rebounds in the 2008-2009 season?\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_r8DBcbJtQfdtugLtyTrqOvoK', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season Miami Heat\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_r8DBcbJtQfdtugLtyTrqOvoK', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_4jejv1wM7V1osbBCxJze8aQM', arguments='{\"start\": 214, \"end\": 398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', call_id='call_4jejv1wM7V1osbBCxJze8aQM', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- DataAnalystAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest total points (1,397) during the 2006-2007 season. His total rebounds increased by approximately 86% from 214 in the 2007-2008 season to 398 in the 2008-2009 season.\n", - "---------- UserProxyAgent ----------\n", - "Approve. 
TERMINATE\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=103, completion_tokens=384), content=[FunctionCall(id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', arguments='{\"query\": \"Who was the Miami Heat player with the highest points in the 2006-2007 season Miami Heat statistics Dwyane Wade rebounds percentage change 2007-2008 2008-2009 seasons\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_hl7EP6Lp5jj5wEdxeNHTwUVG', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=183, completion_tokens=1038), content='I found that in the 2006–2007 season the player with the highest points was Dwyane Wade (with 1,397 points). Could you please provide Dwyane Wade’s total rebounds for the 2007–2008 and the 2008–2009 seasons so I can calculate the percentage change?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=299, completion_tokens=109), content=[FunctionCall(id='call_lppGTILXDvO9waPwKO66ehK6', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 and 2008-2009 seasons for Miami Heat\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_lppGTILXDvO9waPwKO66ehK6', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=291, completion_tokens=224), content='Could you please provide Dwyane Wade’s total rebounds in the 2008-2009 season?', type='TextMessage'), ToolCallRequestEvent(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=401, completion_tokens=37), content=[FunctionCall(id='call_r8DBcbJtQfdtugLtyTrqOvoK', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season Miami Heat\"}', name='search_web_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_r8DBcbJtQfdtugLtyTrqOvoK', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='WebSearchAgent', models_usage=None, content='The number of 
total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', type='ToolCallSummaryMessage'), ToolCallRequestEvent(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=353, completion_tokens=158), content=[FunctionCall(id='call_4jejv1wM7V1osbBCxJze8aQM', arguments='{\"start\": 214, \"end\": 398}', name='percentage_change_tool')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_4jejv1wM7V1osbBCxJze8aQM', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='DataAnalystAgent', models_usage=None, content='85.98130841121495', type='ToolCallSummaryMessage'), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=394, completion_tokens=138), content='Dwyane Wade was the Miami Heat player with the highest total points (1,397) during the 2006-2007 season. His total rebounds increased by approximately 86% from 214 in the 2007-2008 season to 398 in the 2008-2009 season.', type='TextMessage'), UserInputRequestedEvent(source='UserProxyAgent', models_usage=None, request_id='b3b05408-73fc-47d4-b832-16c9f447cd6e', content='', type='UserInputRequestedEvent'), TextMessage(source='UserProxyAgent', models_usage=None, content='Approve. TERMINATE', type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await Console(team.run_stream(task=task))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{tip}\n", - "For more guidance on how to prompt reasoning models, see the\n", - "Azure AI Services Blog on [Prompt Engineering for OpenAI's O1 and O3-mini Reasoning Models](https://techcommunity.microsoft.com/blog/azure-ai-services-blog/prompt-engineering-for-openai%E2%80%99s-o1-and-o3-mini-reasoning-models/4374010)\n", - "```" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tracing.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tracing.ipynb index 8e6f07b90482..e4dc8411c3a3 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tracing.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tracing.ipynb @@ -359,10 +359,9 @@ " if not isinstance(response, TaskResult):\n", " print(f\"\\n-- {response.source} -- : {response.content}\")\n", " with tracer.start_as_current_span(f\"agent_message.{response.source}\") as message_span:\n", - " content = response.content if isinstance(response.content, str) else str(response.content)\n", " message_span.set_attribute(\"agent.name\", response.source)\n", - " message_span.set_attribute(\"message.content\", content)\n", - " print(f\"{response.source}: {response.content}\")\n", + " message_span.set_attribute(\"message.content\", response.content_to_text())\n", + " print(f\"{response.source}: {response.content_to_text()}\")\n", "\n", " await model_client.close()\n", 
"\n", @@ -395,7 +394,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index 35c9052dee75..64777d7b38af 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -1,847 +1,847 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Agents\n", - "\n", - "AutoGen AgentChat provides a set of preset Agents, each with variations in how an agent might respond to messages.\n", - "All agents share the following attributes and methods:\n", - "\n", - "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.name`: The unique name of the agent.\n", - "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.description`: The description of the agent in text.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: Send the agent a sequence of {py:class}`~autogen_agentchat.messages.ChatMessage` get a {py:class}`~autogen_agentchat.base.Response`. **It is important to note that agents are expected to be stateful and this method is expected to be called with new messages, not the complete history**.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage` followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: Reset the agent to its initial state.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`: convenience methods that call {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` respectively but offer the same interface as [Teams](./teams.ipynb).\n", - "\n", - "See {py:mod}`autogen_agentchat.messages` for more information on AgentChat message types.\n", - "\n", - "\n", - "## Assistant Agent\n", - "\n", - "{py:class}`~autogen_agentchat.agents.AssistantAgent` is a built-in agent that\n", - "uses a language model and has the ability to use tools." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.messages import TextMessage\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_core import CancellationToken\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "# Define a tool that searches the web for information.\n", - "async def web_search(query: str) -> str:\n", - " \"\"\"Find information on the web\"\"\"\n", - " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", - "\n", - "\n", - "# Create an agent that uses the OpenAI GPT-4o model.\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " # api_key=\"YOUR_API_KEY\",\n", - ")\n", - "agent = AssistantAgent(\n", - " name=\"assistant\",\n", - " model_client=model_client,\n", - " tools=[web_search],\n", - " system_message=\"Use tools to solve tasks.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## Getting Responses\n", - "\n", - "We can use the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method to get the agent response to a given message.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=598, completion_tokens=16), content=[FunctionCall(id='call_9UWYM1CgE3ZbnJcSJavNDB79', arguments='{\"query\":\"AutoGen\"}', name='web_search')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_9UWYM1CgE3ZbnJcSJavNDB79', is_error=False)], type='ToolCallExecutionEvent')]\n", - "source='assistant' models_usage=None content='AutoGen is a programming framework for building multi-agent applications.' type='ToolCallSummaryMessage'\n" - ] - } - ], - "source": [ - "async def assistant_run() -> None:\n", - " response = await agent.on_messages(\n", - " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - " )\n", - " print(response.inner_messages)\n", - " print(response.chat_message)\n", - "\n", - "\n", - "# Use asyncio.run(assistant_run()) when running in a script.\n", - "await assistant_run()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The call to the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method\n", - "returns a {py:class}`~autogen_agentchat.base.Response`\n", - "that contains the agent's final response in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute,\n", - "as well as a list of inner messages in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` attribute,\n", - "which stores the agent's \"thought process\" that led to the final response.\n", - "\n", - "```{note}\n", - "It is important to note that {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`\n", - "will update the internal state of the agent -- it will add the messages to the agent's\n", - "history. 
So you should call this method with new messages.\n", - "**You should not repeatedly call this method with the same messages or the complete history.**\n", - "```\n", - "\n", - "```{note}\n", - "Unlike in v0.2 AgentChat, the tools are executed by the same agent directly within\n", - "the same call to {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`.\n", - "By default, the agent will return the result of the tool call as the final response.\n", - "```\n", - "\n", - "You can also call the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method, which is a convenience method that calls {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`. \n", - "It follows the same interface as [Teams](./teams.ipynb) and returns a {py:class}`~autogen_agentchat.base.TaskResult` object." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Multi-Modal Input\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can handle multi-modal input\n", - "by providing the input as a {py:class}`~autogen_agentchat.messages.MultiModalMessage`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from io import BytesIO\n", - "\n", - "import PIL\n", - "import requests\n", - "from autogen_agentchat.messages import MultiModalMessage\n", - "from autogen_core import Image\n", - "\n", - "# Create a multi-modal message with random image and text.\n", - "pil_image = PIL.Image.open(BytesIO(requests.get(\"https://picsum.photos/300/200\").content))\n", - "img = Image(pil_image)\n", - "multi_modal_message = MultiModalMessage(content=[\"Can you describe the content of this image?\", img], source=\"user\")\n", - "img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The image depicts a vintage car, likely from the 1930s or 1940s, with a sleek, classic design. The car seems to be customized or well-maintained, as indicated by its shiny exterior and lowered stance. It has a prominent grille and round headlights. There's a license plate on the front with the text \"FARMER BOY.\" The setting appears to be a street with old-style buildings in the background, suggesting a historical or retro theme.\n" - ] - } - ], - "source": [ - "# Use asyncio.run(...) when running in a script.\n", - "response = await agent.on_messages([multi_modal_message], CancellationToken())\n", - "print(response.chat_message.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also use {py:class}`~autogen_agentchat.messages.MultiModalMessage` as a `task`\n", - "input to the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Streaming Messages\n", - "\n", - "We can also stream each message as it is generated by the agent by using the\n", - "{py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method,\n", - "and use {py:class}`~autogen_agentchat.ui.Console` to print the messages\n", - "as they appear to the console." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- assistant ----------\n", - "[FunctionCall(id='call_fSp5iTGVm2FKw5NIvfECSqNd', arguments='{\"query\":\"AutoGen information\"}', name='web_search')]\n", - "[Prompt tokens: 61, Completion tokens: 16]\n", - "---------- assistant ----------\n", - "[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_fSp5iTGVm2FKw5NIvfECSqNd')]\n", - "---------- assistant ----------\n", - "AutoGen is a programming framework designed for building multi-agent applications. If you need more detailed information or specific aspects about AutoGen, feel free to ask!\n", - "[Prompt tokens: 93, Completion tokens: 32]\n", - "---------- Summary ----------\n", - "Number of inner messages: 2\n", - "Total prompt tokens: 154\n", - "Total completion tokens: 48\n", - "Duration: 4.30 seconds\n" - ] - } - ], - "source": [ - "async def assistant_run_stream() -> None:\n", - " # Option 1: read each message from the stream (as shown in the previous example).\n", - " # async for message in agent.on_messages_stream(\n", - " # [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " # cancellation_token=CancellationToken(),\n", - " # ):\n", - " # print(message)\n", - "\n", - " # Option 2: use Console to print all messages as they appear.\n", - " await Console(\n", - " agent.on_messages_stream(\n", - " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - " ),\n", - " output_stats=True, # Enable stats printing.\n", - " )\n", - "\n", - "\n", - "# Use asyncio.run(assistant_run_stream()) when running in a script.\n", - "await assistant_run_stream()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method\n", - "returns an asynchronous generator that yields each inner message generated by the agent,\n", - "with the final item being the response message in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute.\n", - "\n", - "From the messages, you can observe that the assistant agent utilized the `web_search` tool to\n", - "gather information and responded based on the search results.\n", - "\n", - "You can also use {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` to get the same streaming behavior as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`. It follows the same interface as [Teams](./teams.ipynb)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using Tools\n", - "\n", - "Large Language Models (LLMs) are typically limited to generating text or code responses. \n", - "However, many complex tasks benefit from the ability to use external tools that perform specific actions,\n", - "such as fetching data from APIs or databases.\n", - "\n", - "To address this limitation, modern LLMs can now accept a list of available tool schemas \n", - "(descriptions of tools and their arguments) and generate a tool call message. 
\n", - "This capability is known as **Tool Calling** or **Function Calling** and \n", - "is becoming a popular pattern in building intelligent agent-based applications.\n", - "Refer to the documentation from [OpenAI](https://platform.openai.com/docs/guides/function-calling) \n", - "and [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) for more information about tool calling in LLMs.\n", - "\n", - "In AgentChat, the {py:class}`~autogen_agentchat.agents.AssistantAgent` can use tools to perform specific actions.\n", - "The `web_search` tool is one such tool that allows the assistant agent to search the web for information.\n", - "A custom tool can be a Python function or a subclass of the {py:class}`~autogen_core.tools.BaseTool`.\n", - "\n", - "```{note}\n", - "For how to use model clients directly with tools, refer to the [Tools](../../core-user-guide/components/tools.ipynb) section\n", - "in the Core User Guide.\n", - "```\n", - "\n", - "By default, when {py:class}`~autogen_agentchat.agents.AssistantAgent` executes a tool,\n", - "it will return the tool's output as a string in {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage` in its response.\n", - "If your tool does not return a well-formed string in natural language, you\n", - "can add a reflection step to have the model summarize the tool's output,\n", - "by setting the `reflect_on_tool_use=True` parameter in the {py:class}`~autogen_agentchat.agents.AssistantAgent` constructor.\n", - "\n", - "### Built-in Tools\n", - "\n", - "AutoGen Extension provides a set of built-in tools that can be used with the Assistant Agent.\n", - "Head over to the [API documentation](../../../reference/index.md) for all the available tools\n", - "under the `autogen_ext.tools` namespace. For example, you can find the following tools:\n", - "\n", - "- {py:mod}`~autogen_ext.tools.graphrag`: Tools for using GraphRAG index.\n", - "- {py:mod}`~autogen_ext.tools.http`: Tools for making HTTP requests.\n", - "- {py:mod}`~autogen_ext.tools.langchain`: Adaptor for using LangChain tools.\n", - "- {py:mod}`~autogen_ext.tools.mcp`: Tools for using Model Chat Protocol (MCP) servers." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Function Tool\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.AssistantAgent` automatically\n", - "converts a Python function into a {py:class}`~autogen_core.tools.FunctionTool`\n", - "which can be used as a tool by the agent and automatically generates the tool schema\n", - "from the function signature and docstring.\n", - "\n", - "The `web_search_func` tool is an example of a function tool.\n", - "The schema is automatically generated." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'name': 'web_search_func',\n", - " 'description': 'Find information on the web',\n", - " 'parameters': {'type': 'object',\n", - " 'properties': {'query': {'description': 'query',\n", - " 'title': 'Query',\n", - " 'type': 'string'}},\n", - " 'required': ['query'],\n", - " 'additionalProperties': False},\n", - " 'strict': False}" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from autogen_core.tools import FunctionTool\n", - "\n", - "\n", - "# Define a tool using a Python function.\n", - "async def web_search_func(query: str) -> str:\n", - " \"\"\"Find information on the web\"\"\"\n", - " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", - "\n", - "\n", - "# This step is automatically performed inside the AssistantAgent if the tool is a Python function.\n", - "web_search_function_tool = FunctionTool(web_search_func, description=\"Find information on the web\")\n", - "# The schema is provided to the model during AssistantAgent's on_messages call.\n", - "web_search_function_tool.schema" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Model Context Protocol Tools\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can also use tools that are\n", - "served from a Model Context Protocol (MCP) server\n", - "using {py:func}`~autogen_ext.tools.mcp.mcp_server_tools`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Seattle, located in Washington state, is the most populous city in the state and a major city in the Pacific Northwest region of the United States. It's known for its vibrant cultural scene, significant economic presence, and rich history. Here are some key points about Seattle from the Wikipedia page:\n", - "\n", - "1. **History and Geography**: Seattle is situated between Puget Sound and Lake Washington, with the Cascade Range to the east and the Olympic Mountains to the west. Its history is deeply rooted in Native American heritage and its development was accelerated with the arrival of settlers in the 19th century. The city was officially incorporated in 1869.\n", - "\n", - "2. **Economy**: Seattle is a major economic hub with a diverse economy anchored by sectors like aerospace, technology, and retail. It's home to influential companies such as Amazon and Starbucks, and has a significant impact on the tech industry due to companies like Microsoft and other technology enterprises in the surrounding area.\n", - "\n", - "3. **Cultural Significance**: Known for its music scene, Seattle was the birthplace of grunge music in the early 1990s. It also boasts significant attractions like the Space Needle, Pike Place Market, and the Seattle Art Museum. \n", - "\n", - "4. **Education and Innovation**: The city hosts important educational institutions, with the University of Washington being a leading research university. Seattle is recognized for fostering innovation and is a leader in environmental sustainability efforts.\n", - "\n", - "5. **Demographics and Diversity**: Seattle is noted for its diverse population, reflected in its rich cultural tapestry. 
It has seen a significant increase in population, leading to urban development and changes in its social landscape.\n", - "\n", - "These points highlight Seattle as a dynamic city with a significant cultural, economic, and educational influence within the United States and beyond.\n" - ] - } - ], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "from autogen_ext.tools.mcp import StdioServerParams, mcp_server_tools\n", - "\n", - "# Get the fetch tool from mcp-server-fetch.\n", - "fetch_mcp_server = StdioServerParams(command=\"uvx\", args=[\"mcp-server-fetch\"])\n", - "tools = await mcp_server_tools(fetch_mcp_server)\n", - "\n", - "# Create an agent that can use the fetch tool.\n", - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "agent = AssistantAgent(name=\"fetcher\", model_client=model_client, tools=tools, reflect_on_tool_use=True) # type: ignore\n", - "\n", - "# Let the agent fetch the content of a URL and summarize it.\n", - "result = await agent.run(task=\"Summarize the content of https://en.wikipedia.org/wiki/Seattle\")\n", - "print(result.messages[-1].content)\n", - "\n", - "# Close the connection to the model client.\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Langchain Tools\n", - "\n", - "You can also use tools from the Langchain library\n", - "by wrapping them in {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- assistant ----------\n", - "[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')]\n", - "[Prompt tokens: 111, Completion tokens: 22]\n", - "---------- assistant ----------\n", - "[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')]\n", - "---------- assistant ----------\n", - "29.69911764705882\n", - "---------- Summary ----------\n", - "Number of inner messages: 2\n", - "Total prompt tokens: 111\n", - "Total completion tokens: 22\n", - "Duration: 0.62 seconds\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Agents\n", + "\n", + "AutoGen AgentChat provides a set of preset Agents, each with variations in how an agent might respond to messages.\n", + "All agents share the following attributes and methods:\n", + "\n", + "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.name`: The unique name of the agent.\n", + "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.description`: The description of the agent in text.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: Send the agent a sequence of {py:class}`~autogen_agentchat.messages.ChatMessage` and get a {py:class}`~autogen_agentchat.base.Response`. 
**It is important to note that agents are expected to be stateful and this method is expected to be called with new messages, not the complete history**.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage` followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: Reset the agent to its initial state.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`: convenience methods that call {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` respectively but offer the same interface as [Teams](./teams.ipynb).\n", + "\n", + "See {py:mod}`autogen_agentchat.messages` for more information on AgentChat message types.\n", + "\n", + "\n", + "## Assistant Agent\n", + "\n", + "{py:class}`~autogen_agentchat.agents.AssistantAgent` is a built-in agent that\n", + "uses a language model and has the ability to use tools." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.messages import TextMessage\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_core import CancellationToken\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "# Define a tool that searches the web for information.\n", + "async def web_search(query: str) -> str:\n", + " \"\"\"Find information on the web\"\"\"\n", + " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", + "\n", + "\n", + "# Create an agent that uses the OpenAI GPT-4o model.\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " # api_key=\"YOUR_API_KEY\",\n", + ")\n", + "agent = AssistantAgent(\n", + " name=\"assistant\",\n", + " model_client=model_client,\n", + " tools=[web_search],\n", + " system_message=\"Use tools to solve tasks.\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Getting Responses\n", + "\n", + "We can use the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method to get the agent response to a given message.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=598, completion_tokens=16), content=[FunctionCall(id='call_9UWYM1CgE3ZbnJcSJavNDB79', arguments='{\"query\":\"AutoGen\"}', name='web_search')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_9UWYM1CgE3ZbnJcSJavNDB79', is_error=False)], type='ToolCallExecutionEvent')]\n", + "source='assistant' models_usage=None content='AutoGen is a programming framework for building multi-agent applications.' 
type='ToolCallSummaryMessage'\n" + ] + } + ], + "source": [ + "async def assistant_run() -> None:\n", + " response = await agent.on_messages(\n", + " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", + " cancellation_token=CancellationToken(),\n", + " )\n", + " print(response.inner_messages)\n", + " print(response.chat_message)\n", + "\n", + "\n", + "# Use asyncio.run(assistant_run()) when running in a script.\n", + "await assistant_run()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The call to the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method\n", + "returns a {py:class}`~autogen_agentchat.base.Response`\n", + "that contains the agent's final response in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute,\n", + "as well as a list of inner messages in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` attribute,\n", + "which stores the agent's \"thought process\" that led to the final response.\n", + "\n", + "```{note}\n", + "It is important to note that {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`\n", + "will update the internal state of the agent -- it will add the messages to the agent's\n", + "history. So you should call this method with new messages.\n", + "**You should not repeatedly call this method with the same messages or the complete history.**\n", + "```\n", + "\n", + "```{note}\n", + "Unlike in v0.2 AgentChat, the tools are executed by the same agent directly within\n", + "the same call to {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`.\n", + "By default, the agent will return the result of the tool call as the final response.\n", + "```\n", + "\n", + "You can also call the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method, which is a convenience method that calls {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`. \n", + "It follows the same interface as [Teams](./teams.ipynb) and returns a {py:class}`~autogen_agentchat.base.TaskResult` object." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Multi-Modal Input\n", + "\n", + "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can handle multi-modal input\n", + "by providing the input as a {py:class}`~autogen_agentchat.messages.MultiModalMessage`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from io import BytesIO\n", + "\n", + "import PIL\n", + "import requests\n", + "from autogen_agentchat.messages import MultiModalMessage\n", + "from autogen_core import Image\n", + "\n", + "# Create a multi-modal message with random image and text.\n", + "pil_image = PIL.Image.open(BytesIO(requests.get(\"https://picsum.photos/300/200\").content))\n", + "img = Image(pil_image)\n", + "multi_modal_message = MultiModalMessage(content=[\"Can you describe the content of this image?\", img], source=\"user\")\n", + "img" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The image depicts a vintage car, likely from the 1930s or 1940s, with a sleek, classic design. The car seems to be customized or well-maintained, as indicated by its shiny exterior and lowered stance. 
It has a prominent grille and round headlights. There's a license plate on the front with the text \"FARMER BOY.\" The setting appears to be a street with old-style buildings in the background, suggesting a historical or retro theme.\n" + ] + } + ], + "source": [ + "# Use asyncio.run(...) when running in a script.\n", + "response = await agent.on_messages([multi_modal_message], CancellationToken())\n", + "print(response.chat_message)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also use {py:class}`~autogen_agentchat.messages.MultiModalMessage` as a `task`\n", + "input to the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Streaming Messages\n", + "\n", + "We can also stream each message as it is generated by the agent by using the\n", + "{py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method,\n", + "and use {py:class}`~autogen_agentchat.ui.Console` to print the messages\n", + "as they appear to the console." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- assistant ----------\n", + "[FunctionCall(id='call_fSp5iTGVm2FKw5NIvfECSqNd', arguments='{\"query\":\"AutoGen information\"}', name='web_search')]\n", + "[Prompt tokens: 61, Completion tokens: 16]\n", + "---------- assistant ----------\n", + "[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_fSp5iTGVm2FKw5NIvfECSqNd')]\n", + "---------- assistant ----------\n", + "AutoGen is a programming framework designed for building multi-agent applications. If you need more detailed information or specific aspects about AutoGen, feel free to ask!\n", + "[Prompt tokens: 93, Completion tokens: 32]\n", + "---------- Summary ----------\n", + "Number of inner messages: 2\n", + "Total prompt tokens: 154\n", + "Total completion tokens: 48\n", + "Duration: 4.30 seconds\n" + ] + } + ], + "source": [ + "async def assistant_run_stream() -> None:\n", + " # Option 1: read each message from the stream (as shown in the previous example).\n", + " # async for message in agent.on_messages_stream(\n", + " # [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", + " # cancellation_token=CancellationToken(),\n", + " # ):\n", + " # print(message)\n", + "\n", + " # Option 2: use Console to print all messages as they appear.\n", + " await Console(\n", + " agent.on_messages_stream(\n", + " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", + " cancellation_token=CancellationToken(),\n", + " ),\n", + " output_stats=True, # Enable stats printing.\n", + " )\n", + "\n", + "\n", + "# Use asyncio.run(assistant_run_stream()) when running in a script.\n", + "await assistant_run_stream()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method\n", + "returns an asynchronous generator that yields each inner message generated by the agent,\n", + "with the final item being the response message in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute.\n", + "\n", + "From the messages, you can observe that the assistant agent utilized the `web_search` tool to\n", + "gather information and responded based on the search results.\n", + "\n", + "You can also use 
{py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` to get the same streaming behavior as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`. It follows the same interface as [Teams](./teams.ipynb)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Using Tools\n",
+ "\n",
+ "Large Language Models (LLMs) are typically limited to generating text or code responses. \n",
+ "However, many complex tasks benefit from the ability to use external tools that perform specific actions,\n",
+ "such as fetching data from APIs or databases.\n",
+ "\n",
+ "To address this limitation, modern LLMs can now accept a list of available tool schemas \n",
+ "(descriptions of tools and their arguments) and generate a tool call message. \n",
+ "This capability is known as **Tool Calling** or **Function Calling** and \n",
+ "is becoming a popular pattern in building intelligent agent-based applications.\n",
+ "Refer to the documentation from [OpenAI](https://platform.openai.com/docs/guides/function-calling) \n",
+ "and [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) for more information about tool calling in LLMs.\n",
+ "\n",
+ "In AgentChat, the {py:class}`~autogen_agentchat.agents.AssistantAgent` can use tools to perform specific actions.\n",
+ "The `web_search` tool is one such tool that allows the assistant agent to search the web for information.\n",
+ "A custom tool can be a Python function or a subclass of {py:class}`~autogen_core.tools.BaseTool`.\n",
+ "\n",
+ "```{note}\n",
+ "For how to use model clients directly with tools, refer to the [Tools](../../core-user-guide/components/tools.ipynb) section\n",
+ "in the Core User Guide.\n",
+ "```\n",
+ "\n",
+ "By default, when {py:class}`~autogen_agentchat.agents.AssistantAgent` executes a tool,\n",
+ "it returns the tool's output as a string in a {py:class}`~autogen_agentchat.messages.ToolCallSummaryMessage` in its response.\n",
+ "If your tool does not return a well-formed string in natural language, you\n",
+ "can add a reflection step to have the model summarize the tool's output\n",
+ "by setting the `reflect_on_tool_use=True` parameter in the {py:class}`~autogen_agentchat.agents.AssistantAgent` constructor.\n",
+ "\n",
+ "### Built-in Tools\n",
+ "\n",
+ "AutoGen Extension provides a set of built-in tools that can be used with the Assistant Agent.\n",
+ "Head over to the [API documentation](../../../reference/index.md) for all the available tools\n",
+ "under the `autogen_ext.tools` namespace. For example, you can find the following tools:\n",
+ "\n",
+ "- {py:mod}`~autogen_ext.tools.graphrag`: Tools for using a GraphRAG index.\n",
+ "- {py:mod}`~autogen_ext.tools.http`: Tools for making HTTP requests.\n",
+ "- {py:mod}`~autogen_ext.tools.langchain`: Adapter for using LangChain tools.\n",
+ "- {py:mod}`~autogen_ext.tools.mcp`: Tools for using Model Context Protocol (MCP) servers."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Function Tool\n",
+ "\n",
+ "The {py:class}`~autogen_agentchat.agents.AssistantAgent` automatically\n",
+ "converts a Python function into a {py:class}`~autogen_core.tools.FunctionTool`,\n",
+ "which the agent can use as a tool, and automatically generates the tool schema\n",
+ "from the function signature and docstring.\n",
+ "\n",
+ "The `web_search_func` tool is an example of a function tool.\n",
+ "The schema is automatically generated."
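As an aside on the `reflect_on_tool_use` option described above, here is a minimal sketch of enabling it, assuming the same `web_search` function and OpenAI model client defined earlier in this notebook:

```python
from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient


# Assumed: the same web_search tool used throughout this notebook.
async def web_search(query: str) -> str:
    """Find information on the web"""
    return "AutoGen is a programming framework for building multi-agent applications."


model_client = OpenAIChatCompletionClient(model="gpt-4o")
reflecting_agent = AssistantAgent(
    name="assistant",
    model_client=model_client,
    tools=[web_search],
    system_message="Use tools to solve tasks.",
    reflect_on_tool_use=True,  # Summarize the raw tool output with an extra model call.
)
```

With this flag set, the final response is a `TextMessage` the model produces from the tool output, rather than a `ToolCallSummaryMessage` containing the output verbatim.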
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'name': 'web_search_func',\n", + " 'description': 'Find information on the web',\n", + " 'parameters': {'type': 'object',\n", + " 'properties': {'query': {'description': 'query',\n", + " 'title': 'Query',\n", + " 'type': 'string'}},\n", + " 'required': ['query'],\n", + " 'additionalProperties': False},\n", + " 'strict': False}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from autogen_core.tools import FunctionTool\n", + "\n", + "\n", + "# Define a tool using a Python function.\n", + "async def web_search_func(query: str) -> str:\n", + " \"\"\"Find information on the web\"\"\"\n", + " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", + "\n", + "\n", + "# This step is automatically performed inside the AssistantAgent if the tool is a Python function.\n", + "web_search_function_tool = FunctionTool(web_search_func, description=\"Find information on the web\")\n", + "# The schema is provided to the model during AssistantAgent's on_messages call.\n", + "web_search_function_tool.schema" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Model Context Protocol Tools\n", + "\n", + "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can also use tools that are\n", + "served from a Model Context Protocol (MCP) server\n", + "using {py:func}`~autogen_ext.tools.mcp.mcp_server_tools`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Seattle, located in Washington state, is the most populous city in the state and a major city in the Pacific Northwest region of the United States. It's known for its vibrant cultural scene, significant economic presence, and rich history. Here are some key points about Seattle from the Wikipedia page:\n", + "\n", + "1. **History and Geography**: Seattle is situated between Puget Sound and Lake Washington, with the Cascade Range to the east and the Olympic Mountains to the west. Its history is deeply rooted in Native American heritage and its development was accelerated with the arrival of settlers in the 19th century. The city was officially incorporated in 1869.\n", + "\n", + "2. **Economy**: Seattle is a major economic hub with a diverse economy anchored by sectors like aerospace, technology, and retail. It's home to influential companies such as Amazon and Starbucks, and has a significant impact on the tech industry due to companies like Microsoft and other technology enterprises in the surrounding area.\n", + "\n", + "3. **Cultural Significance**: Known for its music scene, Seattle was the birthplace of grunge music in the early 1990s. It also boasts significant attractions like the Space Needle, Pike Place Market, and the Seattle Art Museum. \n", + "\n", + "4. **Education and Innovation**: The city hosts important educational institutions, with the University of Washington being a leading research university. Seattle is recognized for fostering innovation and is a leader in environmental sustainability efforts.\n", + "\n", + "5. **Demographics and Diversity**: Seattle is noted for its diverse population, reflected in its rich cultural tapestry. 
It has seen a significant increase in population, leading to urban development and changes in its social landscape.\n",
+ "\n",
+ "These points highlight Seattle as a dynamic city with a significant cultural, economic, and educational influence within the United States and beyond.\n"
+ ]
+ }
+ ],
+ "source": [
+ "from autogen_agentchat.agents import AssistantAgent\n",
+ "from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
+ "from autogen_ext.tools.mcp import StdioServerParams, mcp_server_tools\n",
+ "\n",
+ "# Get the fetch tool from mcp-server-fetch.\n",
+ "fetch_mcp_server = StdioServerParams(command=\"uvx\", args=[\"mcp-server-fetch\"])\n",
+ "tools = await mcp_server_tools(fetch_mcp_server)\n",
+ "\n",
+ "# Create an agent that can use the fetch tool.\n",
+ "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n",
+ "agent = AssistantAgent(name=\"fetcher\", model_client=model_client, tools=tools, reflect_on_tool_use=True) # type: ignore\n",
+ "\n",
+ "# Let the agent fetch the content of a URL and summarize it.\n",
+ "result = await agent.run(task=\"Summarize the content of https://en.wikipedia.org/wiki/Seattle\")\n",
+ "print(result.messages[-1].content)\n",
+ "\n",
+ "# Close the connection to the model client.\n",
+ "await model_client.close()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### LangChain Tools\n",
+ "\n",
+ "You can also use tools from the LangChain library\n",
+ "by wrapping them in {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "---------- assistant ----------\n",
+ "[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')]\n",
+ "[Prompt tokens: 111, Completion tokens: 22]\n",
+ "---------- assistant ----------\n",
+ "[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')]\n",
+ "---------- assistant ----------\n",
+ "29.69911764705882\n",
+ "---------- Summary ----------\n",
+ "Number of inner messages: 2\n",
+ "Total prompt tokens: 111\n",
+ "Total completion tokens: 22\n",
+ "Duration: 0.62 seconds\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "Response(chat_message=ToolCallSummaryMessage(source='assistant', models_usage=None, content='29.69911764705882', type='ToolCallSummaryMessage'), inner_messages=[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=111, completion_tokens=22), content=[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')], type='ToolCallExecutionEvent')])"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "from autogen_ext.tools.langchain import LangChainToolAdapter\n",
+ "from langchain_experimental.tools.python.tool import PythonAstREPLTool\n",
+ "\n",
+ "df = pd.read_csv(\"https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv\")\n",
+ "tool = LangChainToolAdapter(PythonAstREPLTool(locals={\"df\": df}))\n",
+ "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n",
+ "agent = AssistantAgent(\n",
+ " \"assistant\", tools=[tool], model_client=model_client, system_message=\"Use the `df` variable to access the dataset.\"\n",
+ ")\n",
+ "await Console(\n",
+ " agent.on_messages_stream(\n",
+ " [TextMessage(content=\"What's the average age of the passengers?\", source=\"user\")], CancellationToken()\n",
+ " ),\n",
+ " output_stats=True,\n",
+ ")\n",
+ "\n",
+ "await model_client.close()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Parallel Tool Calls\n",
+ "\n",
+ "Some models support parallel tool calls, which can be useful for tasks that require multiple tools to be called simultaneously.\n",
+ "By default, if the model client produces multiple tool calls, {py:class}`~autogen_agentchat.agents.AssistantAgent`\n",
+ "will call the tools in parallel.\n",
+ "\n",
+ "You may want to disable parallel tool calls when the tools have side effects that may interfere with each other, or\n",
+ "when agent behavior needs to be consistent across different models.\n",
+ "This should be done at the model client level.\n",
+ "\n",
+ "For {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient` and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`,\n",
+ "set `parallel_tool_calls=False` to disable parallel tool calls."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model_client_no_parallel_tool_call = OpenAIChatCompletionClient(\n",
+ " model=\"gpt-4o\",\n",
+ " parallel_tool_calls=False, # type: ignore\n",
+ ")\n",
+ "agent_no_parallel_tool_call = AssistantAgent(\n",
+ " name=\"assistant\",\n",
+ " model_client=model_client_no_parallel_tool_call,\n",
+ " tools=[web_search],\n",
+ " system_message=\"Use tools to solve tasks.\",\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Running an Agent in a Loop\n",
+ "\n",
+ "The {py:class}`~autogen_agentchat.agents.AssistantAgent` executes one\n",
+ "step at a time: one model call, followed by one tool call (or parallel tool calls), and then\n",
+ "an optional reflection.\n",
+ "\n",
+ "To run it in a loop, for example until it stops producing\n",
+ "tool calls, refer to [Single-Agent Team](./teams.ipynb#single-agent-team)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Structured Output\n",
+ "\n",
+ "Structured output allows models to return structured JSON text with a pre-defined schema\n",
+ "provided by the application. Unlike JSON mode, the schema can be provided\n",
+ "as a [Pydantic BaseModel](https://docs.pydantic.dev/latest/concepts/models/)\n",
+ "class, which can also be used to validate the output. \n",
+ "\n",
+ "```{note}\n",
+ "Structured output is only available for models that support it.\n",
+ "The model client must support structured output as well.\n",
+ "Currently, the {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient`\n",
+ "and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`\n",
+ "support structured output.\n",
+ "```\n",
+ "\n",
+ "Structured output is also useful for incorporating Chain-of-Thought\n",
+ "reasoning in the agent's responses.\n",
+ "See the example below for how to use structured output with the assistant agent."
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "I am happy.\n", + "---------- assistant ----------\n", + "{\"thoughts\":\"The user explicitly states that they are happy.\",\"response\":\"happy\"}\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='I am happy.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=89, completion_tokens=18), content='{\"thoughts\":\"The user explicitly states that they are happy.\",\"response\":\"happy\"}', type='TextMessage')], stop_reason=None)" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from typing import Literal\n", + "\n", + "from pydantic import BaseModel\n", + "\n", + "\n", + "# The response format for the agent as a Pydantic base model.\n", + "class AgentResponse(BaseModel):\n", + " thoughts: str\n", + " response: Literal[\"happy\", \"sad\", \"neutral\"]\n", + "\n", + "\n", + "# Create an agent that uses the OpenAI GPT-4o model with the custom response format.\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " response_format=AgentResponse, # type: ignore\n", + ")\n", + "agent = AssistantAgent(\n", + " \"assistant\",\n", + " model_client=model_client,\n", + " system_message=\"Categorize the input as happy, sad, or neutral following the JSON format.\",\n", + ")\n", + "\n", + "await Console(agent.run_stream(task=\"I am happy.\"))\n", + "await model_client.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Streaming Tokens\n", + "\n", + "You can stream the tokens generated by the model client by setting `model_client_stream=True`.\n", + "This will cause the agent to yield {py:class}`~autogen_agentchat.messages.ModelClientStreamingChunkEvent` messages\n", + "in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`.\n", + "\n", + "The underlying model API must support streaming tokens for this to work.\n", + "Please check with your model provider to see if this is supported." 
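If you consume the stream yourself rather than printing every yielded item verbatim (as the next cell does for illustration), you would typically branch on the message type. A sketch, assuming a `streaming_assistant` constructed with `model_client_stream=True` as in the cell below:

```python
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.base import Response
from autogen_agentchat.messages import ModelClientStreamingChunkEvent, TextMessage
from autogen_core import CancellationToken


async def stream_tokens(streaming_assistant: AssistantAgent) -> None:
    async for message in streaming_assistant.on_messages_stream(
        [TextMessage(content="Name two cities in South America", source="user")],
        cancellation_token=CancellationToken(),
    ):
        if isinstance(message, ModelClientStreamingChunkEvent):
            print(message.content, end="", flush=True)  # Print each token as it arrives.
        elif isinstance(message, Response):
            print()  # The final Response follows the last chunk.
```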
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' South' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Buenos' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Aires' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Argentina' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' São' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Paulo' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Brazil' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", + "Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in South America are Buenos Aires in Argentina and São Paulo in Brazil.', type='TextMessage'), inner_messages=[])\n" + ] + } + ], + "source": [ + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "\n", + "streaming_assistant = AssistantAgent(\n", + " name=\"assistant\",\n", + " model_client=model_client,\n", + " system_message=\"You are a helpful assistant.\",\n", + " model_client_stream=True, # Enable streaming tokens.\n", + ")\n", + "\n", + "# Use an async function and asyncio.run() in a script.\n", + "async for message in streaming_assistant.on_messages_stream( # type: ignore\n", + " [TextMessage(content=\"Name two cities in South America\", source=\"user\")],\n", + " cancellation_token=CancellationToken(),\n", + "):\n", + " print(message)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see the streaming chunks in the output above.\n", + "The chunks are generated by the model client and are yielded by the agent as they are received.\n", + "The final response, the concatenation of all the chunks, is yielded right after the last chunk.\n", + "\n", + "Similarly, {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` will also yield the same streaming chunks,\n", + "followed by a full text message right after the last chunk." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "source='user' models_usage=None content='Name two cities in North America.' 
type='TextMessage'\n", + "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' North' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' New' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' York' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' City' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' the' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' United' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' States' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Toronto' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content=' Canada' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) content='Two cities in North America are New York City in the United States and Toronto in Canada.' type='TextMessage'\n", + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Name two cities in North America.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in North America are New York City in the United States and Toronto in Canada.', type='TextMessage')], stop_reason=None)\n" + ] + } + ], + "source": [ + "async for message in streaming_assistant.run_stream(task=\"Name two cities in North America.\"): # type: ignore\n", + " print(message)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Model Context\n", + "\n", + "{py:class}`~autogen_agentchat.agents.AssistantAgent` has a `model_context`\n", + "parameter that can be used to pass in a {py:class}`~autogen_core.model_context.ChatCompletionContext`\n", + "object. This allows the agent to use different model contexts, such as\n", + "{py:class}`~autogen_core.model_context.BufferedChatCompletionContext` to\n", + "limit the context sent to the model.\n", + "\n", + "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` uses\n", + "the {py:class}`~autogen_core.model_context.UnboundedChatCompletionContext`\n", + "which sends the full conversation history to the model. To limit the context\n", + "to the last `n` messages, you can use the {py:class}`~autogen_core.model_context.BufferedChatCompletionContext`." 
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from autogen_core.model_context import BufferedChatCompletionContext\n",
+ "\n",
+ "# Create an agent that uses only the last 5 messages in the context to generate responses.\n",
+ "agent = AssistantAgent(\n",
+ " name=\"assistant\",\n",
+ " model_client=model_client,\n",
+ " tools=[web_search],\n",
+ " system_message=\"Use tools to solve tasks.\",\n",
+ " model_context=BufferedChatCompletionContext(buffer_size=5), # Only use the last 5 messages in the context.\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Other Preset Agents\n",
+ "\n",
+ "The following preset agents are available:\n",
+ "\n",
+ "- {py:class}`~autogen_agentchat.agents.UserProxyAgent`: An agent that takes user input and returns it as responses.\n",
+ "- {py:class}`~autogen_agentchat.agents.CodeExecutorAgent`: An agent that can execute code.\n",
+ "- {py:class}`~autogen_ext.agents.openai.OpenAIAssistantAgent`: An agent that is backed by an OpenAI Assistant, with the ability to use custom tools.\n",
+ "- {py:class}`~autogen_ext.agents.web_surfer.MultimodalWebSurfer`: A multi-modal agent that can search the web and visit web pages for information.\n",
+ "- {py:class}`~autogen_ext.agents.file_surfer.FileSurfer`: An agent that can search and browse local files for information.\n",
+ "- {py:class}`~autogen_ext.agents.video_surfer.VideoSurfer`: An agent that can watch videos for information."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Next Step\n",
+ "\n",
+ "Having explored the usage of the {py:class}`~autogen_agentchat.agents.AssistantAgent`, we can now proceed to the next section to learn about the teams feature in AgentChat.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": ".venv",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.3"
+ }
},
- {
- "data": {
- "text/plain": [
- "Response(chat_message=ToolCallSummaryMessage(source='assistant', models_usage=None, content='29.69911764705882', type='ToolCallSummaryMessage'), inner_messages=[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=111, completion_tokens=22), content=[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')], type='ToolCallExecutionEvent')])"
- ]
- },
- "execution_count": 5,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "import pandas as pd\n",
- "from autogen_ext.tools.langchain import LangChainToolAdapter\n",
- "from langchain_experimental.tools.python.tool import PythonAstREPLTool\n",
- "\n",
- "df = pd.read_csv(\"https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv\")\n",
- "tool = LangChainToolAdapter(PythonAstREPLTool(locals={\"df\": df}))\n",
- "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n",
- "agent = AssistantAgent(\n",
- " \"assistant\", tools=[tool], 
model_client=model_client, system_message=\"Use the `df` variable to access the dataset.\"\n", - ")\n", - "await Console(\n", - " agent.on_messages_stream(\n", - " [TextMessage(content=\"What's the average age of the passengers?\", source=\"user\")], CancellationToken()\n", - " ),\n", - " output_stats=True,\n", - ")\n", - "\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Parallel Tool Calls\n", - "\n", - "Some models support parallel tool calls, which can be useful for tasks that require multiple tools to be called simultaneously.\n", - "By default, if the model client produces multiple tool calls, {py:class}`~autogen_agentchat.agents.AssistantAgent`\n", - "will call the tools in parallel.\n", - "\n", - "You may want to disable parallel tool calls when the tools have side effects that may interfere with each other, or,\n", - "when agent behavior needs to be consistent across different models.\n", - "This should be done at the model client level.\n", - "\n", - "For {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient` and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`,\n", - "set `parallel_tool_calls=False` to disable parallel tool calls." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model_client_no_parallel_tool_call = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " parallel_tool_calls=False, # type: ignore\n", - ")\n", - "agent_no_parallel_tool_call = AssistantAgent(\n", - " name=\"assistant\",\n", - " model_client=model_client_no_parallel_tool_call,\n", - " tools=[web_search],\n", - " system_message=\"Use tools to solve tasks.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Running an Agent in a Loop\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.AssistantAgent` executes one\n", - "step at a time: one model call, followed by one tool call (or parallel tool calls), and then\n", - "an optional reflection.\n", - "\n", - "To run it in a loop, for example, running it until it stops producing\n", - "tool calls, please refer to [Single-Agent Team](./teams.ipynb#single-agent-team)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Structured Output\n", - "\n", - "Structured output allows models to return structured JSON text with pre-defined schema\n", - "provided by the application. Different from JSON-mode, the schema can be provided\n", - "as a [Pydantic BaseModel](https://docs.pydantic.dev/latest/concepts/models/)\n", - "class, which can also be used to validate the output. \n", - "\n", - "```{note}\n", - "Structured output is only available for models that support it. It also\n", - "requires the model client to support structured output as well.\n", - "Currently, the {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient`\n", - "and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`\n", - "support structured output.\n", - "```\n", - "\n", - "Structured output is also useful for incorporating Chain-of-Thought\n", - "reasoning in the agent's responses.\n", - "See the example below for how to use structured output with the assistant agent." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "I am happy.\n", - "---------- assistant ----------\n", - "{\"thoughts\":\"The user explicitly states that they are happy.\",\"response\":\"happy\"}\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='I am happy.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=89, completion_tokens=18), content='{\"thoughts\":\"The user explicitly states that they are happy.\",\"response\":\"happy\"}', type='TextMessage')], stop_reason=None)" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from typing import Literal\n", - "\n", - "from pydantic import BaseModel\n", - "\n", - "\n", - "# The response format for the agent as a Pydantic base model.\n", - "class AgentResponse(BaseModel):\n", - " thoughts: str\n", - " response: Literal[\"happy\", \"sad\", \"neutral\"]\n", - "\n", - "\n", - "# Create an agent that uses the OpenAI GPT-4o model with the custom response format.\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\",\n", - " response_format=AgentResponse, # type: ignore\n", - ")\n", - "agent = AssistantAgent(\n", - " \"assistant\",\n", - " model_client=model_client,\n", - " system_message=\"Categorize the input as happy, sad, or neutral following the JSON format.\",\n", - ")\n", - "\n", - "await Console(agent.run_stream(task=\"I am happy.\"))\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Streaming Tokens\n", - "\n", - "You can stream the tokens generated by the model client by setting `model_client_stream=True`.\n", - "This will cause the agent to yield {py:class}`~autogen_agentchat.messages.ModelClientStreamingChunkEvent` messages\n", - "in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`.\n", - "\n", - "The underlying model API must support streaming tokens for this to work.\n", - "Please check with your model provider to see if this is supported." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' South' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Buenos' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Aires' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Argentina' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' São' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Paulo' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Brazil' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", - "Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in South America are Buenos Aires in Argentina and São Paulo in Brazil.', type='TextMessage'), inner_messages=[])\n" - ] - } - ], - "source": [ - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "\n", - "streaming_assistant = AssistantAgent(\n", - " name=\"assistant\",\n", - " model_client=model_client,\n", - " system_message=\"You are a helpful assistant.\",\n", - " model_client_stream=True, # Enable streaming tokens.\n", - ")\n", - "\n", - "# Use an async function and asyncio.run() in a script.\n", - "async for message in streaming_assistant.on_messages_stream( # type: ignore\n", - " [TextMessage(content=\"Name two cities in South America\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - "):\n", - " print(message)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see the streaming chunks in the output above.\n", - "The chunks are generated by the model client and are yielded by the agent as they are received.\n", - "The final response, the concatenation of all the chunks, is yielded right after the last chunk.\n", - "\n", - "Similarly, {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` will also yield the same streaming chunks,\n", - "followed by a full text message right after the last chunk." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "source='user' models_usage=None content='Name two cities in North America.' 
type='TextMessage'\n", - "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' North' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' New' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' York' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' City' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' the' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' United' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' States' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Toronto' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Canada' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) content='Two cities in North America are New York City in the United States and Toronto in Canada.' type='TextMessage'\n", - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Name two cities in North America.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in North America are New York City in the United States and Toronto in Canada.', type='TextMessage')], stop_reason=None)\n" - ] - } - ], - "source": [ - "async for message in streaming_assistant.run_stream(task=\"Name two cities in North America.\"): # type: ignore\n", - " print(message)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using Model Context\n", - "\n", - "{py:class}`~autogen_agentchat.agents.AssistantAgent` has a `model_context`\n", - "parameter that can be used to pass in a {py:class}`~autogen_core.model_context.ChatCompletionContext`\n", - "object. This allows the agent to use different model contexts, such as\n", - "{py:class}`~autogen_core.model_context.BufferedChatCompletionContext` to\n", - "limit the context sent to the model.\n", - "\n", - "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` uses\n", - "the {py:class}`~autogen_core.model_context.UnboundedChatCompletionContext`\n", - "which sends the full conversation history to the model. To limit the context\n", - "to the last `n` messages, you can use the {py:class}`~autogen_core.model_context.BufferedChatCompletionContext`." 
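> [Editor's note] A small sketch of the buffering behavior on its own, independent of any agent (illustrative only; it uses the public `add_message`/`get_messages` methods and assumes an async context):

```python
from autogen_core.model_context import BufferedChatCompletionContext
from autogen_core.models import UserMessage

context = BufferedChatCompletionContext(buffer_size=5)
for i in range(10):
    await context.add_message(UserMessage(content=f"message {i}", source="user"))

# Only the last 5 messages are surfaced to the model.
recent = await context.get_messages()
assert len(recent) == 5
assert recent[0].content == "message 5"
```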
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_core.model_context import BufferedChatCompletionContext\n", - "\n", - "# Create an agent that uses only the last 5 messages in the context to generate responses.\n", - "agent = AssistantAgent(\n", - " name=\"assistant\",\n", - " model_client=model_client,\n", - " tools=[web_search],\n", - " system_message=\"Use tools to solve tasks.\",\n", - " model_context=BufferedChatCompletionContext(buffer_size=5), # Only use the last 5 messages in the context.\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Other Preset Agents\n", - "\n", - "The following preset agents are available:\n", - "\n", - "- {py:class}`~autogen_agentchat.agents.UserProxyAgent`: An agent that takes user input returns it as responses.\n", - "- {py:class}`~autogen_agentchat.agents.CodeExecutorAgent`: An agent that can execute code.\n", - "- {py:class}`~autogen_ext.agents.openai.OpenAIAssistantAgent`: An agent that is backed by an OpenAI Assistant, with ability to use custom tools.\n", - "- {py:class}`~autogen_ext.agents.web_surfer.MultimodalWebSurfer`: A multi-modal agent that can search the web and visit web pages for information.\n", - "- {py:class}`~autogen_ext.agents.file_surfer.FileSurfer`: An agent that can search and browse local files for information.\n", - "- {py:class}`~autogen_ext.agents.video_surfer.VideoSurfer`: An agent that can watch videos for information." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Next Step\n", - "\n", - "Having explored the usage of the {py:class}`~autogen_agentchat.agents.AssistantAgent`, we can now proceed to the next section to learn about the teams feature in AgentChat.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/state.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/state.ipynb index 5fd628ac1dd2..05e799eb1821 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/state.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/state.ipynb @@ -1,359 +1,359 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Managing State \n", - "\n", - "So far, we have discussed how to build components in a multi-agent application - agents, teams, termination conditions. In many cases, it is useful to save the state of these components to disk and load them back later. This is particularly useful in a web application where stateless endpoints respond to requests and need to load the state of the application from persistent storage.\n", - "\n", - "In this notebook, we will discuss how to save and load the state of agents, teams, and termination conditions. 
\n", - " \n", - "\n", - "## Saving and Loading Agents\n", - "\n", - "We can get the state of an agent by calling {py:meth}`~autogen_agentchat.agents.AssistantAgent.save_state` method on \n", - "an {py:class}`~autogen_agentchat.agents.AssistantAgent`. " - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "In Tanganyika's embrace so wide and deep, \n", - "Ancient waters cradle secrets they keep, \n", - "Echoes of time where horizons sleep. \n" - ] - } - ], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import MaxMessageTermination\n", - "from autogen_agentchat.messages import TextMessage\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_core import CancellationToken\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o-2024-08-06\")\n", - "\n", - "assistant_agent = AssistantAgent(\n", - " name=\"assistant_agent\",\n", - " system_message=\"You are a helpful assistant\",\n", - " model_client=model_client,\n", - ")\n", - "\n", - "# Use asyncio.run(...) when running in a script.\n", - "response = await assistant_agent.on_messages(\n", - " [TextMessage(content=\"Write a 3 line poem on lake tangayika\", source=\"user\")], CancellationToken()\n", - ")\n", - "print(response.chat_message.content)\n", - "await model_client.close()" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'type': 'AssistantAgentState', 'version': '1.0.0', 'llm_messages': [{'content': 'Write a 3 line poem on lake tangayika', 'source': 'user', 'type': 'UserMessage'}, {'content': \"In Tanganyika's embrace so wide and deep, \\nAncient waters cradle secrets they keep, \\nEchoes of time where horizons sleep. \", 'source': 'assistant_agent', 'type': 'AssistantMessage'}]}\n" - ] - } - ], - "source": [ - "agent_state = await assistant_agent.save_state()\n", - "print(agent_state)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The last line of the poem was: \"Echoes of time where horizons sleep.\"\n" - ] - } - ], - "source": [ - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o-2024-08-06\")\n", - "\n", - "new_assistant_agent = AssistantAgent(\n", - " name=\"assistant_agent\",\n", - " system_message=\"You are a helpful assistant\",\n", - " model_client=model_client,\n", - ")\n", - "await new_assistant_agent.load_state(agent_state)\n", - "\n", - "# Use asyncio.run(...) when running in a script.\n", - "response = await new_assistant_agent.on_messages(\n", - " [TextMessage(content=\"What was the last line of the previous poem you wrote\", source=\"user\")], CancellationToken()\n", - ")\n", - "print(response.chat_message.content)\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "For {py:class}`~autogen_agentchat.agents.AssistantAgent`, its state consists of the model_context.\n", - "If your write your own custom agent, consider overriding the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.save_state` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.load_state` methods to customize the behavior. 
The default implementations save and load an empty state.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Saving and Loading Teams \n", - "\n", - "We can get the state of a team by calling `save_state` method on the team and load it back by calling `load_state` method on the team. \n", - "\n", - "When we call `save_state` on a team, it saves the state of all the agents in the team.\n", - "\n", - "We will begin by creating a simple {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` team with a single agent and ask it to write a poem. " - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Write a beautiful poem 3-line about lake tangayika\n", - "---------- assistant_agent ----------\n", - "In Tanganyika's gleam, beneath the azure skies, \n", - "Whispers of ancient waters, in tranquil guise, \n", - "Nature's mirror, where dreams and serenity lie.\n", - "[Prompt tokens: 29, Completion tokens: 34]\n", - "---------- Summary ----------\n", - "Number of messages: 2\n", - "Finish reason: Maximum number of messages 2 reached, current message count: 2\n", - "Total prompt tokens: 29\n", - "Total completion tokens: 34\n", - "Duration: 0.71 seconds\n" - ] - } - ], - "source": [ - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o-2024-08-06\")\n", - "\n", - "# Define a team.\n", - "assistant_agent = AssistantAgent(\n", - " name=\"assistant_agent\",\n", - " system_message=\"You are a helpful assistant\",\n", - " model_client=model_client,\n", - ")\n", - "agent_team = RoundRobinGroupChat([assistant_agent], termination_condition=MaxMessageTermination(max_messages=2))\n", - "\n", - "# Run the team and stream messages to the console.\n", - "stream = agent_team.run_stream(task=\"Write a beautiful poem 3-line about lake tangayika\")\n", - "\n", - "# Use asyncio.run(...) when running in a script.\n", - "await Console(stream)\n", - "\n", - "# Save the state of the agent team.\n", - "team_state = await agent_team.save_state()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If we reset the team (simulating instantiation of the team), and ask the question `What was the last line of the poem you wrote?`, we see that the team is unable to accomplish this as there is no reference to the previous run." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "What was the last line of the poem you wrote?\n", - "---------- assistant_agent ----------\n", - "I'm sorry, but I am unable to recall or access previous interactions, including any specific poem I may have composed in our past conversations. If you like, I can write a new poem for you.\n", - "[Prompt tokens: 28, Completion tokens: 40]\n", - "---------- Summary ----------\n", - "Number of messages: 2\n", - "Finish reason: Maximum number of messages 2 reached, current message count: 2\n", - "Total prompt tokens: 28\n", - "Total completion tokens: 40\n", - "Duration: 0.70 seconds\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Managing State \n", + "\n", + "So far, we have discussed how to build components in a multi-agent application - agents, teams, termination conditions. In many cases, it is useful to save the state of these components to disk and load them back later. 
This is particularly useful in a web application where stateless endpoints respond to requests and need to load the state of the application from persistent storage.\n",
+ "\n",
+ "In this notebook, we will discuss how to save and load the state of agents, teams, and termination conditions. \n",
+ " \n",
+ "\n",
+ "## Saving and Loading Agents\n",
+ "\n",
+ "We can get the state of an agent by calling the {py:meth}`~autogen_agentchat.agents.AssistantAgent.save_state` method on \n",
+ "an {py:class}`~autogen_agentchat.agents.AssistantAgent`. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "In Tanganyika's embrace so wide and deep, \n",
+ "Ancient waters cradle secrets they keep, \n",
+ "Echoes of time where horizons sleep. \n"
+ ]
+ }
+ ],
+ "source": [
+ "from autogen_agentchat.agents import AssistantAgent\n",
+ "from autogen_agentchat.conditions import MaxMessageTermination\n",
+ "from autogen_agentchat.messages import TextMessage\n",
+ "from autogen_agentchat.teams import RoundRobinGroupChat\n",
+ "from autogen_agentchat.ui import Console\n",
+ "from autogen_core import CancellationToken\n",
+ "from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
+ "\n",
+ "model_client = OpenAIChatCompletionClient(model=\"gpt-4o-2024-08-06\")\n",
+ "\n",
+ "assistant_agent = AssistantAgent(\n",
+ " name=\"assistant_agent\",\n",
+ " system_message=\"You are a helpful assistant\",\n",
+ " model_client=model_client,\n",
+ ")\n",
+ "\n",
+ "# Use asyncio.run(...) when running in a script.\n",
+ "response = await assistant_agent.on_messages(\n",
+ " [TextMessage(content=\"Write a 3 line poem on lake tangayika\", source=\"user\")], CancellationToken()\n",
+ ")\n",
+ "print(response.chat_message)\n",
+ "await model_client.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'type': 'AssistantAgentState', 'version': '1.0.0', 'llm_messages': [{'content': 'Write a 3 line poem on lake tangayika', 'source': 'user', 'type': 'UserMessage'}, {'content': \"In Tanganyika's embrace so wide and deep, \\nAncient waters cradle secrets they keep, \\nEchoes of time where horizons sleep. \", 'source': 'assistant_agent', 'type': 'AssistantMessage'}]}\n"
+ ]
+ }
+ ],
+ "source": [
+ "agent_state = await assistant_agent.save_state()\n",
+ "print(agent_state)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The last line of the poem was: \"Echoes of time where horizons sleep.\"\n"
+ ]
+ }
+ ],
+ "source": [
+ "model_client = OpenAIChatCompletionClient(model=\"gpt-4o-2024-08-06\")\n",
+ "\n",
+ "new_assistant_agent = AssistantAgent(\n",
+ " name=\"assistant_agent\",\n",
+ " system_message=\"You are a helpful assistant\",\n",
+ " model_client=model_client,\n",
+ ")\n",
+ "await new_assistant_agent.load_state(agent_state)\n",
+ "\n",
+ "# Use asyncio.run(...) when running in a script.\n",
+ "response = await new_assistant_agent.on_messages(\n",
+ " [TextMessage(content=\"What was the last line of the previous poem you wrote\", source=\"user\")], CancellationToken()\n",
+ ")\n",
+ "print(response.chat_message)\n",
+ "await model_client.close()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "```{note}\n",
+ "For {py:class}`~autogen_agentchat.agents.AssistantAgent`, its state consists of the model_context.\n",
+ "If you write your own custom agent, consider overriding the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.save_state` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.load_state` methods to customize the behavior. The default implementations save and load an empty state.\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Saving and Loading Teams \n",
+ "\n",
+ "We can get the state of a team by calling the `save_state` method on the team and load it back by calling the `load_state` method on the team. \n",
+ "\n",
+ "When we call `save_state` on a team, it saves the state of all the agents in the team.\n",
+ "\n",
+ "We will begin by creating a simple {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` team with a single agent and asking it to write a poem. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "---------- user ----------\n",
+ "Write a beautiful poem 3-line about lake tangayika\n",
+ "---------- assistant_agent ----------\n",
+ "In Tanganyika's gleam, beneath the azure skies, \n",
+ "Whispers of ancient waters, in tranquil guise, \n",
+ "Nature's mirror, where dreams and serenity lie.\n",
+ "[Prompt tokens: 29, Completion tokens: 34]\n",
+ "---------- Summary ----------\n",
+ "Number of messages: 2\n",
+ "Finish reason: Maximum number of messages 2 reached, current message count: 2\n",
+ "Total prompt tokens: 29\n",
+ "Total completion tokens: 34\n",
+ "Duration: 0.71 seconds\n"
+ ]
+ }
+ ],
+ "source": [
+ "model_client = OpenAIChatCompletionClient(model=\"gpt-4o-2024-08-06\")\n",
+ "\n",
+ "# Define a team.\n",
+ "assistant_agent = AssistantAgent(\n",
+ " name=\"assistant_agent\",\n",
+ " system_message=\"You are a helpful assistant\",\n",
+ " model_client=model_client,\n",
+ ")\n",
+ "agent_team = RoundRobinGroupChat([assistant_agent], termination_condition=MaxMessageTermination(max_messages=2))\n",
+ "\n",
+ "# Run the team and stream messages to the console.\n",
+ "stream = agent_team.run_stream(task=\"Write a beautiful poem 3-line about lake tangayika\")\n",
+ "\n",
+ "# Use asyncio.run(...) when running in a script.\n",
+ "await Console(stream)\n",
+ "\n",
+ "# Save the state of the agent team.\n",
+ "team_state = await agent_team.save_state()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If we reset the team (simulating a fresh instantiation of the team) and ask the question `What was the last line of the poem you wrote?`, we see that the team is unable to accomplish this, as there is no reference to the previous run."
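> [Editor's note] The note above recommends overriding `save_state` and `load_state` in custom agents. Here is a minimal hypothetical sketch of what that can look like; the `CountingAgent` and its counter are invented for illustration, not taken from the patch:

```python
from typing import Any, Mapping, Sequence

from autogen_agentchat.agents import BaseChatAgent
from autogen_agentchat.base import Response
from autogen_agentchat.messages import ChatMessage, TextMessage
from autogen_core import CancellationToken


class CountingAgent(BaseChatAgent):
    """Hypothetical custom agent whose only state is a message counter."""

    def __init__(self, name: str) -> None:
        super().__init__(name, description="Counts the messages it has seen.")
        self._count = 0

    @property
    def produced_message_types(self) -> Sequence[type[ChatMessage]]:
        return (TextMessage,)

    async def on_messages(
        self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
    ) -> Response:
        self._count += len(messages)
        return Response(
            chat_message=TextMessage(content=f"Seen {self._count} messages.", source=self.name)
        )

    async def on_reset(self, cancellation_token: CancellationToken) -> None:
        self._count = 0

    async def save_state(self) -> Mapping[str, Any]:
        # Persist the counter instead of the default empty state.
        return {"count": self._count}

    async def load_state(self, state: Mapping[str, Any]) -> None:
        self._count = state["count"]
```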
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "What was the last line of the poem you wrote?\n", + "---------- assistant_agent ----------\n", + "I'm sorry, but I am unable to recall or access previous interactions, including any specific poem I may have composed in our past conversations. If you like, I can write a new poem for you.\n", + "[Prompt tokens: 28, Completion tokens: 40]\n", + "---------- Summary ----------\n", + "Number of messages: 2\n", + "Finish reason: Maximum number of messages 2 reached, current message count: 2\n", + "Total prompt tokens: 28\n", + "Total completion tokens: 40\n", + "Duration: 0.70 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What was the last line of the poem you wrote?', type='TextMessage'), TextMessage(source='assistant_agent', models_usage=RequestUsage(prompt_tokens=28, completion_tokens=40), content=\"I'm sorry, but I am unable to recall or access previous interactions, including any specific poem I may have composed in our past conversations. If you like, I can write a new poem for you.\", type='TextMessage')], stop_reason='Maximum number of messages 2 reached, current message count: 2')" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await agent_team.reset()\n", + "stream = agent_team.run_stream(task=\"What was the last line of the poem you wrote?\")\n", + "await Console(stream)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we load the state of the team and ask the same question. We see that the team is able to accurately return the last line of the poem it wrote." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'type': 'TeamState', 'version': '1.0.0', 'agent_states': {'group_chat_manager/a55364ad-86fd-46ab-9449-dcb5260b1e06': {'type': 'RoundRobinManagerState', 'version': '1.0.0', 'message_thread': [{'source': 'user', 'models_usage': None, 'content': 'Write a beautiful poem 3-line about lake tangayika', 'type': 'TextMessage'}, {'source': 'assistant_agent', 'models_usage': {'prompt_tokens': 29, 'completion_tokens': 34}, 'content': \"In Tanganyika's gleam, beneath the azure skies, \\nWhispers of ancient waters, in tranquil guise, \\nNature's mirror, where dreams and serenity lie.\", 'type': 'TextMessage'}], 'current_turn': 0, 'next_speaker_index': 0}, 'collect_output_messages/a55364ad-86fd-46ab-9449-dcb5260b1e06': {}, 'assistant_agent/a55364ad-86fd-46ab-9449-dcb5260b1e06': {'type': 'ChatAgentContainerState', 'version': '1.0.0', 'agent_state': {'type': 'AssistantAgentState', 'version': '1.0.0', 'llm_messages': [{'content': 'Write a beautiful poem 3-line about lake tangayika', 'source': 'user', 'type': 'UserMessage'}, {'content': \"In Tanganyika's gleam, beneath the azure skies, \\nWhispers of ancient waters, in tranquil guise, \\nNature's mirror, where dreams and serenity lie.\", 'source': 'assistant_agent', 'type': 'AssistantMessage'}]}, 'message_buffer': []}}, 'team_id': 'a55364ad-86fd-46ab-9449-dcb5260b1e06'}\n", + "---------- user ----------\n", + "What was the last line of the poem you wrote?\n", + "---------- assistant_agent ----------\n", + "The last line of the poem I wrote is: \n", + "\"Nature's mirror, where dreams and serenity lie.\"\n", + "[Prompt tokens: 86, Completion tokens: 22]\n", + "---------- Summary ----------\n", + "Number of messages: 2\n", + "Finish reason: Maximum number of messages 2 reached, current message count: 2\n", + "Total prompt tokens: 86\n", + "Total completion tokens: 22\n", + "Duration: 0.96 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What was the last line of the poem you wrote?', type='TextMessage'), TextMessage(source='assistant_agent', models_usage=RequestUsage(prompt_tokens=86, completion_tokens=22), content='The last line of the poem I wrote is: \\n\"Nature\\'s mirror, where dreams and serenity lie.\"', type='TextMessage')], stop_reason='Maximum number of messages 2 reached, current message count: 2')" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "print(team_state)\n", + "\n", + "# Load team state.\n", + "await agent_team.load_state(team_state)\n", + "stream = agent_team.run_stream(task=\"What was the last line of the poem you wrote?\")\n", + "await Console(stream)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Persisting State (File or Database)\n", + "\n", + "In many cases, we may want to persist the state of the team to disk (or a database) and load it back later. State is a dictionary that can be serialized to a file or written to a database." 
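> [Editor's note] The cell below demonstrates file persistence; for the database case mentioned above, one possible sqlite3 sketch is shown here. The table name and single-row schema are invented for illustration, and `team_state` is the mapping saved earlier:

```python
import json
import sqlite3

conn = sqlite3.connect("team_state.db")
conn.execute("CREATE TABLE IF NOT EXISTS team_state (id INTEGER PRIMARY KEY, state TEXT)")

# Save: the state mapping is JSON-serializable, so store it as text.
conn.execute(
    "INSERT OR REPLACE INTO team_state (id, state) VALUES (1, ?)",
    (json.dumps(team_state),),
)
conn.commit()

# Load: read the row back and pass the parsed mapping to load_state() as before.
row = conn.execute("SELECT state FROM team_state WHERE id = 1").fetchone()
restored_state = json.loads(row[0])
conn.close()
```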
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "What was the last line of the poem you wrote?\n", + "---------- assistant_agent ----------\n", + "The last line of the poem I wrote is: \n", + "\"Nature's mirror, where dreams and serenity lie.\"\n", + "[Prompt tokens: 86, Completion tokens: 22]\n", + "---------- Summary ----------\n", + "Number of messages: 2\n", + "Finish reason: Maximum number of messages 2 reached, current message count: 2\n", + "Total prompt tokens: 86\n", + "Total completion tokens: 22\n", + "Duration: 0.72 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What was the last line of the poem you wrote?', type='TextMessage'), TextMessage(source='assistant_agent', models_usage=RequestUsage(prompt_tokens=86, completion_tokens=22), content='The last line of the poem I wrote is: \\n\"Nature\\'s mirror, where dreams and serenity lie.\"', type='TextMessage')], stop_reason='Maximum number of messages 2 reached, current message count: 2')" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import json\n", + "\n", + "## save state to disk\n", + "\n", + "with open(\"coding/team_state.json\", \"w\") as f:\n", + " json.dump(team_state, f)\n", + "\n", + "## load state from disk\n", + "with open(\"coding/team_state.json\", \"r\") as f:\n", + " team_state = json.load(f)\n", + "\n", + "new_agent_team = RoundRobinGroupChat([assistant_agent], termination_condition=MaxMessageTermination(max_messages=2))\n", + "await new_agent_team.load_state(team_state)\n", + "stream = new_agent_team.run_stream(task=\"What was the last line of the poem you wrote?\")\n", + "await Console(stream)\n", + "await model_client.close()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "agnext", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What was the last line of the poem you wrote?', type='TextMessage'), TextMessage(source='assistant_agent', models_usage=RequestUsage(prompt_tokens=28, completion_tokens=40), content=\"I'm sorry, but I am unable to recall or access previous interactions, including any specific poem I may have composed in our past conversations. If you like, I can write a new poem for you.\", type='TextMessage')], stop_reason='Maximum number of messages 2 reached, current message count: 2')" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await agent_team.reset()\n", - "stream = agent_team.run_stream(task=\"What was the last line of the poem you wrote?\")\n", - "await Console(stream)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we load the state of the team and ask the same question. We see that the team is able to accurately return the last line of the poem it wrote." 
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'type': 'TeamState', 'version': '1.0.0', 'agent_states': {'group_chat_manager/a55364ad-86fd-46ab-9449-dcb5260b1e06': {'type': 'RoundRobinManagerState', 'version': '1.0.0', 'message_thread': [{'source': 'user', 'models_usage': None, 'content': 'Write a beautiful poem 3-line about lake tangayika', 'type': 'TextMessage'}, {'source': 'assistant_agent', 'models_usage': {'prompt_tokens': 29, 'completion_tokens': 34}, 'content': \"In Tanganyika's gleam, beneath the azure skies, \\nWhispers of ancient waters, in tranquil guise, \\nNature's mirror, where dreams and serenity lie.\", 'type': 'TextMessage'}], 'current_turn': 0, 'next_speaker_index': 0}, 'collect_output_messages/a55364ad-86fd-46ab-9449-dcb5260b1e06': {}, 'assistant_agent/a55364ad-86fd-46ab-9449-dcb5260b1e06': {'type': 'ChatAgentContainerState', 'version': '1.0.0', 'agent_state': {'type': 'AssistantAgentState', 'version': '1.0.0', 'llm_messages': [{'content': 'Write a beautiful poem 3-line about lake tangayika', 'source': 'user', 'type': 'UserMessage'}, {'content': \"In Tanganyika's gleam, beneath the azure skies, \\nWhispers of ancient waters, in tranquil guise, \\nNature's mirror, where dreams and serenity lie.\", 'source': 'assistant_agent', 'type': 'AssistantMessage'}]}, 'message_buffer': []}}, 'team_id': 'a55364ad-86fd-46ab-9449-dcb5260b1e06'}\n", - "---------- user ----------\n", - "What was the last line of the poem you wrote?\n", - "---------- assistant_agent ----------\n", - "The last line of the poem I wrote is: \n", - "\"Nature's mirror, where dreams and serenity lie.\"\n", - "[Prompt tokens: 86, Completion tokens: 22]\n", - "---------- Summary ----------\n", - "Number of messages: 2\n", - "Finish reason: Maximum number of messages 2 reached, current message count: 2\n", - "Total prompt tokens: 86\n", - "Total completion tokens: 22\n", - "Duration: 0.96 seconds\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What was the last line of the poem you wrote?', type='TextMessage'), TextMessage(source='assistant_agent', models_usage=RequestUsage(prompt_tokens=86, completion_tokens=22), content='The last line of the poem I wrote is: \\n\"Nature\\'s mirror, where dreams and serenity lie.\"', type='TextMessage')], stop_reason='Maximum number of messages 2 reached, current message count: 2')" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "print(team_state)\n", - "\n", - "# Load team state.\n", - "await agent_team.load_state(team_state)\n", - "stream = agent_team.run_stream(task=\"What was the last line of the poem you wrote?\")\n", - "await Console(stream)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Persisting State (File or Database)\n", - "\n", - "In many cases, we may want to persist the state of the team to disk (or a database) and load it back later. State is a dictionary that can be serialized to a file or written to a database." 
- ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "What was the last line of the poem you wrote?\n", - "---------- assistant_agent ----------\n", - "The last line of the poem I wrote is: \n", - "\"Nature's mirror, where dreams and serenity lie.\"\n", - "[Prompt tokens: 86, Completion tokens: 22]\n", - "---------- Summary ----------\n", - "Number of messages: 2\n", - "Finish reason: Maximum number of messages 2 reached, current message count: 2\n", - "Total prompt tokens: 86\n", - "Total completion tokens: 22\n", - "Duration: 0.72 seconds\n" - ] - }, - { - "data": { - "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What was the last line of the poem you wrote?', type='TextMessage'), TextMessage(source='assistant_agent', models_usage=RequestUsage(prompt_tokens=86, completion_tokens=22), content='The last line of the poem I wrote is: \\n\"Nature\\'s mirror, where dreams and serenity lie.\"', type='TextMessage')], stop_reason='Maximum number of messages 2 reached, current message count: 2')" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import json\n", - "\n", - "## save state to disk\n", - "\n", - "with open(\"coding/team_state.json\", \"w\") as f:\n", - " json.dump(team_state, f)\n", - "\n", - "## load state from disk\n", - "with open(\"coding/team_state.json\", \"r\") as f:\n", - " team_state = json.load(f)\n", - "\n", - "new_agent_team = RoundRobinGroupChat([assistant_agent], termination_condition=MaxMessageTermination(max_messages=2))\n", - "await new_agent_team.load_state(team_state)\n", - "stream = new_agent_team.run_stream(task=\"What was the last line of the poem you wrote?\")\n", - "await Console(stream)\n", - "await model_client.close()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "agnext", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb index e8e34a73b420..7f18ac2bce3e 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb @@ -133,7 +133,7 @@ " response = await self._delegate.on_messages(\n", " [TextMessage(content=message.content, source=\"user\")], ctx.cancellation_token\n", " )\n", - " print(f\"{self.id.type} responded: {response.chat_message.content}\")" + " print(f\"{self.id.type} responded: {response.chat_message}\")" ] }, { From fe0f05b591a728c94cdd2dcfe747b0a497effad2 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 13:59:33 -0700 Subject: [PATCH 24/26] lint --- .../src/autogen_agentchat/agents/_user_proxy_agent.py | 2 ++ .../src/user-guide/agentchat-user-guide/custom-agents.ipynb | 6 +++--- .../src/user-guide/agentchat-user-guide/migration-guide.md | 4 ++-- 
.../docs/src/user-guide/agentchat-user-guide/tracing.ipynb | 2 +- .../user-guide/agentchat-user-guide/tutorial/agents.ipynb | 1 + .../autogen-ext/src/autogen_ext/tools/http/_http_tool.py | 2 +- .../autogen-ext/src/autogen_ext/tools/mcp/_factory.py | 2 +- python/samples/agentchat_chess_game/main.py | 6 +++--- python/samples/agentchat_streamlit/agent.py | 2 +- 9 files changed, 15 insertions(+), 12 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py index 3ca0ec890324..221832a8a70e 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py @@ -82,6 +82,7 @@ async def simple_user_agent(): cancellation_token=CancellationToken(), ) ) + assert isinstance(response.chat_message, TextMessage) print(f"Your name is {response.chat_message.content}") Example: @@ -117,6 +118,7 @@ async def cancellable_user_agent(): ) ) response = await agent_task + assert isinstance(response.chat_message, TextMessage) print(f"Your name is {response.chat_message.content}") except Exception as e: print(f"Exception: {e}") diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb index d11e5d1fc15d..58746cb700c2 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb @@ -363,7 +363,7 @@ " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", " # Add messages to the model context\n", " for msg in messages:\n", - " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", + " await self._model_context.add_message(msg.content_to_model_message())\n", "\n", " # Get conversation history\n", " history = [\n", @@ -541,7 +541,7 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -602,7 +602,7 @@ " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", " # Add messages to the model context\n", " for msg in messages:\n", - " await self._model_context.add_message(UserMessage(content=msg.content, source=msg.source))\n", + " await self._model_context.add_message(msg.content_to_model_message())\n", "\n", " # Get conversation history\n", " history = [\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md index d0533435fe42..3dc779c0f217 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md @@ -691,7 +691,7 @@ async def main() -> None: if user_input == "exit": break response = await assistant.on_messages([TextMessage(content=user_input, source="user")], CancellationToken()) - print("Assistant:", response.chat_message.content) + print("Assistant:", response.chat_message.content_to_text()) await model_client.close() asyncio.run(main()) @@ -1331,7 +1331,7 @@ async def main() -> None: if user_input == "exit": break response = await assistant.on_messages([TextMessage(content=user_input, 
source="user")], CancellationToken()) - print("Assistant:", response.chat_message.content) + print("Assistant:", response.chat_message.content_to_text()) await model_client.close() diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tracing.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tracing.ipynb index e4dc8411c3a3..4bdaf1168efd 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tracing.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tracing.ipynb @@ -357,7 +357,7 @@ " async for response in response_stream:\n", " async for response in response_stream:\n", " if not isinstance(response, TaskResult):\n", - " print(f\"\\n-- {response.source} -- : {response.content}\")\n", + " print(f\"\\n-- {response.source} -- : {response.content_to_text()}\")\n", " with tracer.start_as_current_span(f\"agent_message.{response.source}\") as message_span:\n", " message_span.set_attribute(\"agent.name\", response.source)\n", " message_span.set_attribute(\"message.content\", response.content_to_text())\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index 64777d7b38af..c927e775ad46 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -423,6 +423,7 @@ "\n", "# Let the agent fetch the content of a URL and summarize it.\n", "result = await agent.run(task=\"Summarize the content of https://en.wikipedia.org/wiki/Seattle\")\n", + "assert isinstance(result.messages[-1], TextMessage)\n", "print(result.messages[-1].content)\n", "\n", "# Close the connection to the model client.\n", diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py b/python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py index 451d5826bad7..9cbd600abc7c 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/http/_http_tool.py @@ -126,7 +126,7 @@ async def main(): [TextMessage(content="Can you base64 decode the value 'YWJjZGU=', please?", source="user")], CancellationToken(), ) - print(response.chat_message.content) + print(response.chat_message) asyncio.run(main()) diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/mcp/_factory.py b/python/packages/autogen-ext/src/autogen_ext/tools/mcp/_factory.py index 3eb8634b3698..3b8c2356b79f 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/mcp/_factory.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/mcp/_factory.py @@ -105,7 +105,7 @@ async def main() -> None: # Let the agent fetch the content of a URL and summarize it. 
result = await agent.run(task="Summarize the content of https://en.wikipedia.org/wiki/Seattle") - print(result.messages[-1].content) + print(result.messages[-1]) asyncio.run(main()) diff --git a/python/samples/agentchat_chess_game/main.py b/python/samples/agentchat_chess_game/main.py index e12db1d94778..914659cf5899 100644 --- a/python/samples/agentchat_chess_game/main.py +++ b/python/samples/agentchat_chess_game/main.py @@ -1,5 +1,6 @@ import argparse import asyncio +from autogen_agentchat.messages import TextMessage import yaml import random @@ -78,11 +79,10 @@ async def get_ai_move(board: chess.Board, player: AssistantAgent, max_tries: int while count < max_tries: result = await Console(player.run_stream(task=task)) count += 1 - response = result.messages[-1].content - assert isinstance(response, str) + assert isinstance(result.messages[-1], TextMessage) # Check if the response is a valid UC move. try: - move = chess.Move.from_uci(extract_move(response)) + move = chess.Move.from_uci(extract_move(result.messages[-1].content)) except (ValueError, IndexError): task = "Invalid format. Please read instruction.\n" + get_ai_prompt(board) continue diff --git a/python/samples/agentchat_streamlit/agent.py b/python/samples/agentchat_streamlit/agent.py index cbe588828838..acf2f9ed52f4 100644 --- a/python/samples/agentchat_streamlit/agent.py +++ b/python/samples/agentchat_streamlit/agent.py @@ -22,5 +22,5 @@ async def chat(self, prompt: str) -> str: [TextMessage(content=prompt, source="user")], CancellationToken(), ) - assert isinstance(response.chat_message.content, str) + assert isinstance(response.chat_message, TextMessage) return response.chat_message.content From 2f843a3ab2e30818d17f062e1c8df3de2a19589b Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 14:05:48 -0700 Subject: [PATCH 25/26] rename --- .../agents/_assistant_agent.py | 2 +- .../agents/_society_of_mind_agent.py | 2 +- .../conditions/_terminations.py | 2 +- .../src/autogen_agentchat/messages.py | 50 +++++++++---------- .../_magentic_one_orchestrator.py | 2 +- .../teams/_group_chat/_selector_group_chat.py | 2 +- .../src/autogen_agentchat/ui/_console.py | 10 ++-- .../tests/test_group_chat.py | 6 +-- .../tests/test_magentic_one_group_chat.py | 4 +- .../agents/file_surfer/_file_surfer.py | 2 +- .../agents/openai/_openai_assistant_agent.py | 2 +- .../web_surfer/_multimodal_web_surfer.py | 2 +- .../task_centric_memory/utils/apprentice.py | 4 +- .../task_centric_memory/utils/page_logger.py | 2 +- .../src/autogen_ext/ui/_rich_console.py | 2 +- 15 files changed, 47 insertions(+), 47 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index 7a6fdff1f6ba..ed797109c1c6 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -819,7 +819,7 @@ async def _add_messages_to_context( if isinstance(msg, HandoffMessage): for llm_msg in msg.context: await model_context.add_message(llm_msg) - await model_context.add_message(msg.content_to_model_message()) + await model_context.add_message(msg.to_model_message()) @staticmethod async def _update_model_context_with_memory( diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py index 
d81a1889a0e6..ac8f539653e3 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py @@ -168,7 +168,7 @@ async def on_messages_stream( llm_messages: List[LLMMessage] = [SystemMessage(content=self._instruction)] for message in messages: if isinstance(message, ChatMessage): - llm_messages.append(message.content_to_model_message()) + llm_messages.append(message.to_model_message()) llm_messages.append(SystemMessage(content=self._response_prompt)) completion = await self._model_client.create(messages=llm_messages, cancellation_token=cancellation_token) assert isinstance(completion.content, str) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py index 3a521377ee6a..c4ff24836abd 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py @@ -136,7 +136,7 @@ async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMe if self._sources is not None and message.source not in self._sources: continue - content = message.content_to_text() + content = message.to_text() if self._termination_text in content: self._terminated = True return StopMessage( diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index 0b9a980d43bc..e50b80c59ffd 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -77,31 +77,31 @@ class ChatMessage(BaseMessage, ABC): model_config = ConfigDict(arbitrary_types_allowed=True) @abstractmethod - def content_to_text(self) -> str: + def to_text(self) -> str: """Convert the content of the message to a string-only representation - that can be rendered in the console and inspected by the user. + that can be rendered in the console and inspected by the user or conditions. This is not used for creating text-only content for models. - For :class:`ChatMessage` types, use :meth:`content_to_model_text` instead.""" + For :class:`ChatMessage` types, use :meth:`to_model_text` instead.""" ... @abstractmethod - def content_to_model_text(self) -> str: + def to_model_text(self) -> str: """Convert the content of the message to text-only representation. This is used for creating text-only content for models. This is not used for rendering the message in console. For that, use - :meth:`~BaseMessage.content_to_text`. + :meth:`~BaseMessage.to_text`. - The difference between this and :meth:`content_to_model_message` is that this + The difference between this and :meth:`to_model_message` is that this is used to construct parts of the a message for the model client, - while :meth:`content_to_model_message` is used to create a complete message + while :meth:`to_model_message` is used to create a complete message for the model client. """ ... @abstractmethod - def content_to_model_message(self) -> UserMessage: + def to_model_message(self) -> UserMessage: """Convert the message content to a :class:`~autogen_core.models.UserMessage` for use with model client, e.g., :class:`~autogen_core.models.ChatCompletionClient`.""" ... 
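> [Editor's note] The three renamed conversion methods serve the distinct purposes described in the docstrings above. As an illustrative aside (not part of the patch, and assuming `TextMessage` keeps inheriting the text-based implementations shown in the next hunk):

```python
from autogen_agentchat.messages import TextMessage

msg = TextMessage(content="Hello there", source="user")

print(msg.to_text())           # Console rendering: "Hello there"
print(msg.to_model_text())     # Text used when building model content: "Hello there"
print(msg.to_model_message())  # UserMessage(content='Hello there', source='user')
```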
@@ -118,13 +118,13 @@ class TextChatMessage(ChatMessage, ABC): content: str """The content of the message.""" - def content_to_text(self) -> str: + def to_text(self) -> str: return self.content - def content_to_model_text(self) -> str: + def to_model_text(self) -> str: return self.content - def content_to_model_message(self) -> UserMessage: + def to_model_message(self) -> UserMessage: return UserMessage(content=self.content, source=self.source) @@ -156,12 +156,12 @@ class AgentEvent(BaseMessage, ABC): model_config = ConfigDict(arbitrary_types_allowed=True) @abstractmethod - def content_to_text(self) -> str: + def to_text(self) -> str: """Convert the content of the message to a string-only representation that can be rendered in the console and inspected by the user. This is not used for creating text-only content for models. - For :class:`ChatMessage` types, use :meth:`content_to_model_text` instead.""" + For :class:`ChatMessage` types, use :meth:`to_model_text` instead.""" ... @@ -199,13 +199,13 @@ class MyMessageContent(BaseModel): """The content of the message. Must be a subclass of `Pydantic BaseModel `_.""" - def content_to_text(self) -> str: + def to_text(self) -> str: return self.content.model_dump_json(indent=2) - def content_to_model_text(self) -> str: + def to_model_text(self) -> str: return self.content.model_dump_json() - def content_to_model_message(self) -> UserMessage: + def to_model_message(self) -> UserMessage: return UserMessage( content=self.content.model_dump_json(), source=self.source, @@ -224,7 +224,7 @@ class MultiModalMessage(ChatMessage): content: List[str | Image] """The content of the message.""" - def content_to_model_text(self, image_placeholder: str | None = "[image]") -> str: + def to_model_text(self, image_placeholder: str | None = "[image]") -> str: """Convert the content of the message to a string-only representation. If an image is present, it will be replaced with the image placeholder by default, otherwise it will be a base64 string when set to None. 
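> [Editor's note] A short sketch of the image-placeholder behavior documented above; `img` is assumed to be an `autogen_core.Image` loaded elsewhere, so this is illustrative only:

```python
from autogen_agentchat.messages import MultiModalMessage

msg = MultiModalMessage(content=["What is in this image?", img], source="user")

print(msg.to_model_text())                        # "What is in this image? [image]"
print(msg.to_model_text(image_placeholder=None))  # Text followed by the base64 image data.
```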
@@ -240,7 +240,7 @@ def content_to_model_text(self, image_placeholder: str | None = "[image]") -> st text += f" {c.to_base64()}" return text - def content_to_text(self, iterm: bool = False) -> str: + def to_text(self, iterm: bool = False) -> str: result: List[str] = [] for c in self.content: if isinstance(c, str): @@ -254,7 +254,7 @@ def content_to_text(self, iterm: bool = False) -> str: result.append("") return "\n".join(result) - def content_to_model_message(self) -> UserMessage: + def to_model_message(self) -> UserMessage: return UserMessage(content=self.content, source=self.source) @@ -286,7 +286,7 @@ class ToolCallRequestEvent(AgentEvent): content: List[FunctionCall] """The tool calls.""" - def content_to_text(self) -> str: + def to_text(self) -> str: return str(self.content) @@ -296,7 +296,7 @@ class ToolCallExecutionEvent(AgentEvent): content: List[FunctionExecutionResult] """The tool call results.""" - def content_to_text(self) -> str: + def to_text(self) -> str: return str(self.content) @@ -309,7 +309,7 @@ class UserInputRequestedEvent(AgentEvent): content: Literal[""] = "" """Empty content for compat with consumers expecting a content field.""" - def content_to_text(self) -> str: + def to_text(self) -> str: return str(self.content) @@ -319,7 +319,7 @@ class MemoryQueryEvent(AgentEvent): content: List[MemoryContent] """The memory query results.""" - def content_to_text(self) -> str: + def to_text(self) -> str: return str(self.content) @@ -329,7 +329,7 @@ class ModelClientStreamingChunkEvent(AgentEvent): content: str """A string chunk from the model client.""" - def content_to_text(self) -> str: + def to_text(self) -> str: return self.content @@ -341,7 +341,7 @@ class ThoughtEvent(AgentEvent): content: str """The thought process of the model.""" - def content_to_text(self) -> str: + def to_text(self) -> str: return self.content diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py index eb6d9c979e83..d442c8acb6bf 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py @@ -150,7 +150,7 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No # Create the initial task ledger ################################# # Combine all message contents for task - self._task = " ".join([msg.content_to_model_text() for msg in message.messages]) + self._task = " ".join([msg.to_model_text() for msg in message.messages]) planning_conversation: List[LLMMessage] = [] # 1. GATHER FACTS diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 8bb775d5d443..03587920d389 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -156,7 +156,7 @@ async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str: if not isinstance(msg, ChatMessage): # Only process chat messages. 
continue - message = f"{msg.source}: {msg.content_to_model_text()}" + message = f"{msg.source}: {msg.to_model_text()}" history_messages.append( message.rstrip() + "\n\n" ) # Create some consistency for how messages are separated in the transcript diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py index 5be9ce3fcf31..524ee93a8315 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/ui/_console.py @@ -136,9 +136,9 @@ async def Console( # Print final response. if isinstance(message.chat_message, MultiModalMessage): - final_content = message.chat_message.content_to_text(iterm=render_image_iterm) + final_content = message.chat_message.to_text(iterm=render_image_iterm) else: - final_content = message.chat_message.content_to_text() + final_content = message.chat_message.to_text() output = f"{'-' * 10} {message.chat_message.source} {'-' * 10}\n{final_content}\n" if message.chat_message.models_usage: if output_stats: @@ -175,7 +175,7 @@ async def Console( # Print message sender. await aprint(f"{'-' * 10} {message.source} {'-' * 10}", end="\n", flush=True) if isinstance(message, ModelClientStreamingChunkEvent): - await aprint(message.content_to_text(), end="") + await aprint(message.to_text(), end="") streaming_chunks.append(message.content) else: if streaming_chunks: @@ -183,9 +183,9 @@ async def Console( # Chunked messages are already printed, so we just print a newline. await aprint("", end="\n", flush=True) elif isinstance(message, MultiModalMessage): - await aprint(message.content_to_text(iterm=render_image_iterm), end="\n", flush=True) + await aprint(message.to_text(iterm=render_image_iterm), end="\n", flush=True) else: - await aprint(message.content_to_text(), end="\n", flush=True) + await aprint(message.to_text(), end="\n", flush=True) if message.models_usage: if output_stats: await aprint( diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index a9362ef48cb2..c0387ee84764 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -106,13 +106,13 @@ async def on_reset(self, cancellation_token: CancellationToken) -> None: class _UnknownMessageType(ChatMessage): content: str - def content_to_model_message(self) -> UserMessage: + def to_model_message(self) -> UserMessage: raise NotImplementedError("This message type is not supported.") - def content_to_model_text(self) -> str: + def to_model_text(self) -> str: raise NotImplementedError("This message type is not supported.") - def content_to_text(self) -> str: + def to_text(self) -> str: raise NotImplementedError("This message type is not supported.") diff --git a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py index bac10ee9c879..4213087d8ca6 100644 --- a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py @@ -134,8 +134,8 @@ async def test_magentic_one_group_chat_basic(runtime: AgentRuntime | None) -> No ) result = await team.run(task="Write a program that prints 'Hello, world!'") assert len(result.messages) == 5 - assert result.messages[2].content_to_text() == "Continue task" - assert 
result.messages[4].content_to_text() == "print('Hello, world!')" + assert result.messages[2].to_text() == "Continue task" + assert result.messages[4].to_text() == "print('Hello, world!')" assert result.stop_reason is not None and result.stop_reason == "Because" # Test save and load. diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py index bcbf9f061578..f569b8d967d0 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py @@ -89,7 +89,7 @@ def produced_message_types(self) -> Sequence[type[ChatMessage]]: async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: for chat_message in messages: - self._chat_history.append(chat_message.content_to_model_message()) + self._chat_history.append(chat_message.to_model_message()) try: _, content = await self._generate_reply(cancellation_token=cancellation_token) self._chat_history.append(AssistantMessage(content=content, source=self.name)) diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py index 6aa93e1f3184..419c517b3e6d 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py @@ -521,7 +521,7 @@ async def on_messages_stream( async def handle_incoming_message(self, message: ChatMessage, cancellation_token: CancellationToken) -> None: """Handle regular text messages by adding them to the thread.""" content: str | List[MessageContentPartParam] | None = None - llm_message = message.content_to_model_message() + llm_message = message.to_model_message() if isinstance(llm_message.content, str): content = llm_message.content else: diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py index f5cfcbf1d29f..8e48f187dea0 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py @@ -432,7 +432,7 @@ async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]: for chat_message in messages: - self._chat_history.append(chat_message.content_to_model_message()) + self._chat_history.append(chat_message.to_model_message()) self.inner_messages: List[AgentEvent | ChatMessage] = [] self.model_usage: List[RequestUsage] = [] diff --git a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py index 575250c513ae..8619d7ae789a 100644 --- a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py +++ b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/apprentice.py @@ -192,7 +192,7 @@ async def _assign_task_to_assistant_agent(self, task: str) -> Tuple[Any, Any]: task_result: TaskResult = await assistant_agent.run(task=TextMessage(content=task, 
source="User")) messages: Sequence[AgentEvent | ChatMessage] = task_result.messages message: AgentEvent | ChatMessage = messages[-1] - response_str = message.content_to_text() + response_str = message.to_text() # Log the model call self.logger.log_model_task( @@ -245,7 +245,7 @@ async def _assign_task_to_magentic_one(self, task: str) -> Tuple[str, str]: response_str_list: List[str] = [] for message in messages: - response_str_list.append(message.content_to_text()) + response_str_list.append(message.to_text()) response_str = "\n".join(response_str_list) self.logger.info("\n----- RESPONSE -----\n\n{}\n".format(response_str)) diff --git a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py index 890ca8c34e1e..806524ad8570 100644 --- a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py +++ b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/page_logger.py @@ -345,7 +345,7 @@ def log_model_task( messages: Sequence[AgentEvent | ChatMessage] = task_result.messages message = messages[-1] - response_str = message.content_to_text() + response_str = message.to_text() if not isinstance(response_str, str): response_str = "??" diff --git a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py index d052928057eb..3614a25c76ef 100644 --- a/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py +++ b/python/packages/autogen-ext/src/autogen_ext/ui/_rich_console.py @@ -61,7 +61,7 @@ def _extract_message_content(message: AgentEvent | ChatMessage) -> Tuple[List[st text_parts = [item for item in message.content if isinstance(item, str)] image_parts = [item for item in message.content if isinstance(item, Image)] else: - text_parts = [message.content_to_text()] + text_parts = [message.to_text()] image_parts = [] return text_parts, image_parts From 79ee7a4f66fb9af757c850b9970d2626f1789056 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Tue, 25 Mar 2025 14:08:53 -0700 Subject: [PATCH 26/26] lint --- .../src/autogen_agentchat/messages.py | 8 +- .../teams/_group_chat/_selector_group_chat.py | 2 +- .../agentchat-user-guide/custom-agents.ipynb | 4 +- .../agentchat-user-guide/migration-guide.md | 4 +- .../selector-group-chat.ipynb | 4 +- .../agentchat-user-guide/tracing.ipynb | 800 +++++++++--------- 6 files changed, 411 insertions(+), 411 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py index e50b80c59ffd..f03ae61e239d 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py @@ -109,8 +109,8 @@ def to_model_message(self) -> UserMessage: class TextChatMessage(ChatMessage, ABC): """Base class for all text-only :class:`ChatMessage` types. - It has implementations for :meth:`content_to_text`, :meth:`content_to_model_text`, - and :meth:`content_to_model_message` methods. + It has implementations for :meth:`to_text`, :meth:`to_model_text`, + and :meth:`to_model_message` methods. Inherit from this class if your message content type is a string. """ @@ -140,7 +140,7 @@ class AgentEvent(BaseMessage, ABC): and teams to user and applications. 
They are not used for agent-to-agent communication and are not expected to be processed by other agents. - You should override the :meth:`content_to_text` method if you want to provide + You should override the :meth:`to_text` method if you want to provide a custom rendering of the content. """ @@ -191,7 +191,7 @@ class MyMessageContent(BaseModel): source="agent1", ) - print(message.content_to_text()) # {"text": "Hello", "number": 42} + print(message.to_text()) # {"text": "Hello", "number": 42} """ diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index 03587920d389..7e1b68814ec3 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -405,7 +405,7 @@ def check_calculation(x: int, y: int, answer: int) -> str: ) def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None: - if len(messages) == 1 or messages[-1].content_to_text() == "Incorrect!": + if len(messages) == 1 or messages[-1].to_text() == "Incorrect!": return "Agent1" if messages[-1].source == "Agent1": return "Agent2" diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb index 58746cb700c2..3f4a66896d41 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb @@ -363,7 +363,7 @@ " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", " # Add messages to the model context\n", " for msg in messages:\n", - " await self._model_context.add_message(msg.content_to_model_message())\n", + " await self._model_context.add_message(msg.to_model_message())\n", "\n", " # Get conversation history\n", " history = [\n", @@ -602,7 +602,7 @@ " ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n", " # Add messages to the model context\n", " for msg in messages:\n", - " await self._model_context.add_message(msg.content_to_model_message())\n", + " await self._model_context.add_message(msg.to_model_message())\n", "\n", " # Get conversation history\n", " history = [\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md index 3dc779c0f217..9d505d7718a9 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md @@ -691,7 +691,7 @@ async def main() -> None: if user_input == "exit": break response = await assistant.on_messages([TextMessage(content=user_input, source="user")], CancellationToken()) - print("Assistant:", response.chat_message.content_to_text()) + print("Assistant:", response.chat_message.to_text()) await model_client.close() asyncio.run(main()) @@ -1331,7 +1331,7 @@ async def main() -> None: if user_input == "exit": break response = await assistant.on_messages([TextMessage(content=user_input, source="user")], CancellationToken()) - print("Assistant:", response.chat_message.content_to_text()) + print("Assistant:", 
response.chat_message.to_text()) await model_client.close() diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb index fa3e44b94428..fdc2b9a9d51f 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/selector-group-chat.ipynb @@ -666,9 +666,9 @@ " last_message = messages[-1]\n", " if last_message.source == planning_agent.name:\n", " participants = []\n", - " if web_search_agent.name in last_message.content_to_text():\n", + " if web_search_agent.name in last_message.to_text():\n", " participants.append(web_search_agent.name)\n", - " if data_analyst_agent.name in last_message.content_to_text():\n", + " if data_analyst_agent.name in last_message.to_text():\n", " participants.append(data_analyst_agent.name)\n", " if participants:\n", " return participants # SelectorGroupChat will select from the remaining two agents.\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tracing.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tracing.ipynb index 4bdaf1168efd..9c3014e59cc5 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tracing.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tracing.ipynb @@ -1,402 +1,402 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Tracing and Observability\n", - "\n", - "AutoGen has [built-in support for tracing](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/telemetry.html) and observability for collecting comprehensive records on the execution of your application. This feature is useful for debugging, performance analysis, and understanding the flow of your application.\n", - "\n", - "This capability is powered by the [OpenTelemetry](https://opentelemetry.io/) library, which means you can use any OpenTelemetry-compatible backend to collect and analyze traces.\n", - "\n", - "## Setup\n", - "\n", - "To begin, you need to install the OpenTelemetry Python package. You can do this using pip:\n", - "\n", - "```bash\n", - "pip install opentelemetry-sdk\n", - "```\n", - "\n", - "Once you have the SDK installed, the simplest way to set up tracing in AutoGen is to:\n", - "\n", - "1. Configure an OpenTelemetry tracer provider\n", - "2. Set up an exporter to send traces to your backend\n", - "3. Connect the tracer provider to the AutoGen runtime\n", - "\n", - "## Telemetry Backend\n", - "\n", - "To collect and view traces, you need to set up a telemetry backend. Several open-source options are available, including Jaeger, Zipkin. For this example, we will use Jaeger as our telemetry backend.\n", - "\n", - "For a quick start, you can run Jaeger locally using Docker:\n", - "\n", - "```bash\n", - "docker run -d --name jaeger \\\n", - " -e COLLECTOR_OTLP_ENABLED=true \\\n", - " -p 16686:16686 \\\n", - " -p 4317:4317 \\\n", - " -p 4318:4318 \\\n", - " jaegertracing/all-in-one:latest\n", - "```\n", - "\n", - "This command starts a Jaeger instance that listens on port 16686 for the Jaeger UI and port 4317 for the OpenTelemetry collector. 
You can access the Jaeger UI at `http://localhost:16686`.\n", - "\n", - "## Instrumenting an AgentChat Team\n", - "\n", - "In the following section, we will review how to enable tracing with an AutoGen GroupChat team. The AutoGen runtime already supports open telemetry (automatically logging message metadata). To begin, we will create a tracing service that will be used to instrument the AutoGen runtime. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from opentelemetry import trace\n", - "from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter\n", - "from opentelemetry.sdk.resources import Resource\n", - "from opentelemetry.sdk.trace import TracerProvider\n", - "from opentelemetry.sdk.trace.export import BatchSpanProcessor\n", - "\n", - "otel_exporter = OTLPSpanExporter(endpoint=\"http://localhost:4317\", insecure=True)\n", - "tracer_provider = TracerProvider(resource=Resource({\"service.name\": \"autogen-test-agentchat\"}))\n", - "span_processor = BatchSpanProcessor(otel_exporter)\n", - "tracer_provider.add_span_processor(span_processor)\n", - "trace.set_tracer_provider(tracer_provider)\n", - "\n", - "# we will get reference this tracer later using its service name\n", - "# tracer = trace.get_tracer(\"autogen-test-agentchat\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "All of the code to create a [team](./tutorial/teams.ipynb) should already be familiar to you. An important note here is that all AgentChat agents and teams are run using the AutoGen core API runtime. In turn, the runtime is already instrumented to log [runtime messaging events (metadata)] (https://github.com/microsoft/autogen/blob/main/python/packages/autogen-core/src/autogen_core/_telemetry/_tracing_config.py) including:\n", - "\n", - "- **create**: When a message is created\n", - "- **send**: When a message is sent\n", - "- **publish**: When a message is published\n", - "- **receive**: When a message is received\n", - "- **intercept**: When a message is intercepted\n", - "- **process**: When a message is processed\n", - "- **ack**: When a message is acknowledged \n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", - "from autogen_agentchat.teams import SelectorGroupChat\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_core import SingleThreadedAgentRuntime\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "\n", - "def search_web_tool(query: str) -> str:\n", - " if \"2006-2007\" in query:\n", - " return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \"\"\"\n", - " elif \"2007-2008\" in query:\n", - " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n", - " elif \"2008-2009\" in query:\n", - " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n", - " return \"No data found.\"\n", - "\n", - "\n", - "def percentage_change_tool(start: float, end: float) -> float:\n", - " return ((end - start) / start) * 100\n", - "\n", - "\n", - "async def main() -> None:\n", - 
" model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "\n", - " planning_agent = AssistantAgent(\n", - " \"PlanningAgent\",\n", - " description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n", - " model_client=model_client,\n", - " system_message=\"\"\"\n", - " You are a planning agent.\n", - " Your job is to break down complex tasks into smaller, manageable subtasks.\n", - " Your team members are:\n", - " WebSearchAgent: Searches for information\n", - " DataAnalystAgent: Performs calculations\n", - "\n", - " You only plan and delegate tasks - you do not execute them yourself.\n", - "\n", - " When assigning tasks, use this format:\n", - " 1. : \n", - "\n", - " After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n", - " \"\"\",\n", - " )\n", - "\n", - " web_search_agent = AssistantAgent(\n", - " \"WebSearchAgent\",\n", - " description=\"An agent for searching information on the web.\",\n", - " tools=[search_web_tool],\n", - " model_client=model_client,\n", - " system_message=\"\"\"\n", - " You are a web search agent.\n", - " Your only tool is search_tool - use it to find information.\n", - " You make only one search call at a time.\n", - " Once you have the results, you never do calculations based on them.\n", - " \"\"\",\n", - " )\n", - "\n", - " data_analyst_agent = AssistantAgent(\n", - " \"DataAnalystAgent\",\n", - " description=\"An agent for performing calculations.\",\n", - " model_client=model_client,\n", - " tools=[percentage_change_tool],\n", - " system_message=\"\"\"\n", - " You are a data analyst.\n", - " Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n", - " If you have not seen the data, ask for it.\n", - " \"\"\",\n", - " )\n", - "\n", - " text_mention_termination = TextMentionTermination(\"TERMINATE\")\n", - " max_messages_termination = MaxMessageTermination(max_messages=25)\n", - " termination = text_mention_termination | max_messages_termination\n", - "\n", - " selector_prompt = \"\"\"Select an agent to perform task.\n", - "\n", - " {roles}\n", - "\n", - " Current conversation context:\n", - " {history}\n", - "\n", - " Read the above conversation, then select an agent from {participants} to perform the next task.\n", - " Make sure the planner agent has assigned tasks before other agents start working.\n", - " Only select one agent.\n", - " \"\"\"\n", - "\n", - " task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"\n", - "\n", - " tracer = trace.get_tracer(\"autogen-test-agentchat\")\n", - " with tracer.start_as_current_span(\"runtime\"):\n", - " team = SelectorGroupChat(\n", - " [planning_agent, web_search_agent, data_analyst_agent],\n", - " model_client=model_client,\n", - " termination_condition=termination,\n", - " selector_prompt=selector_prompt,\n", - " allow_repeated_speaker=True,\n", - " )\n", - " await Console(team.run_stream(task=task))\n", - "\n", - " await model_client.close()\n", - "\n", - "\n", - "# asyncio.run(main())" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- user ----------\n", - "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 
and 2008-2009 seasons?\n", - "---------- PlanningAgent ----------\n", - "To accomplish this, we can break down the tasks as follows:\n", - "\n", - "1. WebSearchAgent: Search for the Miami Heat player with the highest points during the 2006-2007 NBA season.\n", - "2. WebSearchAgent: Find the total rebounds for the identified player in both the 2007-2008 and 2008-2009 NBA seasons.\n", - "3. DataAnalystAgent: Calculate the percentage change in total rebounds for the player between the 2007-2008 and 2008-2009 seasons.\n", - "\n", - "Once these tasks are complete, I will summarize the findings.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_PUhxZyR0CTlWCY4uwd5Zh3WO', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_PUhxZyR0CTlWCY4uwd5Zh3WO', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", - " Udonis Haslem: 844 points\n", - " Dwayne Wade: 1397 points\n", - " James Posey: 550 points\n", - " ...\n", - " \n", - "---------- WebSearchAgent ----------\n", - "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1,397 points. Now, let's find his total rebounds for the 2007-2008 and 2008-2009 NBA seasons.\n", - "---------- WebSearchAgent ----------\n", - "[FunctionCall(id='call_GL7KkWKj9ejIM8FfpgXe2dPk', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_X81huZoiA30zIjSAIDgb8ebe', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", - "---------- WebSearchAgent ----------\n", - "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_GL7KkWKj9ejIM8FfpgXe2dPk', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_X81huZoiA30zIjSAIDgb8ebe', is_error=False)]\n", - "---------- WebSearchAgent ----------\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", - "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionCall(id='call_kB50RkFVqHptA7FOf0lL2RS8', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", - "---------- DataAnalystAgent ----------\n", - "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_kB50RkFVqHptA7FOf0lL2RS8', is_error=False)]\n", - "---------- DataAnalystAgent ----------\n", - "85.98130841121495\n", - "---------- PlanningAgent ----------\n", - "The Miami Heat player with the highest points during the 2006-2007 NBA season was Dwayne Wade, who scored 1,397 points. 
The percentage increase in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) was approximately 86%.\n", - "\n", - "TERMINATE\n" - ] - } - ], - "source": [ - "await main()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can then use the Jaeger UI to view the traces collected from the application run above. \n", - "\n", - "![Jaeger UI](jaeger.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom Traces \n", - "\n", - "So far, we are logging only the default events that are generated by the AutoGen runtime (message created, publish etc). However, you can also create custom spans to log specific events in your application. \n", - "\n", - "In the example below, we will show how to log messages from the `RoundRobinGroupChat` team as they are generated by adding custom spans around the team to log runtime events and spans to log messages generated by the team.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "-- primary_agent -- : Leaves cascade like gold, \n", - "Whispering winds cool the earth.\n", - "primary_agent: Leaves cascade like gold, \n", - "Whispering winds cool the earth.\n", - "\n", - "-- critic_agent -- : Your haiku beautifully captures the essence of the fall season with vivid imagery. However, it appears to have six syllables in the second line, which should traditionally be five. Here's a revised version keeping the 5-7-5 syllable structure:\n", - "\n", - "Leaves cascade like gold, \n", - "Whispering winds cool the air. \n", - "\n", - "Please adjust the second line to reflect a five-syllable count. Thank you!\n", - "critic_agent: Your haiku beautifully captures the essence of the fall season with vivid imagery. However, it appears to have six syllables in the second line, which should traditionally be five. Here's a revised version keeping the 5-7-5 syllable structure:\n", - "\n", - "Leaves cascade like gold, \n", - "Whispering winds cool the air. \n", - "\n", - "Please adjust the second line to reflect a five-syllable count. Thank you!\n", - "\n", - "-- primary_agent -- : Leaves cascade like gold, \n", - "Whispering winds cool the air.\n", - "primary_agent: Leaves cascade like gold, \n", - "Whispering winds cool the air.\n", - "\n", - "-- critic_agent -- : APPROVE\n", - "critic_agent: APPROVE\n" - ] - } - ], - "source": [ - "from autogen_agentchat.base import TaskResult\n", - "from autogen_agentchat.conditions import ExternalTermination\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_core import CancellationToken\n", - "\n", - "\n", - "async def run_agents() -> None:\n", - " # Create an OpenAI model client.\n", - " model_client = OpenAIChatCompletionClient(model=\"gpt-4o-2024-08-06\")\n", - "\n", - " # Create the primary agent.\n", - " primary_agent = AssistantAgent(\n", - " \"primary_agent\",\n", - " model_client=model_client,\n", - " system_message=\"You are a helpful AI assistant.\",\n", - " )\n", - "\n", - " # Create the critic agent.\n", - " critic_agent = AssistantAgent(\n", - " \"critic_agent\",\n", - " model_client=model_client,\n", - " system_message=\"Provide constructive feedback. 
Respond with 'APPROVE' to when your feedbacks are addressed.\",\n", - " )\n", - "\n", - " # Define a termination condition that stops the task if the critic approves.\n", - " text_termination = TextMentionTermination(\"APPROVE\")\n", - "\n", - " tracer = trace.get_tracer(\"autogen-test-agentchat\")\n", - " with tracer.start_as_current_span(\"runtime_round_robin_events\"):\n", - " team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=text_termination)\n", - "\n", - " response_stream = team.run_stream(task=\"Write a 2 line haiku about the fall season\")\n", - " async for response in response_stream:\n", - " async for response in response_stream:\n", - " if not isinstance(response, TaskResult):\n", - " print(f\"\\n-- {response.source} -- : {response.content_to_text()}\")\n", - " with tracer.start_as_current_span(f\"agent_message.{response.source}\") as message_span:\n", - " message_span.set_attribute(\"agent.name\", response.source)\n", - " message_span.set_attribute(\"message.content\", response.content_to_text())\n", - " print(f\"{response.source}: {response.content_to_text()}\")\n", - "\n", - " await model_client.close()\n", - "\n", - "\n", - "await run_agents()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "In the code above, we create a new span for each message sent by the agent. We set attributes on the span to include the agent's name and the message content. This allows us to trace the flow of messages through our application and understand how they are processed." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Tracing and Observability\n", + "\n", + "AutoGen has [built-in support for tracing](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/telemetry.html) and observability for collecting comprehensive records on the execution of your application. This feature is useful for debugging, performance analysis, and understanding the flow of your application.\n", + "\n", + "This capability is powered by the [OpenTelemetry](https://opentelemetry.io/) library, which means you can use any OpenTelemetry-compatible backend to collect and analyze traces.\n", + "\n", + "## Setup\n", + "\n", + "To begin, you need to install the OpenTelemetry Python package. You can do this using pip:\n", + "\n", + "```bash\n", + "pip install opentelemetry-sdk\n", + "```\n", + "\n", + "Once you have the SDK installed, the simplest way to set up tracing in AutoGen is to:\n", + "\n", + "1. Configure an OpenTelemetry tracer provider\n", + "2. Set up an exporter to send traces to your backend\n", + "3. Connect the tracer provider to the AutoGen runtime\n", + "\n", + "## Telemetry Backend\n", + "\n", + "To collect and view traces, you need to set up a telemetry backend. Several open-source options are available, including Jaeger, Zipkin. 
For this example, we will use Jaeger as our telemetry backend.\n", + "\n", + "For a quick start, you can run Jaeger locally using Docker:\n", + "\n", + "```bash\n", + "docker run -d --name jaeger \\\n", + " -e COLLECTOR_OTLP_ENABLED=true \\\n", + " -p 16686:16686 \\\n", + " -p 4317:4317 \\\n", + " -p 4318:4318 \\\n", + " jaegertracing/all-in-one:latest\n", + "```\n", + "\n", + "This command starts a Jaeger instance that listens on port 16686 for the Jaeger UI and port 4317 for the OpenTelemetry collector. You can access the Jaeger UI at `http://localhost:16686`.\n", + "\n", + "## Instrumenting an AgentChat Team\n", + "\n", + "In the following section, we will review how to enable tracing with an AutoGen GroupChat team. The AutoGen runtime already supports OpenTelemetry (automatically logging message metadata). To begin, we will create a tracing service that will be used to instrument the AutoGen runtime. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from opentelemetry import trace\n", + "from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter\n", + "from opentelemetry.sdk.resources import Resource\n", + "from opentelemetry.sdk.trace import TracerProvider\n", + "from opentelemetry.sdk.trace.export import BatchSpanProcessor\n", + "\n", + "otel_exporter = OTLPSpanExporter(endpoint=\"http://localhost:4317\", insecure=True)\n", + "tracer_provider = TracerProvider(resource=Resource({\"service.name\": \"autogen-test-agentchat\"}))\n", + "span_processor = BatchSpanProcessor(otel_exporter)\n", + "tracer_provider.add_span_processor(span_processor)\n", + "trace.set_tracer_provider(tracer_provider)\n", + "\n", + "# we will get a reference to this tracer later using its service name\n", + "# tracer = trace.get_tracer(\"autogen-test-agentchat\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "All of the code to create a [team](./tutorial/teams.ipynb) should already be familiar to you. An important note here is that all AgentChat agents and teams are run using the AutoGen core API runtime. 
In turn, the runtime is already instrumented to log [runtime messaging events (metadata)](https://github.com/microsoft/autogen/blob/main/python/packages/autogen-core/src/autogen_core/_telemetry/_tracing_config.py) including:\n", + "\n", + "- **create**: When a message is created\n", + "- **send**: When a message is sent\n", + "- **publish**: When a message is published\n", + "- **receive**: When a message is received\n", + "- **intercept**: When a message is intercepted\n", + "- **process**: When a message is processed\n", + "- **ack**: When a message is acknowledged \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n", + "from autogen_agentchat.teams import SelectorGroupChat\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_core import SingleThreadedAgentRuntime\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "\n", + "def search_web_tool(query: str) -> str:\n", + " if \"2006-2007\" in query:\n", + " return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \"\"\"\n", + " elif \"2007-2008\" in query:\n", + " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n", + " elif \"2008-2009\" in query:\n", + " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n", + " return \"No data found.\"\n", + "\n", + "\n", + "def percentage_change_tool(start: float, end: float) -> float:\n", + " return ((end - start) / start) * 100\n", + "\n", + "\n", + "async def main() -> None:\n", + " model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "\n", + " planning_agent = AssistantAgent(\n", + " \"PlanningAgent\",\n", + " description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n", + " model_client=model_client,\n", + " system_message=\"\"\"\n", + " You are a planning agent.\n", + " Your job is to break down complex tasks into smaller, manageable subtasks.\n", + " Your team members are:\n", + " WebSearchAgent: Searches for information\n", + " DataAnalystAgent: Performs calculations\n", + "\n", + " You only plan and delegate tasks - you do not execute them yourself.\n", + "\n", + " When assigning tasks, use this format:\n", + " 1. <agent> : <task>\n",
+ "\n", + " After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n", + " \"\"\",\n", + " )\n", + "\n", + " web_search_agent = AssistantAgent(\n", + " \"WebSearchAgent\",\n", + " description=\"An agent for searching information on the web.\",\n", + " tools=[search_web_tool],\n", + " model_client=model_client,\n", + " system_message=\"\"\"\n", + " You are a web search agent.\n", + " Your only tool is search_tool - use it to find information.\n", + " You make only one search call at a time.\n", + " Once you have the results, you never do calculations based on them.\n", + " \"\"\",\n", + " )\n", + "\n", + " data_analyst_agent = AssistantAgent(\n", + " \"DataAnalystAgent\",\n", + " description=\"An agent for performing calculations.\",\n", + " model_client=model_client,\n", + " tools=[percentage_change_tool],\n", + " system_message=\"\"\"\n", + " You are a data analyst.\n", + " Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n", + " If you have not seen the data, ask for it.\n", + " \"\"\",\n", + " )\n", + "\n", + " text_mention_termination = TextMentionTermination(\"TERMINATE\")\n", + " max_messages_termination = MaxMessageTermination(max_messages=25)\n", + " termination = text_mention_termination | max_messages_termination\n", + "\n", + " selector_prompt = \"\"\"Select an agent to perform task.\n", + "\n", + " {roles}\n", + "\n", + " Current conversation context:\n", + " {history}\n", + "\n", + " Read the above conversation, then select an agent from {participants} to perform the next task.\n", + " Make sure the planner agent has assigned tasks before other agents start working.\n", + " Only select one agent.\n", + " \"\"\"\n", + "\n", + " task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"\n", + "\n", + " tracer = trace.get_tracer(\"autogen-test-agentchat\")\n", + " with tracer.start_as_current_span(\"runtime\"):\n", + " team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination,\n", + " selector_prompt=selector_prompt,\n", + " allow_repeated_speaker=True,\n", + " )\n", + " await Console(team.run_stream(task=task))\n", + "\n", + " await model_client.close()\n", + "\n", + "\n", + "# asyncio.run(main())" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To accomplish this, we can break down the tasks as follows:\n", + "\n", + "1. WebSearchAgent: Search for the Miami Heat player with the highest points during the 2006-2007 NBA season.\n", + "2. WebSearchAgent: Find the total rebounds for the identified player in both the 2007-2008 and 2008-2009 NBA seasons.\n", + "3. 
DataAnalystAgent: Calculate the percentage change in total rebounds for the player between the 2007-2008 and 2008-2009 seasons.\n", + "\n", + "Once these tasks are complete, I will summarize the findings.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_PUhxZyR0CTlWCY4uwd5Zh3WO', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_PUhxZyR0CTlWCY4uwd5Zh3WO', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \n", + "---------- WebSearchAgent ----------\n", + "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1,397 points. Now, let's find his total rebounds for the 2007-2008 and 2008-2009 NBA seasons.\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_GL7KkWKj9ejIM8FfpgXe2dPk', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_X81huZoiA30zIjSAIDgb8ebe', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_GL7KkWKj9ejIM8FfpgXe2dPk', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_X81huZoiA30zIjSAIDgb8ebe', is_error=False)]\n", + "---------- WebSearchAgent ----------\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n", + "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_kB50RkFVqHptA7FOf0lL2RS8', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_kB50RkFVqHptA7FOf0lL2RS8', is_error=False)]\n", + "---------- DataAnalystAgent ----------\n", + "85.98130841121495\n", + "---------- PlanningAgent ----------\n", + "The Miami Heat player with the highest points during the 2006-2007 NBA season was Dwayne Wade, who scored 1,397 points. The percentage increase in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) was approximately 86%.\n", + "\n", + "TERMINATE\n" + ] + } + ], + "source": [ + "await main()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can then use the Jaeger UI to view the traces collected from the application run above. 
\n", + "\n", + "![Jaeger UI](jaeger.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom Traces \n", + "\n", + "So far, we are logging only the default events that are generated by the AutoGen runtime (message created, publish etc). However, you can also create custom spans to log specific events in your application. \n", + "\n", + "In the example below, we will show how to log messages from the `RoundRobinGroupChat` team as they are generated by adding custom spans around the team to log runtime events and spans to log messages generated by the team.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "-- primary_agent -- : Leaves cascade like gold, \n", + "Whispering winds cool the earth.\n", + "primary_agent: Leaves cascade like gold, \n", + "Whispering winds cool the earth.\n", + "\n", + "-- critic_agent -- : Your haiku beautifully captures the essence of the fall season with vivid imagery. However, it appears to have six syllables in the second line, which should traditionally be five. Here's a revised version keeping the 5-7-5 syllable structure:\n", + "\n", + "Leaves cascade like gold, \n", + "Whispering winds cool the air. \n", + "\n", + "Please adjust the second line to reflect a five-syllable count. Thank you!\n", + "critic_agent: Your haiku beautifully captures the essence of the fall season with vivid imagery. However, it appears to have six syllables in the second line, which should traditionally be five. Here's a revised version keeping the 5-7-5 syllable structure:\n", + "\n", + "Leaves cascade like gold, \n", + "Whispering winds cool the air. \n", + "\n", + "Please adjust the second line to reflect a five-syllable count. Thank you!\n", + "\n", + "-- primary_agent -- : Leaves cascade like gold, \n", + "Whispering winds cool the air.\n", + "primary_agent: Leaves cascade like gold, \n", + "Whispering winds cool the air.\n", + "\n", + "-- critic_agent -- : APPROVE\n", + "critic_agent: APPROVE\n" + ] + } + ], + "source": [ + "from autogen_agentchat.base import TaskResult\n", + "from autogen_agentchat.conditions import ExternalTermination\n", + "from autogen_agentchat.teams import RoundRobinGroupChat\n", + "from autogen_core import CancellationToken\n", + "\n", + "\n", + "async def run_agents() -> None:\n", + " # Create an OpenAI model client.\n", + " model_client = OpenAIChatCompletionClient(model=\"gpt-4o-2024-08-06\")\n", + "\n", + " # Create the primary agent.\n", + " primary_agent = AssistantAgent(\n", + " \"primary_agent\",\n", + " model_client=model_client,\n", + " system_message=\"You are a helpful AI assistant.\",\n", + " )\n", + "\n", + " # Create the critic agent.\n", + " critic_agent = AssistantAgent(\n", + " \"critic_agent\",\n", + " model_client=model_client,\n", + " system_message=\"Provide constructive feedback. 
Respond with 'APPROVE' when your feedback is addressed.\",\n", + " )\n", + "\n", + " # Define a termination condition that stops the task if the critic approves.\n", + " text_termination = TextMentionTermination(\"APPROVE\")\n", + "\n", + " tracer = trace.get_tracer(\"autogen-test-agentchat\")\n", + " with tracer.start_as_current_span(\"runtime_round_robin_events\"):\n", + " team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=text_termination)\n", + "\n", + " response_stream = team.run_stream(task=\"Write a 2 line haiku about the fall season\")\n", + " async for response in response_stream:\n", + " if not isinstance(response, TaskResult):\n", + " print(f\"\\n-- {response.source} -- : {response.to_text()}\")\n", + " with tracer.start_as_current_span(f\"agent_message.{response.source}\") as message_span:\n", + " message_span.set_attribute(\"agent.name\", response.source)\n", + " message_span.set_attribute(\"message.content\", response.to_text())\n", + " print(f\"{response.source}: {response.to_text()}\")\n", + "\n", + " await model_client.close()\n", + "\n", + "\n", + "await run_agents()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "In the code above, we create a new span for each message sent by the agent. We set attributes on the span to include the agent's name and the message content. This allows us to trace the flow of messages through our application and understand how they are processed." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 }
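For reviewers who want to try the renamed message API end to end, here is a minimal sketch (not part of the patch itself) that exercises `to_text`, `to_model_text`, and `to_model_message` on a plain `TextMessage` and on a `StructuredMessage`. It assumes an `autogen-agentchat` build that includes these commits; `MyContent` is an illustrative Pydantic model, not a library type.

```python
# Minimal sketch of the renamed message API (assumes a build with these commits).
from pydantic import BaseModel

from autogen_agentchat.messages import StructuredMessage, TextMessage


class MyContent(BaseModel):  # illustrative content model, not part of the library
    text: str
    number: int


text_msg = TextMessage(content="Hello", source="user")
print(text_msg.to_text())           # rendered text (was content_to_text)
print(text_msg.to_model_text())     # model-facing text (was content_to_model_text)
print(text_msg.to_model_message())  # UserMessage for the model context (was content_to_model_message)

# StructuredMessage renders its Pydantic content as JSON, mirroring the
# docstring example in messages.py.
structured_msg = StructuredMessage[MyContent](
    content=MyContent(text="Hello", number=42),
    source="agent1",
)
print(structured_msg.to_text())  # e.g. {"text": "Hello", "number": 42}
```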