SlackAgent

autogen.agents.experimental.SlackAgent #

SlackAgent(name, system_message=None, *, bot_token, channel_id, has_writing_instructions=True, **kwargs)

Bases: ConversableAgent

An agent that can send and retrieve messages on Slack.

Initialize the SlackAgent.

PARAMETER DESCRIPTION
name

name of the agent.

TYPE: str

system_message

system message for the ChatCompletion inference.

TYPE: Optional[str] DEFAULT: None

bot_token

Bot User OAuth Token starting with "xoxb-".

TYPE: str

channel_id

Channel ID where messages will be sent.

TYPE: str

has_writing_instructions

Whether to add writing instructions to the system message. Defaults to True.

TYPE: bool DEFAULT: True

**kwargs

Additional keyword arguments passed to the parent ConversableAgent class.

TYPE: Any DEFAULT: {}

Source code in autogen/agents/experimental/slack/slack.py
def __init__(
    self,
    name: str,
    system_message: Optional[str] = None,
    *,
    bot_token: str,
    channel_id: str,
    has_writing_instructions: bool = True,
    **kwargs: Any,
) -> None:
    """Initialize the SlackAgent.

    Args:
        name: name of the agent.
        system_message: system message for the ChatCompletion inference.
        bot_token: Bot User OAuth Token starting with "xoxb-".
        channel_id: Channel ID where messages will be sent.
        has_writing_instructions: Whether to add writing instructions to the system message. Defaults to True.
        **kwargs: Additional keyword arguments passed to the parent ConversableAgent class.
    """
    slack_system_message = system_message or self.DEFAULT_SYSTEM_MESSAGE

    self._send_tool = SlackSendTool(bot_token=bot_token, channel_id=channel_id)
    self._retrieve_tool = SlackRetrieveTool(bot_token=bot_token, channel_id=channel_id)

    # Add formatting instructions
    if has_writing_instructions:
        formatting_instructions = (
            "\nFormat guidelines for Slack:\n"
            "1. Max message length: 40,000 characters\n"
            "2. Supports Markdown-like formatting:\n"
            "   - *text* for italic\n"
            "   - **text** for bold\n"
            "   - `code` for inline code\n"
            "   - ```code block``` for multi-line code\n"
            "3. Supports message threading for organized discussions\n"
            "4. Can use :emoji_name: for emoji reactions\n"
            "5. Supports block quotes with > prefix\n"
            "6. Can use <!here> or <!channel> for notifications"
        )

        slack_system_message = slack_system_message + formatting_instructions

    super().__init__(name=name, system_message=slack_system_message, **kwargs)

    self.register_for_llm()(self._send_tool)
    self.register_for_llm()(self._retrieve_tool)
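
For orientation, a minimal usage sketch follows. It is hedged: it assumes AG2's LLMConfig helper and its context-manager pattern, and the environment-variable names and model are illustrative, not prescribed by this API.

import os

from autogen import LLMConfig
from autogen.agents.experimental import SlackAgent

# Illustrative credentials: supply your own Bot User OAuth Token ("xoxb-...")
# and the ID of a channel the bot has been invited to.
llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini")

with llm_config:
    slack_agent = SlackAgent(
        name="slack_agent",
        bot_token=os.environ["SLACK_BOT_TOKEN"],
        channel_id=os.environ["SLACK_CHANNEL_ID"],
    )

As the constructor shows, both SlackSendTool and SlackRetrieveTool are registered for the LLM, so a chat partner can trigger Slack sends and retrievals through tool calls.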

name property #

name

Get the name of the agent.

description property writable #

description

Get the description of the agent.

system_message property #

system_message

Return the system message.

DEFAULT_CONFIG class-attribute instance-attribute #

DEFAULT_CONFIG = False

MAX_CONSECUTIVE_AUTO_REPLY class-attribute instance-attribute #

MAX_CONSECUTIVE_AUTO_REPLY = 100

DEFAULT_SUMMARY_PROMPT class-attribute instance-attribute #

DEFAULT_SUMMARY_PROMPT = 'Summarize the takeaway from the conversation. Do not add any introductory phrases.'

DEFAULT_SUMMARY_METHOD class-attribute instance-attribute #

DEFAULT_SUMMARY_METHOD = 'last_msg'

llm_config instance-attribute #

llm_config

silent instance-attribute #

silent = silent

run_executor instance-attribute #

run_executor = None

client_cache instance-attribute #

client_cache = None

human_input_mode instance-attribute #

human_input_mode = human_input_mode

reply_at_receive instance-attribute #

reply_at_receive = defaultdict(bool)

hook_lists instance-attribute #

hook_lists = {'process_last_received_message': [], 'process_all_messages_before_reply': [], 'process_message_before_send': [], 'update_agent_state': []}

code_executor property #

code_executor

The code executor used by this agent. Returns None if code execution is disabled.

chat_messages property #

chat_messages

A dictionary of conversations from agent to list of messages.

use_docker property #

use_docker

Bool value of whether to use docker to execute the code, or str value of the docker image name to use, or None when code execution is disabled.

tools property #

tools

Get the agent's tools (registered for LLM).

Note that this is a copy of the tools list; use add_tool and remove_tool to modify it.

function_map property #

function_map

Return the function map.

DEFAULT_SYSTEM_MESSAGE class-attribute instance-attribute #

DEFAULT_SYSTEM_MESSAGE = 'You are a helpful AI assistant that communicates through Slack. Remember that Slack uses Markdown-like formatting and has message length limits. Keep messages clear and concise, and consider using appropriate formatting when helpful.'

send #

send(message, recipient, request_reply=None, silent=False)

Send a message to another agent.

PARAMETER DESCRIPTION
message

message to be sent. The message could contain the following fields:

- content (str or List): Required, the content of the message. (Can be None)
- function_call (str): the name of the function to be called.
- name (str): the name of the function to be called.
- role (str): the role of the message; any role that is not "function" will be modified to "assistant".
- context (dict): the context of the message, which will be passed to OpenAIWrapper.create.

For example, one agent can send a message A as:

{
    "content": lambda context: context["use_tool_msg"],
    "context": {"use_tool_msg": "Use tool X if they are relevant."},
}

Next time, one agent can send a message B with a different "use_tool_msg". Then the content of message A will be refreshed to the new "use_tool_msg". So effectively, this provides a way for an agent to send a "link" and modify the content of the "link" later.

TYPE: dict or str

recipient

the recipient of the message.

TYPE: Agent

request_reply

whether to request a reply from the recipient.

TYPE: bool or None DEFAULT: None

silent

(Experimental) whether to print the message sent.

TYPE: bool or None DEFAULT: False

RAISES DESCRIPTION
ValueError

if the message can't be converted into a valid ChatCompletion message.

Source code in autogen/agentchat/conversable_agent.py
def send(
    self,
    message: Union[dict[str, Any], str],
    recipient: Agent,
    request_reply: Optional[bool] = None,
    silent: Optional[bool] = False,
):
    """Send a message to another agent.

    Args:
        message (dict or str): message to be sent.
            The message could contain the following fields:
            - content (str or List): Required, the content of the message. (Can be None)
            - function_call (str): the name of the function to be called.
            - name (str): the name of the function to be called.
            - role (str): the role of the message, any role that is not "function"
                will be modified to "assistant".
            - context (dict): the context of the message, which will be passed to
                [OpenAIWrapper.create](/docs/api-reference/autogen/OpenAIWrapper#autogen.OpenAIWrapper.create).
                For example, one agent can send a message A as:
    ```python
    {
        "content": lambda context: context["use_tool_msg"],
        "context": {"use_tool_msg": "Use tool X if they are relevant."},
    }
    ```
                Next time, one agent can send a message B with a different "use_tool_msg".
                Then the content of message A will be refreshed to the new "use_tool_msg".
                So effectively, this provides a way for an agent to send a "link" and modify
                the content of the "link" later.
        recipient (Agent): the recipient of the message.
        request_reply (bool or None): whether to request a reply from the recipient.
        silent (bool or None): (Experimental) whether to print the message sent.

    Raises:
        ValueError: if the message can't be converted into a valid ChatCompletion message.
    """
    message = self._process_message_before_send(message, recipient, ConversableAgent._is_silent(self, silent))
    # When the agent composes and sends the message, the role of the message is "assistant"
    # unless it's "function".
    valid = self._append_oai_message(message, "assistant", recipient, is_sending=True)
    if valid:
        recipient.receive(message, self, request_reply, silent)
    else:
        raise ValueError(
            "Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
        )
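
A small sketch of the context-refresh ("link") pattern described above, using the docstring's own message A. It assumes two LLM-less ConversableAgent instances; silent=True and request_reply=False keep the exchange one-way, since the callable content is only resolved when the message context reaches an LLM call.

from autogen import ConversableAgent

agent_a = ConversableAgent("a", llm_config=False, human_input_mode="NEVER")
agent_b = ConversableAgent("b", llm_config=False, human_input_mode="NEVER")

# Message A: its content is computed from the context it travels with, so a
# later message with a different "use_tool_msg" refreshes what A says.
agent_a.send(
    {
        "content": lambda context: context["use_tool_msg"],
        "context": {"use_tool_msg": "Use tool X if they are relevant."},
    },
    agent_b,
    request_reply=False,
    silent=True,
)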

a_send async #

a_send(message, recipient, request_reply=None, silent=False)

(async) Send a message to another agent.

PARAMETER DESCRIPTION
message

message to be sent. The message could contain the following fields:

- content (str or List): Required, the content of the message. (Can be None)
- function_call (str): the name of the function to be called.
- name (str): the name of the function to be called.
- role (str): the role of the message; any role that is not "function" will be modified to "assistant".
- context (dict): the context of the message, which will be passed to OpenAIWrapper.create.

For example, one agent can send a message A as:

{
    "content": lambda context: context["use_tool_msg"],
    "context": {"use_tool_msg": "Use tool X if they are relevant."},
}

Next time, one agent can send a message B with a different "use_tool_msg". Then the content of message A will be refreshed to the new "use_tool_msg". So effectively, this provides a way for an agent to send a "link" and modify the content of the "link" later.

TYPE: dict or str

recipient

the recipient of the message.

TYPE: Agent

request_reply

whether to request a reply from the recipient.

TYPE: bool or None DEFAULT: None

silent

(Experimental) whether to print the message sent.

TYPE: bool or None DEFAULT: False

RAISES DESCRIPTION
ValueError

if the message can't be converted into a valid ChatCompletion message.

Source code in autogen/agentchat/conversable_agent.py
async def a_send(
    self,
    message: Union[dict[str, Any], str],
    recipient: Agent,
    request_reply: Optional[bool] = None,
    silent: Optional[bool] = False,
):
    """(async) Send a message to another agent.

    Args:
        message (dict or str): message to be sent.
            The message could contain the following fields:
            - content (str or List): Required, the content of the message. (Can be None)
            - function_call (str): the name of the function to be called.
            - name (str): the name of the function to be called.
            - role (str): the role of the message, any role that is not "function"
                will be modified to "assistant".
            - context (dict): the context of the message, which will be passed to
                [OpenAIWrapper.create](/docs/api-reference/autogen/OpenAIWrapper#autogen.OpenAIWrapper.create).
                For example, one agent can send a message A as:
    ```python
    {
        "content": lambda context: context["use_tool_msg"],
        "context": {"use_tool_msg": "Use tool X if they are relevant."},
    }
    ```
                Next time, one agent can send a message B with a different "use_tool_msg".
                Then the content of message A will be refreshed to the new "use_tool_msg".
                So effectively, this provides a way for an agent to send a "link" and modify
                the content of the "link" later.
        recipient (Agent): the recipient of the message.
        request_reply (bool or None): whether to request a reply from the recipient.
        silent (bool or None): (Experimental) whether to print the message sent.

    Raises:
        ValueError: if the message can't be converted into a valid ChatCompletion message.
    """
    message = self._process_message_before_send(message, recipient, ConversableAgent._is_silent(self, silent))
    # When the agent composes and sends the message, the role of the message is "assistant"
    # unless it's "function".
    valid = self._append_oai_message(message, "assistant", recipient, is_sending=True)
    if valid:
        await recipient.a_receive(message, self, request_reply, silent)
    else:
        raise ValueError(
            "Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
        )
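
The async variant must be awaited inside an event loop; a minimal sketch with illustrative LLM-less agents:

import asyncio

from autogen import ConversableAgent

async def main() -> None:
    agent_a = ConversableAgent("a", llm_config=False, human_input_mode="NEVER")
    agent_b = ConversableAgent("b", llm_config=False, human_input_mode="NEVER")
    # request_reply=False delivers the message without awaiting a reply.
    await agent_a.a_send("Ping from an async chat.", agent_b, request_reply=False)

asyncio.run(main())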

receive #

receive(message, sender, request_reply=None, silent=False)

Receive a message from another agent.

Once a message is received, this function sends a reply to the sender or stops. The reply can be generated automatically or entered manually by a human.

PARAMETER DESCRIPTION
message

message from the sender. If the type is dict, it may contain the following reserved fields (either content or function_call need to be provided).

1. "content": content of the message, can be None.
2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
3. "tool_calls": a list of dictionaries containing the function name and arguments.
4. "role": role of the message, can be "assistant", "user", "function", "tool". This field is only needed to distinguish between "function" or "assistant"/"user".
5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
6. "context" (dict): the context of the message, which will be passed to OpenAIWrapper.create.

TYPE: dict or str

sender

sender of an Agent instance.

TYPE: Agent

request_reply

whether a reply is requested from the sender. If None, the value is determined by self.reply_at_receive[sender].

TYPE: bool or None DEFAULT: None

silent

(Experimental) whether to print the message received.

TYPE: bool or None DEFAULT: False

RAISES DESCRIPTION
ValueError

if the message can't be converted into a valid ChatCompletion message.

Source code in autogen/agentchat/conversable_agent.py
def receive(
    self,
    message: Union[dict[str, Any], str],
    sender: Agent,
    request_reply: Optional[bool] = None,
    silent: Optional[bool] = False,
):
    """Receive a message from another agent.

    Once a message is received, this function sends a reply to the sender or stop.
    The reply can be generated automatically or entered manually by a human.

    Args:
        message (dict or str): message from the sender. If the type is dict, it may contain the following reserved fields (either content or function_call need to be provided).
            1. "content": content of the message, can be None.
            2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
            3. "tool_calls": a list of dictionaries containing the function name and arguments.
            4. "role": role of the message, can be "assistant", "user", "function", "tool".
                This field is only needed to distinguish between "function" or "assistant"/"user".
            5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
            6. "context" (dict): the context of the message, which will be passed to
                [OpenAIWrapper.create](/docs/api-reference/autogen/OpenAIWrapper#autogen.OpenAIWrapper.create).
        sender: sender of an Agent instance.
        request_reply (bool or None): whether a reply is requested from the sender.
            If None, the value is determined by `self.reply_at_receive[sender]`.
        silent (bool or None): (Experimental) whether to print the message received.

    Raises:
        ValueError: if the message can't be converted into a valid ChatCompletion message.
    """
    self._process_received_message(message, sender, silent)
    if request_reply is False or (request_reply is None and self.reply_at_receive[sender] is False):
        return
    reply = self.generate_reply(messages=self.chat_messages[sender], sender=sender)
    if reply is not None:
        self.send(reply, sender, silent=silent)

a_receive async #

a_receive(message, sender, request_reply=None, silent=False)

(async) Receive a message from another agent.

Once a message is received, this function sends a reply to the sender or stops. The reply can be generated automatically or entered manually by a human.

PARAMETER DESCRIPTION
message

message from the sender. If the type is dict, it may contain the following reserved fields (either content or function_call need to be provided).

1. "content": content of the message, can be None.
2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
3. "tool_calls": a list of dictionaries containing the function name and arguments.
4. "role": role of the message, can be "assistant", "user", "function". This field is only needed to distinguish between "function" or "assistant"/"user".
5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
6. "context" (dict): the context of the message, which will be passed to OpenAIWrapper.create.

TYPE: dict or str

sender

sender of an Agent instance.

TYPE: Agent

request_reply

whether a reply is requested from the sender. If None, the value is determined by self.reply_at_receive[sender].

TYPE: bool or None DEFAULT: None

silent

(Experimental) whether to print the message received.

TYPE: bool or None DEFAULT: False

RAISES DESCRIPTION
ValueError

if the message can't be converted into a valid ChatCompletion message.

Source code in autogen/agentchat/conversable_agent.py
async def a_receive(
    self,
    message: Union[dict[str, Any], str],
    sender: Agent,
    request_reply: Optional[bool] = None,
    silent: Optional[bool] = False,
):
    """(async) Receive a message from another agent.

    Once a message is received, this function sends a reply to the sender or stop.
    The reply can be generated automatically or entered manually by a human.

    Args:
        message (dict or str): message from the sender. If the type is dict, it may contain the following reserved fields (either content or function_call need to be provided).
            1. "content": content of the message, can be None.
            2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
            3. "tool_calls": a list of dictionaries containing the function name and arguments.
            4. "role": role of the message, can be "assistant", "user", "function".
                This field is only needed to distinguish between "function" or "assistant"/"user".
            5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
            6. "context" (dict): the context of the message, which will be passed to
                [OpenAIWrapper.create](/docs/api-reference/autogen/OpenAIWrapper#autogen.OpenAIWrapper.create).
        sender: sender of an Agent instance.
        request_reply (bool or None): whether a reply is requested from the sender.
            If None, the value is determined by `self.reply_at_receive[sender]`.
        silent (bool or None): (Experimental) whether to print the message received.

    Raises:
        ValueError: if the message can't be converted into a valid ChatCompletion message.
    """
    self._process_received_message(message, sender, silent)
    if request_reply is False or (request_reply is None and self.reply_at_receive[sender] is False):
        return
    reply = await self.a_generate_reply(messages=self.chat_messages[sender], sender=sender)
    if reply is not None:
        await self.a_send(reply, sender, silent=silent)

generate_reply #

generate_reply(messages=None, sender=None, **kwargs)

Reply based on the conversation history and the sender.

Either messages or sender must be provided. Register a reply_func with None as one trigger for it to be activated when messages is non-empty and sender is None. Use registered auto reply functions to generate replies. By default, the following functions are checked in order:

1. check_termination_and_human_reply
2. generate_function_call_reply (deprecated in favor of tool_calls)
3. generate_tool_calls_reply
4. generate_code_execution_reply
5. generate_oai_reply

Every function returns a tuple (final, reply). When a function returns final=False, the next function will be checked. So by default, termination and human reply will be checked first. If not terminating and the human reply is skipped, the agent executes a function or code and returns the result. AI replies are generated only when no code execution is performed.

PARAMETER DESCRIPTION
messages

a list of messages in the conversation history.

TYPE: Optional[list[dict[str, Any]]] DEFAULT: None

sender

sender of an Agent instance.

TYPE: Optional[Agent] DEFAULT: None

**kwargs

Additional arguments to customize reply generation. Supported kwargs:

- exclude (List[Callable[..., Any]]): A list of reply functions to exclude from the reply generation process. Functions in this list will be skipped even if they would normally be triggered.

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
Optional[Union[str, dict[str, Any]]]

str or dict or None: reply. None if no reply is generated.

Source code in autogen/agentchat/conversable_agent.py
def generate_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional["Agent"] = None,
    **kwargs: Any,
) -> Optional[Union[str, dict[str, Any]]]:
    """Reply based on the conversation history and the sender.

    Either messages or sender must be provided.
    Register a reply_func with `None` as one trigger for it to be activated when `messages` is non-empty and `sender` is `None`.
    Use registered auto reply functions to generate replies.
    By default, the following functions are checked in order:
    1. check_termination_and_human_reply
    2. generate_function_call_reply (deprecated in favor of tool_calls)
    3. generate_tool_calls_reply
    4. generate_code_execution_reply
    5. generate_oai_reply
    Every function returns a tuple (final, reply).
    When a function returns final=False, the next function will be checked.
    So by default, termination and human reply will be checked first.
    If not terminating and human reply is skipped, execute function or code and return the result.
    AI replies are generated only when no code execution is performed.

    Args:
        messages: a list of messages in the conversation history.
        sender: sender of an Agent instance.
        **kwargs (Any): Additional arguments to customize reply generation. Supported kwargs:
            - exclude (List[Callable[..., Any]]): A list of reply functions to exclude from
            the reply generation process. Functions in this list will be skipped even if
            they would normally be triggered.

    Returns:
        str or dict or None: reply. None if no reply is generated.
    """
    if all((messages is None, sender is None)):
        error_msg = f"Either {messages=} or {sender=} must be provided."
        logger.error(error_msg)
        raise AssertionError(error_msg)

    if messages is None:
        messages = self._oai_messages[sender]

    # Call the hookable method that gives registered hooks a chance to update agent state, used for their context variables.
    self.update_agent_state_before_reply(messages)

    # Call the hookable method that gives registered hooks a chance to process the last message.
    # Message modifications do not affect the incoming messages or self._oai_messages.
    messages = self.process_last_received_message(messages)

    # Call the hookable method that gives registered hooks a chance to process all messages.
    # Message modifications do not affect the incoming messages or self._oai_messages.
    messages = self.process_all_messages_before_reply(messages)

    for reply_func_tuple in self._reply_func_list:
        reply_func = reply_func_tuple["reply_func"]
        if "exclude" in kwargs and reply_func in kwargs["exclude"]:
            continue
        if inspect.iscoroutinefunction(reply_func):
            continue
        if self._match_trigger(reply_func_tuple["trigger"], sender):
            final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
            if logging_enabled():
                log_event(
                    self,
                    "reply_func_executed",
                    reply_func_module=reply_func.__module__,
                    reply_func_name=reply_func.__name__,
                    final=final,
                    reply=reply,
                )
            if final:
                return reply
    return self._default_auto_reply
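
A minimal sketch of the exclude kwarg (hedged: it assumes an OpenAI-compatible model is configured, and the model name is illustrative). Passing the class-level function object matches how the default reply functions are registered, so it is skipped by reference.

from autogen import ConversableAgent, LLMConfig

agent = ConversableAgent(
    "assistant",
    llm_config=LLMConfig(api_type="openai", model="gpt-4o-mini"),
)

# sender=None, so reply functions registered with a None trigger fire;
# exclude skips code execution even though it would normally be checked.
reply = agent.generate_reply(
    messages=[{"role": "user", "content": "Say hello in one word."}],
    exclude=[ConversableAgent.generate_code_execution_reply],
)
print(reply)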

a_generate_reply async #

a_generate_reply(messages=None, sender=None, **kwargs)

(async) Reply based on the conversation history and the sender.

Either messages or sender must be provided. Register a reply_func with None as one trigger for it to be activated when messages is non-empty and sender is None. Use registered auto reply functions to generate replies. By default, the following functions are checked in order:

1. check_termination_and_human_reply
2. generate_function_call_reply
3. generate_tool_calls_reply
4. generate_code_execution_reply
5. generate_oai_reply

Every function returns a tuple (final, reply). When a function returns final=False, the next function will be checked. So by default, termination and human reply will be checked first. If not terminating and the human reply is skipped, the agent executes a function or code and returns the result. AI replies are generated only when no code execution is performed.

PARAMETER DESCRIPTION
messages

a list of messages in the conversation history.

TYPE: Optional[list[dict[str, Any]]] DEFAULT: None

sender

sender of an Agent instance.

TYPE: Optional[Agent] DEFAULT: None

**kwargs

Additional arguments to customize reply generation. Supported kwargs:

- exclude (List[Callable[..., Any]]): A list of reply functions to exclude from the reply generation process. Functions in this list will be skipped even if they would normally be triggered.

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
Union[str, dict[str, Any], None]

str or dict or None: reply. None if no reply is generated.

Source code in autogen/agentchat/conversable_agent.py
async def a_generate_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional["Agent"] = None,
    **kwargs: Any,
) -> Union[str, dict[str, Any], None]:
    """(async) Reply based on the conversation history and the sender.

    Either messages or sender must be provided.
    Register a reply_func with `None` as one trigger for it to be activated when `messages` is non-empty and `sender` is `None`.
    Use registered auto reply functions to generate replies.
    By default, the following functions are checked in order:
    1. check_termination_and_human_reply
    2. generate_function_call_reply
    3. generate_tool_calls_reply
    4. generate_code_execution_reply
    5. generate_oai_reply
    Every function returns a tuple (final, reply).
    When a function returns final=False, the next function will be checked.
    So by default, termination and human reply will be checked first.
    If not terminating and human reply is skipped, execute function or code and return the result.
    AI replies are generated only when no code execution is performed.

    Args:
        messages: a list of messages in the conversation history.
        sender: sender of an Agent instance.
        **kwargs (Any): Additional arguments to customize reply generation. Supported kwargs:
            - exclude (List[Callable[..., Any]]): A list of reply functions to exclude from
            the reply generation process. Functions in this list will be skipped even if
            they would normally be triggered.

    Returns:
        str or dict or None: reply. None if no reply is generated.
    """
    if all((messages is None, sender is None)):
        error_msg = f"Either {messages=} or {sender=} must be provided."
        logger.error(error_msg)
        raise AssertionError(error_msg)

    if messages is None:
        messages = self._oai_messages[sender]

    # Call the hookable method that gives registered hooks a chance to update agent state, used for their context variables.
    self.update_agent_state_before_reply(messages)

    # Call the hookable method that gives registered hooks a chance to process the last message.
    # Message modifications do not affect the incoming messages or self._oai_messages.
    messages = self.process_last_received_message(messages)

    # Call the hookable method that gives registered hooks a chance to process all messages.
    # Message modifications do not affect the incoming messages or self._oai_messages.
    messages = self.process_all_messages_before_reply(messages)

    for reply_func_tuple in self._reply_func_list:
        reply_func = reply_func_tuple["reply_func"]
        if "exclude" in kwargs and reply_func in kwargs["exclude"]:
            continue

        if self._match_trigger(reply_func_tuple["trigger"], sender):
            if inspect.iscoroutinefunction(reply_func):
                final, reply = await reply_func(
                    self, messages=messages, sender=sender, config=reply_func_tuple["config"]
                )
            else:
                final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
            if final:
                return reply
    return self._default_auto_reply

update_system_message #

update_system_message(system_message)

Update the system message.

PARAMETER DESCRIPTION
system_message

system message for the ChatCompletion inference.

TYPE: str

Source code in autogen/agentchat/conversable_agent.py
def update_system_message(self, system_message: str) -> None:
    """Update the system message.

    Args:
        system_message (str): system message for the ChatCompletion inference.
    """
    self._oai_system_message[0]["content"] = system_message

register_reply #

register_reply(trigger, reply_func, position=0, config=None, reset_config=None, *, ignore_async_in_sync_chat=False, remove_other_reply_funcs=False)

Register a reply function.

The reply function will be called when the trigger matches the sender. The function registered later will be checked earlier by default. To change the order, set the position to a positive integer.

Both sync and async reply functions can be registered. The sync reply function will be triggered from both sync and async chats. However, an async reply function will only be triggered from async chats (initiated with ConversableAgent.a_initiate_chat). If an async reply function is registered and a chat is initialized with a sync function, ignore_async_in_sync_chat determines the behaviour as follows: if ignore_async_in_sync_chat is set to False (default value), an exception will be raised, and if ignore_async_in_sync_chat is set to True, the reply function will be ignored.

PARAMETER DESCRIPTION
trigger

the trigger:

- If a class is provided, the reply function will be called when the sender is an instance of the class.
- If a string is provided, the reply function will be called when the sender's name matches the string.
- If an agent instance is provided, the reply function will be called when the sender is the agent instance.
- If a callable is provided, the reply function will be called when the callable returns True.
- If a list is provided, the reply function will be called when any of the triggers in the list is activated.
- If None is provided, the reply function will be called only when the sender is None.

Note: Be sure to register None as a trigger if you would like to trigger an auto-reply function with non-empty messages and sender=None.

TYPE: Agent class, str, Agent instance, callable, or list

reply_func

the reply function. The function takes a recipient agent, a list of messages, a sender agent and a config as input and returns a reply message.

def reply_func(
    recipient: ConversableAgent,
    messages: Optional[List[Dict]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None]]:

TYPE: Callable

position

the position of the reply function in the reply function list. The function registered later will be checked earlier by default. To change the order, set the position to a positive integer.

TYPE: int DEFAULT: 0

config

the config to be passed to the reply function. When an agent is reset, the config will be reset to the original value.

TYPE: Any DEFAULT: None

reset_config

the function to reset the config. The function returns None. Signature: def reset_config(config: Any)

TYPE: Callable DEFAULT: None

ignore_async_in_sync_chat

whether to ignore the async reply function in sync chats. If False, an exception will be raised if an async reply function is registered and a chat is initialized with a sync function.

TYPE: bool DEFAULT: False

remove_other_reply_funcs

whether to remove other reply functions when registering this reply function.

TYPE: bool DEFAULT: False

Source code in autogen/agentchat/conversable_agent.py
def register_reply(
    self,
    trigger: Union[type[Agent], str, Agent, Callable[[Agent], bool], list],
    reply_func: Callable,
    position: int = 0,
    config: Optional[Any] = None,
    reset_config: Optional[Callable[..., Any]] = None,
    *,
    ignore_async_in_sync_chat: bool = False,
    remove_other_reply_funcs: bool = False,
):
    """Register a reply function.

    The reply function will be called when the trigger matches the sender.
    The function registered later will be checked earlier by default.
    To change the order, set the position to a positive integer.

    Both sync and async reply functions can be registered. The sync reply function will be triggered
    from both sync and async chats. However, an async reply function will only be triggered from async
    chats (initiated with `ConversableAgent.a_initiate_chat`). If an `async` reply function is registered
    and a chat is initialized with a sync function, `ignore_async_in_sync_chat` determines the behaviour as follows:
        if `ignore_async_in_sync_chat` is set to `False` (default value), an exception will be raised, and
        if `ignore_async_in_sync_chat` is set to `True`, the reply function will be ignored.

    Args:
        trigger (Agent class, str, Agent instance, callable, or list): the trigger.
            If a class is provided, the reply function will be called when the sender is an instance of the class.
            If a string is provided, the reply function will be called when the sender's name matches the string.
            If an agent instance is provided, the reply function will be called when the sender is the agent instance.
            If a callable is provided, the reply function will be called when the callable returns True.
            If a list is provided, the reply function will be called when any of the triggers in the list is activated.
            If None is provided, the reply function will be called only when the sender is None.
            Note: Be sure to register `None` as a trigger if you would like to trigger an auto-reply function with non-empty messages and `sender=None`.
        reply_func (Callable): the reply function.
            The function takes a recipient agent, a list of messages, a sender agent and a config as input and returns a reply message.

            ```python
            def reply_func(
                recipient: ConversableAgent,
                messages: Optional[List[Dict]] = None,
                sender: Optional[Agent] = None,
                config: Optional[Any] = None,
            ) -> Tuple[bool, Union[str, Dict, None]]:
            ```
        position (int): the position of the reply function in the reply function list.
            The function registered later will be checked earlier by default.
            To change the order, set the position to a positive integer.
        config (Any): the config to be passed to the reply function.
            When an agent is reset, the config will be reset to the original value.
        reset_config (Callable): the function to reset the config.
            The function returns None. Signature: ```def reset_config(config: Any)```
        ignore_async_in_sync_chat (bool): whether to ignore the async reply function in sync chats. If `False`, an exception
            will be raised if an async reply function is registered and a chat is initialized with a sync
            function.
        remove_other_reply_funcs (bool): whether to remove other reply functions when registering this reply function.
    """
    if not isinstance(trigger, (type, str, Agent, Callable, list)):
        raise ValueError("trigger must be a class, a string, an agent, a callable or a list.")
    if remove_other_reply_funcs:
        self._reply_func_list.clear()
    self._reply_func_list.insert(
        position,
        {
            "trigger": trigger,
            "reply_func": reply_func,
            "config": copy.copy(config),
            "init_config": config,
            "reset_config": reset_config,
            "ignore_async_in_sync_chat": ignore_async_in_sync_chat and inspect.iscoroutinefunction(reply_func),
        },
    )
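
A short sketch of registering a custom reply function with the documented signature (the function and agent names are illustrative):

from typing import Any, Optional, Union

from autogen import Agent, ConversableAgent

def reply_with_ack(
    recipient: ConversableAgent,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> tuple[bool, Union[str, dict[str, Any], None]]:
    # final=True ends the reply-function chain with this reply.
    return True, "Acknowledged."

agent = ConversableAgent("acker", llm_config=False, human_input_mode="NEVER")
# Trigger on any Agent sender, and on None so that generate_reply(messages=...)
# with sender=None also reaches this function.
agent.register_reply([Agent, None], reply_with_ack, position=1)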

replace_reply_func #

replace_reply_func(old_reply_func, new_reply_func)

Replace a registered reply function with a new one.

PARAMETER DESCRIPTION
old_reply_func

the old reply function to be replaced.

TYPE: Callable

new_reply_func

the new reply function to replace the old one.

TYPE: Callable

Source code in autogen/agentchat/conversable_agent.py
def replace_reply_func(self, old_reply_func: Callable, new_reply_func: Callable):
    """Replace a registered reply function with a new one.

    Args:
        old_reply_func (Callable): the old reply function to be replaced.
        new_reply_func (Callable): the new reply function to replace the old one.
    """
    for f in self._reply_func_list:
        if f["reply_func"] == old_reply_func:
            f["reply_func"] = new_reply_func

register_nested_chats #

register_nested_chats(chat_queue, trigger, reply_func_from_nested_chats='summary_from_nested_chats', position=2, use_async=None, **kwargs)

Register a nested chat reply function.

PARAMETER DESCRIPTION
chat_queue

a list of chat objects to be initiated. If use_async is used, then all messages in chat_queue must have a chat_id associated with them.

TYPE: list

trigger

refer to register_reply for details.

TYPE: Agent class, str, Agent instance, callable, or list

reply_func_from_nested_chats

the reply function for the nested chat. The function takes a chat_queue for the nested chat, a recipient agent, a list of messages, a sender agent, and a config as input and returns a reply message. Defaults to "summary_from_nested_chats", which corresponds to a built-in reply function that gets a summary from the nested chat_queue.

def reply_func_from_nested_chats(
    chat_queue: List[Dict],
    recipient: ConversableAgent,
    messages: Optional[List[Dict]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None]]:

TYPE: (Callable, str) DEFAULT: 'summary_from_nested_chats'

position

Refer to register_reply for details. Defaults to 2, which means termination and human reply are checked first, then the registered nested chat reply.

TYPE: int DEFAULT: 2

use_async

Uses a_initiate_chats internally to start nested chats. If the original chat is initiated with a_initiate_chats, set this to True so the nested chats do not run synchronously.

TYPE: Union[bool, None] DEFAULT: None

kwargs

Refer to register_reply for details.

TYPE: Any DEFAULT: {}

Source code in autogen/agentchat/conversable_agent.py
def register_nested_chats(
    self,
    chat_queue: list[dict[str, Any]],
    trigger: Union[type[Agent], str, Agent, Callable[[Agent], bool], list],
    reply_func_from_nested_chats: Union[str, Callable[..., Any]] = "summary_from_nested_chats",
    position: int = 2,
    use_async: Union[bool, None] = None,
    **kwargs: Any,
) -> None:
    """Register a nested chat reply function.

    Args:
        chat_queue (list): a list of chat objects to be initiated. If use_async is used, then all messages in chat_queue must have a chat-id associated with them.
        trigger (Agent class, str, Agent instance, callable, or list): refer to `register_reply` for details.
        reply_func_from_nested_chats (Callable, str): the reply function for the nested chat.
            The function takes a chat_queue for nested chat, recipient agent, a list of messages, a sender agent and a config as input and returns a reply message.
            Default to "summary_from_nested_chats", which corresponds to a built-in reply function that get summary from the nested chat_queue.
            ```python
            def reply_func_from_nested_chats(
                chat_queue: List[Dict],
                recipient: ConversableAgent,
                messages: Optional[List[Dict]] = None,
                sender: Optional[Agent] = None,
                config: Optional[Any] = None,
            ) -> Tuple[bool, Union[str, Dict, None]]:
            ```
        position (int): Ref to `register_reply` for details. Default to 2. It means we first check the termination and human reply, then check the registered nested chat reply.
        use_async: Uses a_initiate_chats internally to start nested chats. If the original chat is initiated with a_initiate_chats, you may set this to true so nested chats do not run in sync.
        kwargs: Ref to `register_reply` for details.
    """
    if use_async:
        for chat in chat_queue:
            if chat.get("chat_id") is None:
                raise ValueError("chat_id is required for async nested chats")

    if use_async:
        if reply_func_from_nested_chats == "summary_from_nested_chats":
            reply_func_from_nested_chats = self._a_summary_from_nested_chats
        if not callable(reply_func_from_nested_chats) or not inspect.iscoroutinefunction(
            reply_func_from_nested_chats
        ):
            raise ValueError("reply_func_from_nested_chats must be a callable and a coroutine")

        async def wrapped_reply_func(recipient, messages=None, sender=None, config=None):
            return await reply_func_from_nested_chats(chat_queue, recipient, messages, sender, config)

    else:
        if reply_func_from_nested_chats == "summary_from_nested_chats":
            reply_func_from_nested_chats = self._summary_from_nested_chats
        if not callable(reply_func_from_nested_chats):
            raise ValueError("reply_func_from_nested_chats must be a callable")

        def wrapped_reply_func(recipient, messages=None, sender=None, config=None):
            return reply_func_from_nested_chats(chat_queue, recipient, messages, sender, config)

    functools.update_wrapper(wrapped_reply_func, reply_func_from_nested_chats)

    self.register_reply(
        trigger,
        wrapped_reply_func,
        position,
        kwargs.get("config"),
        kwargs.get("reset_config"),
        ignore_async_in_sync_chat=(
            not use_async if use_async is not None else kwargs.get("ignore_async_in_sync_chat")
        ),
    )
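
A brief sketch of a typical call, where writer, reviewer, and user are assumed to be previously constructed ConversableAgent instances and the chat settings are illustrative:

# Whenever `writer` is triggered to reply to `user`, first run a one-turn
# nested chat with `reviewer` and use its summary as writer's reply.
writer.register_nested_chats(
    chat_queue=[
        {
            "recipient": reviewer,
            "message": "Review the draft above and suggest improvements.",
            "max_turns": 1,
            "summary_method": "last_msg",
        }
    ],
    trigger=user,
)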

get_context #

get_context(key, default=None)

Get a context variable by key.

PARAMETER DESCRIPTION
key

The key to look up

TYPE: str

default

Value to return if key doesn't exist

TYPE: Any DEFAULT: None

RETURNS DESCRIPTION
Any

The value associated with the key, or default if not found.

Source code in autogen/agentchat/conversable_agent.py
def get_context(self, key: str, default: Any = None) -> Any:
    """Get a context variable by key.

    Args:
        key: The key to look up
        default: Value to return if key doesn't exist
    Returns:
        The value associated with the key, or default if not found
    """
    return self._context_variables.get(key, default)

set_context #

set_context(key, value)

Set a context variable.

PARAMETER DESCRIPTION
key

The key to set

TYPE: str

value

The value to associate with the key

TYPE: Any

Source code in autogen/agentchat/conversable_agent.py
def set_context(self, key: str, value: Any) -> None:
    """Set a context variable.

    Args:
        key: The key to set
        value: The value to associate with the key
    """
    self._context_variables[key] = value

update_context #

update_context(context_variables)

Update multiple context variables at once.

PARAMETER DESCRIPTION
context_variables

Dictionary of variables to update/add

TYPE: dict[str, Any]

Source code in autogen/agentchat/conversable_agent.py
def update_context(self, context_variables: dict[str, Any]) -> None:
    """Update multiple context variables at once.

    Args:
        context_variables: Dictionary of variables to update/add
    """
    self._context_variables.update(context_variables)

pop_context #

pop_context(key, default=None)

Remove and return a context variable.

PARAMETER DESCRIPTION
key

The key to remove

TYPE: str

default

Value to return if key doesn't exist

TYPE: Any DEFAULT: None

RETURNS DESCRIPTION
Any

The value that was removed, or default if key not found.

Source code in autogen/agentchat/conversable_agent.py
def pop_context(self, key: str, default: Any = None) -> Any:
    """Remove and return a context variable.

    Args:
        key: The key to remove
        default: Value to return if key doesn't exist
    Returns:
        The value that was removed, or default if key not found
    """
    return self._context_variables.pop(key, default)
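
Taken together, get_context, set_context, update_context, and pop_context act as a small per-agent key-value store; a minimal sketch:

from autogen import ConversableAgent

agent = ConversableAgent("ctx_demo", llm_config=False, human_input_mode="NEVER")

agent.set_context("user_name", "Ada")
agent.update_context({"retries": 0, "locale": "en"})

print(agent.get_context("user_name"))       # "Ada"
print(agent.get_context("missing", "n/a"))  # "n/a" (default for absent key)
print(agent.pop_context("retries"))         # 0, and the key is removed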

update_max_consecutive_auto_reply #

update_max_consecutive_auto_reply(value, sender=None)

Update the maximum number of consecutive auto replies.

PARAMETER DESCRIPTION
value

the maximum number of consecutive auto replies.

TYPE: int

sender

when the sender is provided, only update the max_consecutive_auto_reply for that sender.

TYPE: Agent DEFAULT: None

Source code in autogen/agentchat/conversable_agent.py
def update_max_consecutive_auto_reply(self, value: int, sender: Optional[Agent] = None):
    """Update the maximum number of consecutive auto replies.

    Args:
        value (int): the maximum number of consecutive auto replies.
        sender (Agent): when the sender is provided, only update the max_consecutive_auto_reply for that sender.
    """
    if sender is None:
        self._max_consecutive_auto_reply = value
        for k in self._max_consecutive_auto_reply_dict:
            self._max_consecutive_auto_reply_dict[k] = value
    else:
        self._max_consecutive_auto_reply_dict[sender] = value

max_consecutive_auto_reply #

max_consecutive_auto_reply(sender=None)

The maximum number of consecutive auto replies.

Source code in autogen/agentchat/conversable_agent.py
def max_consecutive_auto_reply(self, sender: Optional[Agent] = None) -> int:
    """The maximum number of consecutive auto replies."""
    return self._max_consecutive_auto_reply if sender is None else self._max_consecutive_auto_reply_dict[sender]

chat_messages_for_summary #

chat_messages_for_summary(agent)

A list of messages as a conversation to summarize.

Source code in autogen/agentchat/conversable_agent.py
def chat_messages_for_summary(self, agent: Agent) -> list[dict[str, Any]]:
    """A list of messages as a conversation to summarize."""
    return self._oai_messages[agent]

last_message #

last_message(agent=None)

The last message exchanged with the agent.

PARAMETER DESCRIPTION
agent

The agent in the conversation. If None and more than one agent's conversations are found, an error will be raised. If None and only one conversation is found, the last message of the only conversation will be returned.

TYPE: Agent DEFAULT: None

RETURNS DESCRIPTION
Optional[dict[str, Any]]

The last message exchanged with the agent.

Source code in autogen/agentchat/conversable_agent.py
def last_message(self, agent: Optional[Agent] = None) -> Optional[dict[str, Any]]:
    """The last message exchanged with the agent.

    Args:
        agent (Agent): The agent in the conversation.
            If None and more than one agent's conversations are found, an error will be raised.
            If None and only one conversation is found, the last message of the only conversation will be returned.

    Returns:
        The last message exchanged with the agent.
    """
    if agent is None:
        n_conversations = len(self._oai_messages)
        if n_conversations == 0:
            return None
        if n_conversations == 1:
            for conversation in self._oai_messages.values():
                return conversation[-1]
        raise ValueError("More than one conversation is found. Please specify the sender to get the last message.")
    if agent not in self._oai_messages:
        raise KeyError(
            f"The agent '{agent.name}' is not present in any conversation. No history available for this agent."
        )
    return self._oai_messages[agent][-1]

initiate_chat #

initiate_chat(recipient, clear_history=True, silent=False, cache=None, max_turns=None, summary_method=DEFAULT_SUMMARY_METHOD, summary_args={}, message=None, **kwargs)

Initiate a chat with the recipient agent.

Reset the consecutive auto reply counter. If clear_history is True, the chat history with the recipient agent will be cleared.

PARAMETER DESCRIPTION
recipient

the recipient agent.

TYPE: ConversableAgent

clear_history

whether to clear the chat history with the agent. Default is True.

TYPE: bool DEFAULT: True

silent

(Experimental) whether to print the messages for this conversation. Default is False.

TYPE: bool or None DEFAULT: False

cache

the cache client to be used for this conversation. Default is None.

TYPE: AbstractCache or None DEFAULT: None

max_turns

the maximum number of turns for the chat between the two agents. One turn means one conversation round trip. Note that this is different from max_consecutive_auto_reply which is the maximum number of consecutive auto replies; and it is also different from max_rounds in GroupChat which is the maximum number of rounds in a group chat session. If max_turns is set to None, the chat will continue until a termination condition is met. Default is None.

TYPE: int or None DEFAULT: None

summary_method

a method to get a summary from the chat. Default is DEFAULT_SUMMARY_METHOD, i.e., "last_msg". Supported strings are "last_msg" and "reflection_with_llm":

- when set to "last_msg", it returns the last message of the dialog as the summary.
- when set to "reflection_with_llm", it returns a summary extracted using an llm client. llm_config must be set in either the recipient or sender.

A callable summary_method should take the recipient and sender agent in a chat as input and return a string of summary. E.g.,

def my_summary_method(
    sender: ConversableAgent,
    recipient: ConversableAgent,
    summary_args: dict,
):
    return recipient.last_message(sender)["content"]

TYPE: str or callable DEFAULT: DEFAULT_SUMMARY_METHOD

summary_args

a dictionary of arguments to be passed to the summary_method. One example key is "summary_prompt", and value is a string of text used to prompt a LLM-based agent (the sender or recipient agent) to reflect on the conversation and extract a summary when summary_method is "reflection_with_llm". The default summary_prompt is DEFAULT_SUMMARY_PROMPT, i.e., "Summarize takeaway from the conversation. Do not add any introductory phrases. If the intended request is NOT properly addressed, please point it out." Another available key is "summary_role", which is the role of the message sent to the agent in charge of summarizing. Default is "system".

TYPE: dict DEFAULT: {}

message

the initial message to be sent to the recipient. Needs to be provided; otherwise, input() will be called to get the initial message.

- If a string or a dict is provided, it will be used as the initial message. generate_init_message is called to generate the initial message for the agent based on this string and the context. If a dict, it may contain the following reserved fields (either content or tool_calls need to be provided).

    1. "content": content of the message, can be None.
    2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
    3. "tool_calls": a list of dictionaries containing the function name and arguments.
    4. "role": role of the message, can be "assistant", "user", "function". This field is only needed to distinguish between "function" or "assistant"/"user".
    5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
    6. "context" (dict): the context of the message, which will be passed to
        [OpenAIWrapper.create](/docs/api-reference/autogen/OpenAIWrapper#autogen.OpenAIWrapper.create).

- If a callable is provided, it will be called to get the initial message in the form of a string or a dict. If the returned type is dict, it may contain the reserved fields mentioned above.

    Example of a callable message (returning a string):

    def my_message(
        sender: ConversableAgent, recipient: ConversableAgent, context: dict
    ) -> Union[str, Dict]:
        carryover = context.get("carryover", "")
        if isinstance(carryover, list):
            carryover = carryover[-1]
        final_msg = "Write a blogpost." + "\nContext: \n" + carryover
        return final_msg

    Example of a callable message (returning a dict):

    def my_message(
        sender: ConversableAgent, recipient: ConversableAgent, context: dict
    ) -> Union[str, Dict]:
        final_msg = {}
        carryover = context.get("carryover", "")
        if isinstance(carryover, list):
            carryover = carryover[-1]
        final_msg["content"] = "Write a blogpost." + "\nContext: \n" + carryover
        final_msg["context"] = {"prefix": "Today I feel"}
        return final_msg

TYPE: (str, dict or Callable) DEFAULT: None

**kwargs

any additional information. It has the following reserved fields:

- "carryover": a string or a list of strings specifying the carryover information to be passed to this chat. If provided, we will combine this carryover (by attaching a "context: " string and the carryover content after the message content) with the "message" content when generating the initial chat message in generate_init_message.
- "verbose": a boolean to specify whether to print the message and carryover in a chat. Default is False.

TYPE: Any DEFAULT: {}

RAISES DESCRIPTION
RuntimeError

if any async reply functions are registered and not ignored in sync chat.

RETURNS DESCRIPTION
ChatResult

a ChatResult object.

TYPE: ChatResult

Source code in autogen/agentchat/conversable_agent.py
def initiate_chat(
    self,
    recipient: "ConversableAgent",
    clear_history: bool = True,
    silent: Optional[bool] = False,
    cache: Optional[AbstractCache] = None,
    max_turns: Optional[int] = None,
    summary_method: Optional[Union[str, Callable[..., Any]]] = DEFAULT_SUMMARY_METHOD,
    summary_args: Optional[dict[str, Any]] = {},
    message: Optional[Union[dict[str, Any], str, Callable[..., Any]]] = None,
    **kwargs: Any,
) -> ChatResult:
    """Initiate a chat with the recipient agent.

    Reset the consecutive auto reply counter.
    If `clear_history` is True, the chat history with the recipient agent will be cleared.


    Args:
        recipient: the recipient agent.
        clear_history (bool): whether to clear the chat history with the agent. Default is True.
        silent (bool or None): (Experimental) whether to print the messages for this conversation. Default is False.
        cache (AbstractCache or None): the cache client to be used for this conversation. Default is None.
        max_turns (int or None): the maximum number of turns for the chat between the two agents. One turn means one conversation round trip. Note that this is different from
            [max_consecutive_auto_reply](#max-consecutive-auto-reply) which is the maximum number of consecutive auto replies; and it is also different from [max_rounds in GroupChat](./groupchat) which is the maximum number of rounds in a group chat session.
            If max_turns is set to None, the chat will continue until a termination condition is met. Default is None.
        summary_method (str or callable): a method to get a summary from the chat. Default is DEFAULT_SUMMARY_METHOD, i.e., "last_msg".
            Supported strings are "last_msg" and "reflection_with_llm":
                - when set to "last_msg", it returns the last message of the dialog as the summary.
                - when set to "reflection_with_llm", it returns a summary extracted using an llm client.
                    `llm_config` must be set in either the recipient or sender.

            A callable summary_method should take the recipient and sender agent in a chat as input and return a string of summary. E.g.,

            ```python
            def my_summary_method(
                sender: ConversableAgent,
                recipient: ConversableAgent,
                summary_args: dict,
            ):
                return recipient.last_message(sender)["content"]
            ```
        summary_args (dict): a dictionary of arguments to be passed to the summary_method.
            One example key is "summary_prompt", and value is a string of text used to prompt a LLM-based agent (the sender or recipient agent) to reflect
            on the conversation and extract a summary when summary_method is "reflection_with_llm".
            The default summary_prompt is DEFAULT_SUMMARY_PROMPT, i.e., "Summarize takeaway from the conversation. Do not add any introductory phrases. If the intended request is NOT properly addressed, please point it out."
            Another available key is "summary_role", which is the role of the message sent to the agent in charge of summarizing. Default is "system".
        message (str, dict or Callable): the initial message to be sent to the recipient. Needs to be provided. Otherwise, input() will be called to get the initial message.
            - If a string or a dict is provided, it will be used as the initial message. `generate_init_message` is called to generate the initial message for the agent based on this string and the context.
                If dict, it may contain the following reserved fields (either content or tool_calls need to be provided).

                    1. "content": content of the message, can be None.
                    2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
                    3. "tool_calls": a list of dictionaries containing the function name and arguments.
                    4. "role": role of the message, can be "assistant", "user", "function".
                        This field is only needed to distinguish between "function" or "assistant"/"user".
                    5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
                    6. "context" (dict): the context of the message, which will be passed to
                        [OpenAIWrapper.create](/docs/api-reference/autogen/OpenAIWrapper#autogen.OpenAIWrapper.create).

            - If a callable is provided, it will be called to get the initial message in the form of a string or a dict.
                If the returned type is dict, it may contain the reserved fields mentioned above.

                Example of a callable message (returning a string):

                ```python
                def my_message(
                    sender: ConversableAgent, recipient: ConversableAgent, context: dict
                ) -> Union[str, Dict]:
                    carryover = context.get("carryover", "")
                    if isinstance(carryover, list):
                        carryover = carryover[-1]
                    final_msg = "Write a blogpost." + "\\nContext: \\n" + carryover
                    return final_msg
                ```

                Example of a callable message (returning a dict):

                ```python
                def my_message(
                    sender: ConversableAgent, recipient: ConversableAgent, context: dict
                ) -> Union[str, Dict]:
                    final_msg = {}
                    carryover = context.get("carryover", "")
                    if isinstance(carryover, list):
                        carryover = carryover[-1]
                    final_msg["content"] = "Write a blogpost." + "\\nContext: \\n" + carryover
                    final_msg["context"] = {"prefix": "Today I feel"}
                    return final_msg
                ```
        **kwargs: any additional information. It has the following reserved fields:
            - "carryover": a string or a list of string to specify the carryover information to be passed to this chat.
                If provided, we will combine this carryover (by attaching a "context: " string and the carryover content after the message content) with the "message" content when generating the initial chat
                message in `generate_init_message`.
            - "verbose": a boolean to specify whether to print the message and carryover in a chat. Default is False.

    Raises:
        RuntimeError: if any async reply functions are registered and not ignored in sync chat.

    Returns:
        ChatResult: a ChatResult object.
    """
    _chat_info = locals().copy()
    _chat_info["sender"] = self
    consolidate_chat_info(_chat_info, uniform_sender=self)
    for agent in [self, recipient]:
        agent._raise_exception_on_async_reply_functions()
        agent.previous_cache = agent.client_cache
        agent.client_cache = cache
    if isinstance(max_turns, int):
        self._prepare_chat(recipient, clear_history, reply_at_receive=False)
        for _ in range(max_turns):
            if _ == 0:
                if isinstance(message, Callable):
                    msg2send = message(_chat_info["sender"], _chat_info["recipient"], kwargs)
                else:
                    msg2send = self.generate_init_message(message, **kwargs)
            else:
                msg2send = self.generate_reply(messages=self.chat_messages[recipient], sender=recipient)
            if msg2send is None:
                break
            self.send(msg2send, recipient, request_reply=True, silent=silent)
    else:
        self._prepare_chat(recipient, clear_history)
        if isinstance(message, Callable):
            msg2send = message(_chat_info["sender"], _chat_info["recipient"], kwargs)
        else:
            msg2send = self.generate_init_message(message, **kwargs)
        self.send(msg2send, recipient, silent=silent)
    summary = self._summarize_chat(
        summary_method,
        summary_args,
        recipient,
        cache=cache,
    )
    for agent in [self, recipient]:
        agent.client_cache = agent.previous_cache
        agent.previous_cache = None
    chat_result = ChatResult(
        chat_history=self.chat_messages[recipient],
        summary=summary,
        cost=gather_usage_summary([self, recipient]),
        human_input=self._human_input,
    )
    return chat_result
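For orientation, here is a minimal, hedged usage sketch. The agent names, the model entry in `llm_config`, and the prompt are illustrative assumptions, not part of this API's source:

```python
from autogen import ConversableAgent

llm_config = {"model": "gpt-4o-mini"}  # assumption: any working model config

assistant = ConversableAgent(name="assistant", llm_config=llm_config)
user = ConversableAgent(
    name="user",
    llm_config=False,          # this side never calls an LLM
    human_input_mode="NEVER",  # never prompt a human for input
)

result = user.initiate_chat(
    assistant,
    message="Write a haiku about autumn.",
    max_turns=2,                          # two round trips, then stop
    summary_method="last_msg",            # summary = last message of the dialog
    carryover="Keep it under 20 words.",  # reserved kwarg, appended as context
)
print(result.summary)
```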

a_initiate_chat async #

a_initiate_chat(recipient, clear_history=True, silent=False, cache=None, max_turns=None, summary_method=DEFAULT_SUMMARY_METHOD, summary_args={}, message=None, **kwargs)

(async) Initiate a chat with the recipient agent.

Reset the consecutive auto reply counter. If clear_history is True, the chat history with the recipient agent will be cleared. a_generate_init_message is called to generate the initial message for the agent.

Args: Please refer to initiate_chat.

RETURNS DESCRIPTION
ChatResult

a ChatResult object.

TYPE: ChatResult

Source code in autogen/agentchat/conversable_agent.py
async def a_initiate_chat(
    self,
    recipient: "ConversableAgent",
    clear_history: bool = True,
    silent: Optional[bool] = False,
    cache: Optional[AbstractCache] = None,
    max_turns: Optional[int] = None,
    summary_method: Optional[Union[str, Callable[..., Any]]] = DEFAULT_SUMMARY_METHOD,
    summary_args: Optional[dict[str, Any]] = {},
    message: Optional[Union[str, Callable[..., Any]]] = None,
    **kwargs: Any,
) -> ChatResult:
    """(async) Initiate a chat with the recipient agent.

    Reset the consecutive auto reply counter.
    If `clear_history` is True, the chat history with the recipient agent will be cleared.
    `a_generate_init_message` is called to generate the initial message for the agent.

    Args: Please refer to `initiate_chat`.

    Returns:
        ChatResult: a ChatResult object.
    """
    _chat_info = locals().copy()
    _chat_info["sender"] = self
    consolidate_chat_info(_chat_info, uniform_sender=self)
    for agent in [self, recipient]:
        agent.previous_cache = agent.client_cache
        agent.client_cache = cache
    if isinstance(max_turns, int):
        self._prepare_chat(recipient, clear_history, reply_at_receive=False)
        for _ in range(max_turns):
            if _ == 0:
                if isinstance(message, Callable):
                    msg2send = message(_chat_info["sender"], _chat_info["recipient"], kwargs)
                else:
                    msg2send = await self.a_generate_init_message(message, **kwargs)
            else:
                msg2send = await self.a_generate_reply(messages=self.chat_messages[recipient], sender=recipient)
            if msg2send is None:
                break
            await self.a_send(msg2send, recipient, request_reply=True, silent=silent)
    else:
        self._prepare_chat(recipient, clear_history)
        if isinstance(message, Callable):
            msg2send = message(_chat_info["sender"], _chat_info["recipient"], kwargs)
        else:
            msg2send = await self.a_generate_init_message(message, **kwargs)
        await self.a_send(msg2send, recipient, silent=silent)
    summary = self._summarize_chat(
        summary_method,
        summary_args,
        recipient,
        cache=cache,
    )
    for agent in [self, recipient]:
        agent.client_cache = agent.previous_cache
        agent.previous_cache = None
    chat_result = ChatResult(
        chat_history=self.chat_messages[recipient],
        summary=summary,
        cost=gather_usage_summary([self, recipient]),
        human_input=self._human_input,
    )
    return chat_result
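As a hedged sketch, the async variant is awaited the same way; this reuses the hypothetical `user` and `assistant` agents from the sketch above:

```python
import asyncio

async def main() -> None:
    result = await user.a_initiate_chat(
        assistant,
        message="Summarize the benefits of unit tests.",
        max_turns=1,
    )
    print(result.summary)

asyncio.run(main())
```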

initiate_chats #

initiate_chats(chat_queue)

(Experimental) Initiate chats with multiple agents.

PARAMETER DESCRIPTION
chat_queue

a list of dictionaries containing the information of the chats. Each dictionary should contain the input arguments for initiate_chat.

TYPE: List[Dict]

Returns: a list of ChatResult objects corresponding to the finished chats in the chat_queue.

Source code in autogen/agentchat/conversable_agent.py
def initiate_chats(self, chat_queue: list[dict[str, Any]]) -> list[ChatResult]:
    """(Experimental) Initiate chats with multiple agents.

    Args:
        chat_queue (List[Dict]): a list of dictionaries containing the information of the chats.
            Each dictionary should contain the input arguments for [`initiate_chat`](#initiate-chat)

    Returns: a list of ChatResult objects corresponding to the finished chats in the chat_queue.
    """
    _chat_queue = self._check_chat_queue_for_sender(chat_queue)
    self._finished_chats = initiate_chats(_chat_queue)
    return self._finished_chats
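A minimal sketch of a chat queue, assuming the hypothetical agents from the earlier sketches; every key in each dictionary is forwarded as a keyword argument of initiate_chat:

```python
chat_results = user.initiate_chats([
    {"recipient": assistant, "message": "Draft an outline for a blog post.", "max_turns": 1},
    {
        "recipient": assistant,
        "message": "Now write the introduction.",
        "max_turns": 1,
        "summary_method": "reflection_with_llm",  # needs llm_config on one side
    },
])
for chat_result in chat_results:
    print(chat_result.summary)
```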

a_initiate_chats async #

a_initiate_chats(chat_queue)

(Experimental) Asynchronous counterpart of initiate_chats; returns the finished ChatResult objects keyed by chat index.

Source code in autogen/agentchat/conversable_agent.py
async def a_initiate_chats(self, chat_queue: list[dict[str, Any]]) -> dict[int, ChatResult]:
    _chat_queue = self._check_chat_queue_for_sender(chat_queue)
    self._finished_chats = await a_initiate_chats(_chat_queue)
    return self._finished_chats

get_chat_results #

get_chat_results(chat_index=None)

Return the results of the finished chats: the ChatResult at chat_index if an index is given, otherwise all finished chats.

Source code in autogen/agentchat/conversable_agent.py
def get_chat_results(self, chat_index: Optional[int] = None) -> Union[list[ChatResult], ChatResult]:
    """A summary from the finished chats of particular agents."""
    if chat_index is not None:
        return self._finished_chats[chat_index]
    else:
        return self._finished_chats

reset #

reset()

Reset the agent.

Source code in autogen/agentchat/conversable_agent.py
def reset(self) -> None:
    """Reset the agent."""
    self.clear_history()
    self.reset_consecutive_auto_reply_counter()
    self.stop_reply_at_receive()
    if self.client is not None:
        self.client.clear_usage_summary()
    for reply_func_tuple in self._reply_func_list:
        if reply_func_tuple["reset_config"] is not None:
            reply_func_tuple["reset_config"](reply_func_tuple["config"])
        else:
            reply_func_tuple["config"] = copy.copy(reply_func_tuple["init_config"])

stop_reply_at_receive #

stop_reply_at_receive(sender=None)

Reset the reply_at_receive of the sender.

Source code in autogen/agentchat/conversable_agent.py
def stop_reply_at_receive(self, sender: Optional[Agent] = None):
    """Reset the reply_at_receive of the sender."""
    if sender is None:
        self.reply_at_receive.clear()
    else:
        self.reply_at_receive[sender] = False

reset_consecutive_auto_reply_counter #

reset_consecutive_auto_reply_counter(sender=None)

Reset the consecutive_auto_reply_counter of the sender.

Source code in autogen/agentchat/conversable_agent.py
def reset_consecutive_auto_reply_counter(self, sender: Optional[Agent] = None):
    """Reset the consecutive_auto_reply_counter of the sender."""
    if sender is None:
        self._consecutive_auto_reply_counter.clear()
    else:
        self._consecutive_auto_reply_counter[sender] = 0

clear_history #

clear_history(recipient=None, nr_messages_to_preserve=None)

Clear the chat history of the agent.

PARAMETER DESCRIPTION
recipient

the agent whose chat history should be cleared. If None, clear the chat history with all agents.

TYPE: Optional[Agent] DEFAULT: None

nr_messages_to_preserve

the number of newest messages to preserve in the chat history.

TYPE: Optional[int] DEFAULT: None

Source code in autogen/agentchat/conversable_agent.py
def clear_history(self, recipient: Optional[Agent] = None, nr_messages_to_preserve: Optional[int] = None):
    """Clear the chat history of the agent.

    Args:
        recipient: the agent whose chat history should be cleared. If None, clear the chat history with all agents.
        nr_messages_to_preserve: the number of newest messages to preserve in the chat history.
    """
    iostream = IOStream.get_default()
    if recipient is None:
        no_messages_preserved = 0
        if nr_messages_to_preserve:
            for key in self._oai_messages:
                nr_messages_to_preserve_internal = nr_messages_to_preserve
                # if breaking history between function call and function response, save function call message
                # additionally, otherwise openai will return error
                first_msg_to_save = self._oai_messages[key][-nr_messages_to_preserve_internal]
                if "tool_responses" in first_msg_to_save:
                    nr_messages_to_preserve_internal += 1
                    # clear_conversable_agent_history.print_preserving_message(iostream.print)
                    no_messages_preserved += 1
                # Remove messages from history except last `nr_messages_to_preserve` messages.
                self._oai_messages[key] = self._oai_messages[key][-nr_messages_to_preserve_internal:]
            iostream.send(
                ClearConversableAgentHistoryMessage(agent=self, no_messages_preserved=no_messages_preserved)
            )
        else:
            self._oai_messages.clear()
    else:
        self._oai_messages[recipient].clear()
        # clear_conversable_agent_history.print_warning(iostream.print)
        if nr_messages_to_preserve:
            iostream.send(ClearConversableAgentHistoryWarningMessage(recipient=self))

generate_oai_reply #

generate_oai_reply(messages=None, sender=None, config=None)

Generate a reply using autogen.oai.

Source code in autogen/agentchat/conversable_agent.py
def generate_oai_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional[Agent] = None,
    config: Optional[OpenAIWrapper] = None,
) -> tuple[bool, Optional[Union[str, dict[str, Any]]]]:
    """Generate a reply using autogen.oai."""
    client = self.client if config is None else config
    if client is None:
        return False, None
    if messages is None:
        messages = self._oai_messages[sender]
    extracted_response = self._generate_oai_reply_from_client(
        client, self._oai_system_message + messages, self.client_cache
    )
    return (False, None) if extracted_response is None else (True, extracted_response)

a_generate_oai_reply async #

a_generate_oai_reply(messages=None, sender=None, config=None)

Generate a reply using autogen.oai asynchronously.

Source code in autogen/agentchat/conversable_agent.py
async def a_generate_oai_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> tuple[bool, Optional[Union[str, dict[str, Any]]]]:
    """Generate a reply using autogen.oai asynchronously."""
    iostream = IOStream.get_default()

    def _generate_oai_reply(
        self, iostream: IOStream, *args: Any, **kwargs: Any
    ) -> tuple[bool, Optional[Union[str, dict[str, Any]]]]:
        with IOStream.set_default(iostream):
            return self.generate_oai_reply(*args, **kwargs)

    return await asyncio.get_event_loop().run_in_executor(
        None,
        functools.partial(
            _generate_oai_reply, self=self, iostream=iostream, messages=messages, sender=sender, config=config
        ),
    )

generate_code_execution_reply #

generate_code_execution_reply(messages=None, sender=None, config=None)

Generate a reply using code execution.

Source code in autogen/agentchat/conversable_agent.py
def generate_code_execution_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Union[dict[str, Any], Literal[False]]] = None,
):
    """Generate a reply using code execution."""
    code_execution_config = config if config is not None else self._code_execution_config
    if code_execution_config is False:
        return False, None
    if messages is None:
        messages = self._oai_messages[sender]
    last_n_messages = code_execution_config.pop("last_n_messages", "auto")

    if not (isinstance(last_n_messages, (int, float)) and last_n_messages >= 0) and last_n_messages != "auto":
        raise ValueError("last_n_messages must be either a non-negative integer, or the string 'auto'.")

    messages_to_scan = last_n_messages
    if last_n_messages == "auto":
        # Find when the agent last spoke
        messages_to_scan = 0
        for i in range(len(messages)):
            message = messages[-(i + 1)]
            if "role" not in message or message["role"] != "user":
                break
            else:
                messages_to_scan += 1

    # iterate through the last n messages in reverse
    # if code blocks are found, execute the code blocks and return the output
    # if no code blocks are found, continue
    for i in range(min(len(messages), messages_to_scan)):
        message = messages[-(i + 1)]
        if not message["content"]:
            continue
        code_blocks = extract_code(message["content"])
        if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
            continue

        # found code blocks, execute code and push "last_n_messages" back
        exitcode, logs = self.execute_code_blocks(code_blocks)
        code_execution_config["last_n_messages"] = last_n_messages
        exitcode2str = "execution succeeded" if exitcode == 0 else "execution failed"
        return True, f"exitcode: {exitcode} ({exitcode2str})\nCode output: {logs}"

    # no code blocks are found, push last_n_messages back and return.
    code_execution_config["last_n_messages"] = last_n_messages

    return False, None

generate_function_call_reply #

generate_function_call_reply(messages=None, sender=None, config=None)

Generate a reply using function call.

"function_call" replaced by "tool_calls" as of OpenAI API v1.1.0 See https://platform.openai.com/docs/api-reference/chat/create#chat-create-functions

Source code in autogen/agentchat/conversable_agent.py
def generate_function_call_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> tuple[bool, Optional[dict[str, Any]]]:
    """Generate a reply using function call.

    "function_call" replaced by "tool_calls" as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
    See https://platform.openai.com/docs/api-reference/chat/create#chat-create-functions
    """
    if config is None:
        config = self
    if messages is None:
        messages = self._oai_messages[sender]
    message = messages[-1]
    if message.get("function_call"):
        call_id = message.get("id", None)
        func_call = message["function_call"]
        func = self._function_map.get(func_call.get("name", None), None)
        if inspect.iscoroutinefunction(func):
            try:
                # get the running loop if it was already created
                loop = asyncio.get_running_loop()
                close_loop = False
            except RuntimeError:
                # create a loop if there is no running loop
                loop = asyncio.new_event_loop()
                close_loop = True

            _, func_return = loop.run_until_complete(self.a_execute_function(func_call, call_id=call_id))
            if close_loop:
                loop.close()
        else:
            _, func_return = self.execute_function(message["function_call"], call_id=call_id)
        return True, func_return
    return False, None

a_generate_function_call_reply async #

a_generate_function_call_reply(messages=None, sender=None, config=None)

Generate a reply using async function call.

"function_call" replaced by "tool_calls" as of OpenAI API v1.1.0 See https://platform.openai.com/docs/api-reference/chat/create#chat-create-functions

Source code in autogen/agentchat/conversable_agent.py
async def a_generate_function_call_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> tuple[bool, Optional[dict[str, Any]]]:
    """Generate a reply using async function call.

    "function_call" replaced by "tool_calls" as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
    See https://platform.openai.com/docs/api-reference/chat/create#chat-create-functions
    """
    if config is None:
        config = self
    if messages is None:
        messages = self._oai_messages[sender]
    message = messages[-1]
    if "function_call" in message:
        call_id = message.get("id", None)
        func_call = message["function_call"]
        func_name = func_call.get("name", "")
        func = self._function_map.get(func_name, None)
        if func and inspect.iscoroutinefunction(func):
            _, func_return = await self.a_execute_function(func_call, call_id=call_id)
        else:
            _, func_return = self.execute_function(func_call, call_id=call_id)
        return True, func_return

    return False, None

generate_tool_calls_reply #

generate_tool_calls_reply(messages=None, sender=None, config=None)

Generate a reply using tool call.

Source code in autogen/agentchat/conversable_agent.py
def generate_tool_calls_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> tuple[bool, Optional[dict[str, Any]]]:
    """Generate a reply using tool call."""
    if config is None:
        config = self
    if messages is None:
        messages = self._oai_messages[sender]
    message = messages[-1]
    tool_returns = []
    for tool_call in message.get("tool_calls", []):
        function_call = tool_call.get("function", {})
        tool_call_id = tool_call.get("id", None)
        func = self._function_map.get(function_call.get("name", None), None)
        if inspect.iscoroutinefunction(func):
            try:
                # get the running loop if it was already created
                loop = asyncio.get_running_loop()
                close_loop = False
            except RuntimeError:
                # create a loop if there is no running loop
                loop = asyncio.new_event_loop()
                close_loop = True

            _, func_return = loop.run_until_complete(self.a_execute_function(function_call, call_id=tool_call_id))
            if close_loop:
                loop.close()
        else:
            _, func_return = self.execute_function(function_call, call_id=tool_call_id)
        content = func_return.get("content", "")
        if content is None:
            content = ""

        if tool_call_id is not None:
            tool_call_response = {
                "tool_call_id": tool_call_id,
                "role": "tool",
                "content": content,
            }
        else:
            # Do not include tool_call_id if it is not present.
            # This is to make the tool call object compatible with Mistral API.
            tool_call_response = {
                "role": "tool",
                "content": content,
            }
        tool_returns.append(tool_call_response)
    if tool_returns:
        return True, {
            "role": "tool",
            "tool_responses": tool_returns,
            "content": "\n\n".join([self._str_for_tool_response(tool_return) for tool_return in tool_returns]),
        }
    return False, None
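To illustrate the message shape this hook consumes, here is a self-contained, hypothetical sketch; the function name, call id, and agent name are made up for illustration:

```python
import json

from autogen import ConversableAgent

def add(a: int, b: int) -> str:
    return str(a + b)

bot = ConversableAgent(name="bot", llm_config=False)
bot.register_function({"add": add})

done, reply = bot.generate_tool_calls_reply(messages=[{
    "role": "assistant",
    "tool_calls": [
        {"id": "call_1", "function": {"name": "add", "arguments": json.dumps({"a": 2, "b": 3})}},
    ],
}])
# done is True; reply has role "tool", a tool_responses list, and the tool output in "content"
```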

a_generate_tool_calls_reply async #

a_generate_tool_calls_reply(messages=None, sender=None, config=None)

Generate a reply using async tool calls.

Source code in autogen/agentchat/conversable_agent.py
async def a_generate_tool_calls_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> tuple[bool, Optional[dict[str, Any]]]:
    """Generate a reply using async function call."""
    if config is None:
        config = self
    if messages is None:
        messages = self._oai_messages[sender]
    message = messages[-1]
    async_tool_calls = []
    for tool_call in message.get("tool_calls", []):
        async_tool_calls.append(self._a_execute_tool_call(tool_call))
    if async_tool_calls:
        tool_returns = await asyncio.gather(*async_tool_calls)
        return True, {
            "role": "tool",
            "tool_responses": tool_returns,
            "content": "\n\n".join([self._str_for_tool_response(tool_return) for tool_return in tool_returns]),
        }

    return False, None

check_termination_and_human_reply #

check_termination_and_human_reply(messages=None, sender=None, config=None)

Check whether the conversation should be terminated and whether a human reply is provided.

This method checks for conditions that require the conversation to be terminated, such as reaching a maximum number of consecutive auto-replies or encountering a termination message. Additionally, it prompts for and processes human input based on the configured human input mode, which can be 'ALWAYS', 'NEVER', or 'TERMINATE'. The method also manages the consecutive auto-reply counter for the conversation and prints relevant messages based on the human input received.

PARAMETER DESCRIPTION
messages

A list of message dictionaries, representing the conversation history.

TYPE: Optional[list[dict[str, Any]]] DEFAULT: None

sender

The agent object representing the sender of the message.

TYPE: Optional[Agent] DEFAULT: None

config

Configuration object, defaults to the current instance if not provided.

TYPE: Optional[Any] DEFAULT: None

RETURNS DESCRIPTION
tuple[bool, Union[str, None]]

A tuple containing a boolean indicating whether the conversation should be terminated, and a human reply which can be a string, a dictionary, or None.

Source code in autogen/agentchat/conversable_agent.py
def check_termination_and_human_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> tuple[bool, Union[str, None]]:
    """Check if the conversation should be terminated, and if human reply is provided.

    This method checks for conditions that require the conversation to be terminated, such as reaching
    a maximum number of consecutive auto-replies or encountering a termination message. Additionally,
    it prompts for and processes human input based on the configured human input mode, which can be
    'ALWAYS', 'NEVER', or 'TERMINATE'. The method also manages the consecutive auto-reply counter
    for the conversation and prints relevant messages based on the human input received.

    Args:
        messages: A list of message dictionaries, representing the conversation history.
        sender: The agent object representing the sender of the message.
        config: Configuration object, defaults to the current instance if not provided.

    Returns:
        A tuple containing a boolean indicating if the conversation
        should be terminated, and a human reply which can be a string, a dictionary, or None.
    """
    iostream = IOStream.get_default()

    if config is None:
        config = self
    if messages is None:
        messages = self._oai_messages[sender] if sender else []

    # if there are no messages, continue the conversation
    if not messages:
        return False, None
    message = messages[-1]

    reply = ""
    no_human_input_msg = ""
    sender_name = "the sender" if sender is None else sender.name
    if self.human_input_mode == "ALWAYS":
        reply = self.get_human_input(
            f"Replying as {self.name}. Provide feedback to {sender_name}. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: "
        )
        no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
        # if the human input is empty, and the message is a termination message, then we will terminate the conversation
        reply = reply if reply or not self._is_termination_msg(message) else "exit"
    else:
        if self._consecutive_auto_reply_counter[sender] >= self._max_consecutive_auto_reply_dict[sender]:
            if self.human_input_mode == "NEVER":
                reply = "exit"
            else:
                # self.human_input_mode == "TERMINATE":
                terminate = self._is_termination_msg(message)
                reply = self.get_human_input(
                    f"Please give feedback to {sender_name}. Press enter or type 'exit' to stop the conversation: "
                    if terminate
                    else f"Please give feedback to {sender_name}. Press enter to skip and use auto-reply, or type 'exit' to stop the conversation: "
                )
                no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
                # if the human input is empty, and the message is a termination message, then we will terminate the conversation
                reply = reply if reply or not terminate else "exit"
        elif self._is_termination_msg(message):
            if self.human_input_mode == "NEVER":
                reply = "exit"
            else:
                # self.human_input_mode == "TERMINATE":
                reply = self.get_human_input(
                    f"Please give feedback to {sender_name}. Press enter or type 'exit' to stop the conversation: "
                )
                no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
                # if the human input is empty, and the message is a termination message, then we will terminate the conversation
                reply = reply or "exit"

    # print the no_human_input_msg
    if no_human_input_msg:
        iostream.send(
            TerminationAndHumanReplyMessage(no_human_input_msg=no_human_input_msg, sender=sender, recipient=self)
        )

    # stop the conversation
    if reply == "exit":
        # reset the consecutive_auto_reply_counter
        self._consecutive_auto_reply_counter[sender] = 0
        return True, None

    # send the human reply
    if reply or self._max_consecutive_auto_reply_dict[sender] == 0:
        # reset the consecutive_auto_reply_counter
        self._consecutive_auto_reply_counter[sender] = 0
        # User provided a custom response, return function and tool failures indicating user interruption
        tool_returns = []
        if message.get("function_call", False):
            tool_returns.append({
                "role": "function",
                "name": message["function_call"].get("name", ""),
                "content": "USER INTERRUPTED",
            })

        if message.get("tool_calls", False):
            tool_returns.extend([
                {"role": "tool", "tool_call_id": tool_call.get("id", ""), "content": "USER INTERRUPTED"}
                for tool_call in message["tool_calls"]
            ])

        response = {"role": "user", "content": reply}
        if tool_returns:
            response["tool_responses"] = tool_returns

        return True, response

    # increment the consecutive_auto_reply_counter
    self._consecutive_auto_reply_counter[sender] += 1
    if self.human_input_mode != "NEVER":
        iostream.send(UsingAutoReplyMessage(human_input_mode=self.human_input_mode, sender=sender, recipient=self))

    return False, None
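The mode that drives this hook is set at construction time. A hedged configuration sketch (the agent name and termination phrase are illustrative):

```python
from autogen import ConversableAgent

reviewer = ConversableAgent(
    name="reviewer",
    llm_config=False,
    human_input_mode="TERMINATE",  # prompt only on termination or the auto-reply limit
    max_consecutive_auto_reply=3,  # after 3 consecutive auto replies, ask the human
    is_termination_msg=lambda m: "DONE" in (m.get("content") or ""),
)
```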

a_check_termination_and_human_reply async #

a_check_termination_and_human_reply(messages=None, sender=None, config=None)

(async) Check whether the conversation should be terminated and whether a human reply is provided.

This method checks for conditions that require the conversation to be terminated, such as reaching a maximum number of consecutive auto-replies or encountering a termination message. Additionally, it prompts for and processes human input based on the configured human input mode, which can be 'ALWAYS', 'NEVER', or 'TERMINATE'. The method also manages the consecutive auto-reply counter for the conversation and prints relevant messages based on the human input received.

PARAMETER DESCRIPTION
messages

A list of message dictionaries, representing the conversation history.

TYPE: Optional[List[Dict]] DEFAULT: None

sender

The agent object representing the sender of the message.

TYPE: Optional[Agent] DEFAULT: None

config

Configuration object, defaults to the current instance if not provided.

TYPE: Optional[Any] DEFAULT: None

RETURNS DESCRIPTION
Tuple[bool, Union[str, Dict, None]]

A tuple containing a boolean indicating whether the conversation should be terminated, and a human reply which can be a string, a dictionary, or None.

Source code in autogen/agentchat/conversable_agent.py
async def a_check_termination_and_human_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> tuple[bool, Union[str, None]]:
    """(async) Check if the conversation should be terminated, and if human reply is provided.

    This method checks for conditions that require the conversation to be terminated, such as reaching
    a maximum number of consecutive auto-replies or encountering a termination message. Additionally,
    it prompts for and processes human input based on the configured human input mode, which can be
    'ALWAYS', 'NEVER', or 'TERMINATE'. The method also manages the consecutive auto-reply counter
    for the conversation and prints relevant messages based on the human input received.

    Args:
        messages (Optional[List[Dict]]): A list of message dictionaries, representing the conversation history.
        sender (Optional[Agent]): The agent object representing the sender of the message.
        config (Optional[Any]): Configuration object, defaults to the current instance if not provided.

    Returns:
        Tuple[bool, Union[str, Dict, None]]: A tuple containing a boolean indicating if the conversation
        should be terminated, and a human reply which can be a string, a dictionary, or None.
    """
    iostream = IOStream.get_default()

    if config is None:
        config = self
    if messages is None:
        messages = self._oai_messages[sender] if sender else []
    message = messages[-1] if messages else {}
    reply = ""
    no_human_input_msg = ""
    sender_name = "the sender" if sender is None else sender.name
    if self.human_input_mode == "ALWAYS":
        reply = await self.a_get_human_input(
            f"Replying as {self.name}. Provide feedback to {sender_name}. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: "
        )
        no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
        # if the human input is empty, and the message is a termination message, then we will terminate the conversation
        reply = reply if reply or not self._is_termination_msg(message) else "exit"
    else:
        if self._consecutive_auto_reply_counter[sender] >= self._max_consecutive_auto_reply_dict[sender]:
            if self.human_input_mode == "NEVER":
                reply = "exit"
            else:
                # self.human_input_mode == "TERMINATE":
                terminate = self._is_termination_msg(message)
                reply = await self.a_get_human_input(
                    f"Please give feedback to {sender_name}. Press enter or type 'exit' to stop the conversation: "
                    if terminate
                    else f"Please give feedback to {sender_name}. Press enter to skip and use auto-reply, or type 'exit' to stop the conversation: "
                )
                no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
                # if the human input is empty, and the message is a termination message, then we will terminate the conversation
                reply = reply if reply or not terminate else "exit"
        elif self._is_termination_msg(message):
            if self.human_input_mode == "NEVER":
                reply = "exit"
            else:
                # self.human_input_mode == "TERMINATE":
                reply = await self.a_get_human_input(
                    f"Please give feedback to {sender_name}. Press enter or type 'exit' to stop the conversation: "
                )
                no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
                # if the human input is empty, and the message is a termination message, then we will terminate the conversation
                reply = reply or "exit"

    # print the no_human_input_msg
    if no_human_input_msg:
        iostream.send(
            TerminationAndHumanReplyMessage(no_human_input_msg=no_human_input_msg, sender=sender, recipient=self)
        )

    # stop the conversation
    if reply == "exit":
        # reset the consecutive_auto_reply_counter
        self._consecutive_auto_reply_counter[sender] = 0
        return True, None

    # send the human reply
    if reply or self._max_consecutive_auto_reply_dict[sender] == 0:
        # User provided a custom response, return function and tool results indicating user interruption
        # reset the consecutive_auto_reply_counter
        self._consecutive_auto_reply_counter[sender] = 0
        tool_returns = []
        if message.get("function_call", False):
            tool_returns.append({
                "role": "function",
                "name": message["function_call"].get("name", ""),
                "content": "USER INTERRUPTED",
            })

        if message.get("tool_calls", False):
            tool_returns.extend([
                {"role": "tool", "tool_call_id": tool_call.get("id", ""), "content": "USER INTERRUPTED"}
                for tool_call in message["tool_calls"]
            ])

        response = {"role": "user", "content": reply}
        if tool_returns:
            response["tool_responses"] = tool_returns

        return True, response

    # increment the consecutive_auto_reply_counter
    self._consecutive_auto_reply_counter[sender] += 1
    if self.human_input_mode != "NEVER":
        iostream.send(UsingAutoReplyMessage(human_input_mode=self.human_input_mode, sender=sender, recipient=self))

    return False, None

get_human_input #

get_human_input(prompt)

Get human input.

Override this method to customize the way to get human input.

PARAMETER DESCRIPTION
prompt

prompt for the human input.

TYPE: str

RETURNS DESCRIPTION
str

human input.

TYPE: str

Source code in autogen/agentchat/conversable_agent.py
def get_human_input(self, prompt: str) -> str:
    """Get human input.

    Override this method to customize the way to get human input.

    Args:
        prompt (str): prompt for the human input.

    Returns:
        str: human input.
    """
    iostream = IOStream.get_default()

    reply = iostream.input(prompt)
    self._human_input.append(reply)
    return reply
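Since the docstring invites overriding, here is a hedged sketch that replaces stdin with a scripted list of replies; the subclass and its scripted_replies argument are hypothetical:

```python
from typing import Optional

from autogen import ConversableAgent

class ScriptedAgent(ConversableAgent):
    def __init__(self, *args, scripted_replies: Optional[list[str]] = None, **kwargs):
        super().__init__(*args, **kwargs)
        self._scripted = list(scripted_replies or [])

    def get_human_input(self, prompt: str) -> str:
        # Pop the next canned reply; fall back to "exit" to end the conversation.
        reply = self._scripted.pop(0) if self._scripted else "exit"
        self._human_input.append(reply)  # keep the base class's bookkeeping
        return reply
```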

a_get_human_input async #

a_get_human_input(prompt)

(Async) Get human input.

Override this method to customize the way to get human input.

PARAMETER DESCRIPTION
prompt

prompt for the human input.

TYPE: str

RETURNS DESCRIPTION
str

human input.

TYPE: str

Source code in autogen/agentchat/conversable_agent.py
async def a_get_human_input(self, prompt: str) -> str:
    """(Async) Get human input.

    Override this method to customize the way to get human input.

    Args:
        prompt (str): prompt for the human input.

    Returns:
        str: human input.
    """
    loop = asyncio.get_running_loop()
    reply = await loop.run_in_executor(None, functools.partial(self.get_human_input, prompt))
    return reply

run_code #

run_code(code, **kwargs)

Run the code and return the result.

Override this function to modify the way to run the code.

PARAMETER DESCRIPTION
code

the code to be executed.

TYPE: str

**kwargs

other keyword arguments.

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
tuple[int, str, Optional[str]]

A tuple of (exitcode, logs, image).

exitcode

the exit code of the code execution.

TYPE: int

logs

the logs of the code execution.

TYPE: str

image

the docker image used for the code execution.

TYPE: str or None

Source code in autogen/agentchat/conversable_agent.py
def run_code(self, code, **kwargs: Any) -> tuple[int, str, Optional[str]]:
    """Run the code and return the result.

    Override this function to modify the way to run the code.

    Args:
        code (str): the code to be executed.
        **kwargs: other keyword arguments.

    Returns:
        A tuple of (exitcode, logs, image).
        exitcode (int): the exit code of the code execution.
        logs (str): the logs of the code execution.
        image (str or None): the docker image used for the code execution.
    """
    return execute_code(code, **kwargs)
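A hedged override sketch: log what is about to run, then defer to the base implementation so the (exitcode, logs, image) contract is preserved. The subclass name is hypothetical:

```python
from autogen import ConversableAgent

class LoggingCodeAgent(ConversableAgent):
    def run_code(self, code, **kwargs):
        # Report what is about to execute, then delegate to the default runner.
        print(f"Running {len(code)} characters of code (kwargs: {sorted(kwargs)})")
        return super().run_code(code, **kwargs)  # still (exitcode, logs, image)
```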

execute_code_blocks #

execute_code_blocks(code_blocks)

Execute the code blocks and return the result.

Source code in autogen/agentchat/conversable_agent.py
def execute_code_blocks(self, code_blocks):
    """Execute the code blocks and return the result."""
    iostream = IOStream.get_default()

    logs_all = ""
    for i, code_block in enumerate(code_blocks):
        lang, code = code_block
        if not lang:
            lang = infer_lang(code)

        iostream.send(ExecuteCodeBlockMessage(code=code, language=lang, code_block_count=i, recipient=self))

        if lang in ["bash", "shell", "sh"]:
            exitcode, logs, image = self.run_code(code, lang=lang, **self._code_execution_config)
        elif lang in PYTHON_VARIANTS:
            filename = code[11 : code.find("\n")].strip() if code.startswith("# filename: ") else None
            exitcode, logs, image = self.run_code(
                code,
                lang="python",
                filename=filename,
                **self._code_execution_config,
            )
        else:
            # In case the language is not supported, we return an error message.
            exitcode, logs, image = (
                1,
                f"unknown language {lang}",
                None,
            )
            # raise NotImplementedError
        if image is not None:
            self._code_execution_config["use_docker"] = image
        logs_all += "\n" + logs
        if exitcode != 0:
            return exitcode, logs_all
    return exitcode, logs_all

execute_function #

execute_function(func_call, call_id=None, verbose=False)

Execute a function call and return the result.

Override this function to modify the way to execute function and tool calls.

PARAMETER DESCRIPTION
func_call

a dictionary extracted from openai message at "function_call" or "tool_calls" with keys "name" and "arguments".

TYPE: dict[str, Any]

call_id

a string to identify the tool call.

TYPE: Optional[str] DEFAULT: None

verbose

Whether to send messages about the execution details to the output stream. When True, both the function call arguments and the execution result will be displayed. Defaults to False.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
tuple[bool, dict[str, Any]]

A tuple of (is_exec_success, result_dict).

is_exec_success

whether the execution is successful.

TYPE: bool

result_dict

a dictionary with keys "name", "role", and "content". Value of "role" is "function".

TYPE: dict[str, Any]

"function_call" deprecated as of OpenAI API v1.1.0 See https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call

Source code in autogen/agentchat/conversable_agent.py
def execute_function(
    self, func_call: dict[str, Any], call_id: Optional[str] = None, verbose: bool = False
) -> tuple[bool, dict[str, Any]]:
    """Execute a function call and return the result.

    Override this function to modify the way to execute function and tool calls.

    Args:
        func_call: a dictionary extracted from openai message at "function_call" or "tool_calls" with keys "name" and "arguments".
        call_id: a string to identify the tool call.
        verbose (bool): Whether to send messages about the execution details to the
            output stream. When True, both the function call arguments and the execution
            result will be displayed. Defaults to False.


    Returns:
        A tuple of (is_exec_success, result_dict).
        is_exec_success (boolean): whether the execution is successful.
        result_dict: a dictionary with keys "name", "role", and "content". Value of "role" is "function".

    "function_call" deprecated as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
    See https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
    """
    iostream = IOStream.get_default()

    func_name = func_call.get("name", "")
    func = self._function_map.get(func_name, None)

    is_exec_success = False
    if func is not None:
        # Extract arguments from a json-like string and put it into a dict.
        input_string = self._format_json_str(func_call.get("arguments", "{}"))
        try:
            arguments = json.loads(input_string)
        except json.JSONDecodeError as e:
            arguments = None
            content = f"Error: {e}\n The argument must be in JSON format."

        # Try to execute the function
        if arguments is not None:
            iostream.send(
                ExecuteFunctionMessage(func_name=func_name, call_id=call_id, arguments=arguments, recipient=self)
            )
            try:
                content = func(**arguments)
                is_exec_success = True
            except Exception as e:
                content = f"Error: {e}"
    else:
        arguments = {}
        content = f"Error: Function {func_name} not found."

    if verbose:
        iostream.send(
            ExecutedFunctionMessage(
                func_name=func_name, call_id=call_id, arguments=arguments, content=content, recipient=self
            )
        )

    return is_exec_success, {
        "name": func_name,
        "role": "function",
        "content": content,
    }
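A self-contained sketch of calling execute_function directly with a registered function (all names illustrative); note that the "arguments" field is a JSON string, exactly as it appears in an OpenAI message:

```python
import json

from autogen import ConversableAgent

def add(a: int, b: int) -> str:
    return str(a + b)

calc = ConversableAgent(name="calc", llm_config=False)
calc.register_function({"add": add})

ok, result = calc.execute_function(
    {"name": "add", "arguments": json.dumps({"a": 2, "b": 3})},
    call_id="call_42",  # optional identifier, echoed in execution messages
)
print(ok, result)  # True {'name': 'add', 'role': 'function', 'content': '5'}
```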

a_execute_function async #

a_execute_function(func_call, call_id=None, verbose=False)

Execute an async function call and return the result.

Override this function to modify the way async functions and tools are executed.

PARAMETER DESCRIPTION
func_call

a dictionary extracted from openai message at key "function_call" or "tool_calls" with keys "name" and "arguments".

TYPE: dict[str, Any]

call_id

a string to identify the tool call.

TYPE: Optional[str] DEFAULT: None

verbose

Whether to send messages about the execution details to the output stream. When True, both the function call arguments and the execution result will be displayed. Defaults to False.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
tuple[bool, dict[str, Any]]

A tuple of (is_exec_success, result_dict).

is_exec_success

whether the execution is successful.

TYPE: bool

result_dict

a dictionary with keys "name", "role", and "content". Value of "role" is "function".

TYPE: dict[str, Any]

"function_call" deprecated as of OpenAI API v1.1.0 See https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call

Source code in autogen/agentchat/conversable_agent.py
async def a_execute_function(
    self, func_call: dict[str, Any], call_id: Optional[str] = None, verbose: bool = False
) -> tuple[bool, dict[str, Any]]:
    """Execute an async function call and return the result.

    Override this function to modify the way async functions and tools are executed.

    Args:
        func_call: a dictionary extracted from openai message at key "function_call" or "tool_calls" with keys "name" and "arguments".
        call_id: a string to identify the tool call.
        verbose (bool): Whether to send messages about the execution details to the
            output stream. When True, both the function call arguments and the execution
            result will be displayed. Defaults to False.

    Returns:
        A tuple of (is_exec_success, result_dict).
        is_exec_success (boolean): whether the execution is successful.
        result_dict: a dictionary with keys "name", "role", and "content". Value of "role" is "function".

    "function_call" deprecated as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
    See https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
    """
    iostream = IOStream.get_default()

    func_name = func_call.get("name", "")
    func = self._function_map.get(func_name, None)

    is_exec_success = False
    if func is not None:
        # Extract arguments from a json-like string and put it into a dict.
        input_string = self._format_json_str(func_call.get("arguments", "{}"))
        try:
            arguments = json.loads(input_string)
        except json.JSONDecodeError as e:
            arguments = None
            content = f"Error: {e}\n The argument must be in JSON format."

        # Try to execute the function
        if arguments is not None:
            iostream.send(
                ExecuteFunctionMessage(func_name=func_name, call_id=call_id, arguments=arguments, recipient=self)
            )
            try:
                if inspect.iscoroutinefunction(func):
                    content = await func(**arguments)
                else:
                    # Fallback to sync function if the function is not async
                    content = func(**arguments)
                is_exec_success = True
            except Exception as e:
                content = f"Error: {e}"
    else:
        arguments = {}
        content = f"Error: Function {func_name} not found."

    if verbose:
        iostream.send(
            ExecutedFunctionMessage(
                func_name=func_name, call_id=call_id, arguments=arguments, content=content, recipient=self
            )
        )

    return is_exec_success, {
        "name": func_name,
        "role": "function",
        "content": content,
    }

generate_init_message #

generate_init_message(message, **kwargs)

Generate the initial message for the agent. If message is None, input() will be called to get the initial message.

PARAMETER DESCRIPTION
message

the message to be processed.

TYPE: str or None

**kwargs

any additional information. It has the following reserved field: "carryover": a string or a list of strings specifying the carryover information to be passed to this chat. If provided, we will combine this carryover with the "message" content when generating the initial chat message.

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
Union[str, dict[str, Any]]

str or dict: the processed message.

Source code in autogen/agentchat/conversable_agent.py
def generate_init_message(
    self, message: Optional[Union[dict[str, Any], str]], **kwargs: Any
) -> Union[str, dict[str, Any]]:
    """Generate the initial message for the agent.
    If message is None, input() will be called to get the initial message.

    Args:
        message (str or None): the message to be processed.
        **kwargs: any additional information. It has the following reserved fields:
            "carryover": a string or a list of string to specify the carryover information to be passed to this chat. It can be a string or a list of string.
                If provided, we will combine this carryover with the "message" content when generating the initial chat
                message.

    Returns:
        str or dict: the processed message.
    """
    if message is None:
        message = self.get_human_input(">")

    return self._handle_carryover(message, kwargs)
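A hedged sketch of the carryover handling, reusing the hypothetical `user` agent from the earlier sketches; list entries are combined into a context section appended after the message content:

```python
msg = user.generate_init_message(
    "Write a blogpost.",
    carryover=["Audience: beginners", "Tone: friendly"],
)
print(msg)  # the message text followed by a context section containing both entries
```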

a_generate_init_message async #

a_generate_init_message(message, **kwargs)

Generate the initial message for the agent. If message is None, input() will be called to get the initial message.

PARAMETER DESCRIPTION
message

the message to be processed.

TYPE: str or None

**kwargs

any additional information. It has the following reserved field: "carryover": a string or a list of strings specifying the carryover information to be passed to this chat. If provided, we will combine this carryover with the "message" content when generating the initial chat message.

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
Union[str, dict[str, Any]]

str or dict: the processed message.

Source code in autogen/agentchat/conversable_agent.py
async def a_generate_init_message(
    self, message: Optional[Union[dict[str, Any], str]], **kwargs: Any
) -> Union[str, dict[str, Any]]:
    """Generate the initial message for the agent.
    If message is None, input() will be called to get the initial message.

    Args:
        message (str or None): the message to be processed.
        **kwargs: any additional information. It has the following reserved fields:
            "carryover": a string or a list of string to specify the carryover information to be passed to this chat. It can be a string or a list of string.
                If provided, we will combine this carryover with the "message" content when generating the initial chat
                message.

    Returns:
        str or dict: the processed message.
    """
    if message is None:
        message = await self.a_get_human_input(">")

    return self._handle_carryover(message, kwargs)

remove_tool_for_llm #

remove_tool_for_llm(tool)

Remove a tool that was registered for the LLM.

Source code in autogen/agentchat/conversable_agent.py
def remove_tool_for_llm(self, tool: Tool) -> None:
    """Remove a tool (register for LLM tool)"""
    try:
        self._register_for_llm(tool=tool, api_style="tool", is_remove=True)
        self._tools.remove(tool)
    except ValueError:
        raise ValueError(f"Tool {tool} not found in collection")

register_function #

register_function(function_map)

Register functions to the agent.

PARAMETER DESCRIPTION
function_map

a dictionary mapping function names to functions. If function_map[name] is None, the function will be removed from the function_map.

TYPE: dict[str, Union[Callable[..., Any]]]

Source code in autogen/agentchat/conversable_agent.py
def register_function(self, function_map: dict[str, Union[Callable[..., Any]]]):
    """Register functions to the agent.

    Args:
        function_map: a dictionary mapping function names to functions. If function_map[name] is None, the function will be removed from the function_map.
    """
    for name, func in function_map.items():
        self._assert_valid_name(name)
        if func is None and name not in self._function_map:
            warnings.warn(f"The function {name} to remove doesn't exist", name)
        if name in self._function_map:
            warnings.warn(f"Function '{name}' is being overridden.", UserWarning)
    self._function_map.update(function_map)
    self._function_map = {k: v for k, v in self._function_map.items() if v is not None}
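
A minimal sketch of registering and unregistering plain callables (the agent setup and function names are illustrative):

```
from autogen import ConversableAgent

agent = ConversableAgent(name="assistant", llm_config=False)  # config illustrative

def add(a: int, b: int) -> int:
    return a + b

def sub(a: int, b: int) -> int:
    return a - b

agent.register_function({"add": add, "sub": sub})
agent.register_function({"sub": None})  # maps "sub" to None, removing it
```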

update_function_signature #

update_function_signature(func_sig, is_remove)

Update a function_signature in the LLM configuration for function_call.

PARAMETER DESCRIPTION
func_sig

description/name of the function to update in or remove from the model. See: https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions

TYPE: str or dict

is_remove

whether to remove the function named 'func_sig' from llm_config

TYPE: bool

Deprecated as of OpenAI API v1.1.0. See https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call

Source code in autogen/agentchat/conversable_agent.py
def update_function_signature(self, func_sig: Union[str, dict[str, Any]], is_remove: bool):
    """Update a function_signature in the LLM configuration for function_call.

    Args:
        func_sig (str or dict): description/name of the function to update in or remove from the model. See: https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions
        is_remove: whether to remove the function named 'func_sig' from llm_config

    Deprecated as of [OpenAI API v1.1.0](https://github.com/openai/openai-python/releases/tag/v1.1.0)
    See https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
    """
    if not isinstance(self.llm_config, dict):
        error_msg = "To update a function signature, agent must have an llm_config"
        logger.error(error_msg)
        raise AssertionError(error_msg)

    if is_remove:
        if "functions" not in self.llm_config:
            error_msg = f"The agent config doesn't have function {func_sig}."
            logger.error(error_msg)
            raise AssertionError(error_msg)
        else:
            self.llm_config["functions"] = [
                func for func in self.llm_config["functions"] if func["name"] != func_sig
            ]
    else:
        if not isinstance(func_sig, dict):
            raise ValueError(
                f"The function signature must be of the type dict. Received function signature type {type(func_sig)}"
            )
        if "name" not in func_sig:
            raise ValueError(f"The function signature must have a 'name' key. Received: {func_sig}")
        self._assert_valid_name(func_sig["name"]), func_sig
        if "functions" in self.llm_config:
            if any(func["name"] == func_sig["name"] for func in self.llm_config["functions"]):
                warnings.warn(f"Function '{func_sig['name']}' is being overridden.", UserWarning)

            self.llm_config["functions"] = [
                func for func in self.llm_config["functions"] if func.get("name") != func_sig["name"]
            ] + [func_sig]
        else:
            self.llm_config["functions"] = [func_sig]

    if len(self.llm_config["functions"]) == 0:
        del self.llm_config["functions"]

    self.client = OpenAIWrapper(**self.llm_config)
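
A minimal sketch of adding and then removing a signature in the deprecated function-call style; the dict follows the OpenAI "functions" schema linked above (the function and placeholder config are illustrative):

```
from autogen import ConversableAgent

# Placeholder config: this method requires the agent to have a dict llm_config.
agent = ConversableAgent(
    name="assistant",
    llm_config={"config_list": [{"model": "gpt-4o-mini", "api_key": "PLACEHOLDER"}]},
)

weather_sig = {
    "name": "get_weather",
    "description": "Get the current weather for a city.",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}
agent.update_function_signature(weather_sig, is_remove=False)   # add or override
agent.update_function_signature("get_weather", is_remove=True)  # remove by name
```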

update_tool_signature #

update_tool_signature(tool_sig, is_remove)

Update a tool_signature in the LLM configuration for tool_call.

PARAMETER DESCRIPTION
tool_sig

description/name of the tool to update in or remove from the model. See: https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools

TYPE: str or dict

is_remove

whether to remove the tool named 'tool_sig' from llm_config

TYPE: bool

Source code in autogen/agentchat/conversable_agent.py
def update_tool_signature(self, tool_sig: Union[str, dict[str, Any]], is_remove: bool):
    """Update a tool_signature in the LLM configuration for tool_call.

    Args:
        tool_sig (str or dict): description/name of the tool to update in or remove from the model. See: https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools
        is_remove: whether to remove the tool named 'tool_sig' from llm_config
    """
    if not self.llm_config:
        error_msg = "To update a tool signature, agent must have an llm_config"
        logger.error(error_msg)
        raise AssertionError(error_msg)

    if is_remove:
        if "tools" not in self.llm_config:
            error_msg = f"The agent config doesn't have tool {tool_sig}."
            logger.error(error_msg)
            raise AssertionError(error_msg)
        else:
            current_tools = self.llm_config["tools"]
            filtered_tools = []

            # Loop through and rebuild tools list without the tool to remove
            for tool in current_tools:
                tool_name = tool["function"]["name"]

                # Match by tool name, or by tool signature
                is_different = tool_name != tool_sig if isinstance(tool_sig, str) else tool != tool_sig

                if is_different:
                    filtered_tools.append(tool)

            self.llm_config["tools"] = filtered_tools
    else:
        if not isinstance(tool_sig, dict):
            raise ValueError(
                f"The tool signature must be of the type dict. Received tool signature type {type(tool_sig)}"
            )
        self._assert_valid_name(tool_sig["function"]["name"])
        if "tools" in self.llm_config:
            if any(tool["function"]["name"] == tool_sig["function"]["name"] for tool in self.llm_config["tools"]):
                warnings.warn(f"Function '{tool_sig['function']['name']}' is being overridden.", UserWarning)
            self.llm_config["tools"] = [
                tool
                for tool in self.llm_config["tools"]
                if tool.get("function", {}).get("name") != tool_sig["function"]["name"]
            ] + [tool_sig]
        else:
            self.llm_config["tools"] = [tool_sig]

    if len(self.llm_config["tools"]) == 0:
        del self.llm_config["tools"]

    self.client = OpenAIWrapper(**self.llm_config)
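
A minimal sketch in the tool-call style: a tool signature wraps the function schema under a "function" key (the tool and placeholder config are illustrative):

```
from autogen import ConversableAgent

# Placeholder config: this method requires the agent to have an llm_config.
agent = ConversableAgent(
    name="assistant",
    llm_config={"config_list": [{"model": "gpt-4o-mini", "api_key": "PLACEHOLDER"}]},
)

weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}
agent.update_tool_signature(weather_tool, is_remove=False)   # add or override
agent.update_tool_signature("get_weather", is_remove=True)   # remove by name
```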

can_execute_function #

can_execute_function(name)

Whether the agent can execute the function.

Source code in autogen/agentchat/conversable_agent.py
def can_execute_function(self, name: Union[list[str], str]) -> bool:
    """Whether the agent can execute the function."""
    names = name if isinstance(name, list) else [name]
    return all([n in self._function_map for n in names])
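
A minimal sketch (agent setup and names are illustrative):

```
from autogen import ConversableAgent

agent = ConversableAgent(name="assistant", llm_config=False)  # config illustrative
agent.register_function({"add": lambda a, b: a + b})

# The check accepts a single name or a list of names.
assert agent.can_execute_function("add") is True
assert agent.can_execute_function(["add", "missing"]) is False
```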

register_for_llm #

register_for_llm(*, name=None, description=None, api_style='tool')

Decorator factory for registering a function to be used by an agent.

Its return value is used to decorate a function to be registered to the agent. The function uses type hints to specify the arguments and return type. The function name is used as the default name for the function, but a custom name can be provided. The function description is used to describe the function in the agent's configuration.

PARAMETER DESCRIPTION
name

name of the function. If None, the function name will be used (default: None).

TYPE: Optional[str] DEFAULT: None

description

description of the function (default: None). It is mandatory for the initial decorator, but the following ones can omit it.

TYPE: Optional[str] DEFAULT: None

api_style

the API style for function calls. For the Azure OpenAI API, use version 2023-12-01-preview or later; the "function" style will be deprecated. For earlier versions, use "function" if "tool" doesn't work. See the Azure OpenAI documentation for details.

TYPE: Literal['function', 'tool'] DEFAULT: 'tool'

RETURNS DESCRIPTION
Callable[[Union[F, Tool]], Tool]

The decorator for registering a function to be used by an agent.

Examples:

@user_proxy.register_for_execution()
@agent2.register_for_llm()
@agent1.register_for_llm(description="This is a very useful function")
def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int = 2, c: float = 3.14) -> str:
    return a + str(b * c)

For Azure OpenAI versions prior to 2023-12-01-preview, set api_style to "function" if "tool" doesn't work:

@agent2.register_for_llm(api_style="function")
def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int = 2, c: float = 3.14) -> str:
    return a + str(b * c)

Source code in autogen/agentchat/conversable_agent.py
def register_for_llm(
    self,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    api_style: Literal["function", "tool"] = "tool",
) -> Callable[[Union[F, Tool]], Tool]:
    """Decorator factory for registering a function to be used by an agent.

    Its return value is used to decorate a function to be registered to the agent. The function uses type hints to
    specify the arguments and return type. The function name is used as the default name for the function,
    but a custom name can be provided. The function description is used to describe the function in the
    agent's configuration.

    Args:
        name (optional(str)): name of the function. If None, the function name will be used (default: None).
        description (optional(str)): description of the function (default: None). It is mandatory
            for the initial decorator, but the following ones can omit it.
        api_style: the API style for function calls.
            For the Azure OpenAI API, use version 2023-12-01-preview or later.
            The `"function"` style will be deprecated. For earlier versions, use
            `"function"` if `"tool"` doesn't work.
            See [Azure OpenAI documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/function-calling?tabs=python) for details.

    Returns:
        The decorator for registering a function to be used by an agent.

    Examples:
        ```
        @user_proxy.register_for_execution()
        @agent2.register_for_llm()
        @agent1.register_for_llm(description="This is a very useful function")
        def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int = 2, c: float = 3.14) -> str:
            return a + str(b * c)
        ```

        For Azure OpenAI versions prior to 2023-12-01-preview, set `api_style`
        to `"function"` if `"tool"` doesn't work:
        ```
        @agent2.register_for_llm(api_style="function")
        def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int = 2, c: float = 3.14) -> str:
            return a + str(b * c)
        ```

    """

    def _decorator(
        func_or_tool: Union[F, Tool], name: Optional[str] = name, description: Optional[str] = description
    ) -> Tool:
        """Decorator for registering a function to be used by an agent.

        Args:
            func_or_tool: The function or the tool to be registered.
            name: The name of the function or the tool.
            description: The description of the function or the tool.

        Returns:
            The function to be registered, with the _description attribute set to the function description.

        Raises:
            ValueError: if the function description is not provided and not propagated by a previous decorator.
            RuntimeError: if the LLM config is not set up before registering a function.

        """
        tool = self._create_tool_if_needed(func_or_tool, name, description)

        self._register_for_llm(tool, api_style)
        self._tools.append(tool)

        return tool

    return _decorator

register_for_execution #

register_for_execution(name=None, description=None)

Decorator factory for registering a function to be executed by an agent.

Its return value is used to decorate a function to be registered to the agent.

PARAMETER DESCRIPTION
name

name of the function. If None, the function name will be used (default: None).

TYPE: Optional[str] DEFAULT: None

description

description of the function (default: None).

TYPE: Optional[str] DEFAULT: None

RETURNS DESCRIPTION
Callable[[Union[Tool, F]], Tool]

The decorator for registering a function to be used by an agent.

Examples:

@user_proxy.register_for_execution()
@agent2.register_for_llm()
@agent1.register_for_llm(description="This is a very useful function")
def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int = 2, c: float = 3.14):
    return a + str(b * c)
Source code in autogen/agentchat/conversable_agent.py
def register_for_execution(
    self,
    name: Optional[str] = None,
    description: Optional[str] = None,
) -> Callable[[Union[Tool, F]], Tool]:
    """Decorator factory for registering a function to be executed by an agent.

    Its return value is used to decorate a function to be registered to the agent.

    Args:
        name: name of the function. If None, the function name will be used (default: None).
        description: description of the function (default: None).

    Returns:
        The decorator for registering a function to be used by an agent.

    Examples:
        ```
        @user_proxy.register_for_execution()
        @agent2.register_for_llm()
        @agent1.register_for_llm(description="This is a very useful function")
        def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int = 2, c: float = 3.14):
            return a + str(b * c)
        ```

    """

    def _decorator(
        func_or_tool: Union[Tool, F], name: Optional[str] = name, description: Optional[str] = description
    ) -> Tool:
        """Decorator for registering a function to be used by an agent.

        Args:
            func_or_tool: the function or the tool to be registered.
            name: the name of the function.
            description: the description of the function.

        Returns:
            The tool to be registered.

        """

        tool = self._create_tool_if_needed(func_or_tool, name, description)
        chat_context = ChatContext(self)
        chat_context_params = {param: chat_context for param in tool._chat_context_param_names}

        self.register_function({tool.name: self._wrap_function(tool.func, chat_context_params)})

        return tool

    return _decorator

register_model_client #

register_model_client(model_client_cls, **kwargs)

Register a model client.

PARAMETER DESCRIPTION
model_client_cls

A custom client class that follows the Client interface

TYPE: ModelClient

**kwargs

The kwargs for the custom client class to be initialized with

TYPE: Any DEFAULT: {}

Source code in autogen/agentchat/conversable_agent.py
def register_model_client(self, model_client_cls: ModelClient, **kwargs: Any):
    """Register a model client.

    Args:
        model_client_cls: A custom client class that follows the Client interface
        **kwargs: The kwargs for the custom client class to be initialized with
    """
    self.client.register_model_client(model_client_cls, **kwargs)
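
A minimal sketch, assuming a hypothetical CustomModelClient class that implements the ModelClient protocol and an llm_config entry that names it (the class, model name, and extra kwargs are all illustrative):

```
from autogen import ConversableAgent

# CustomModelClient is a hypothetical class assumed to implement the
# ModelClient protocol; config values are placeholders.
llm_config = {
    "config_list": [
        {"model": "my-local-model", "model_client_cls": "CustomModelClient"},
    ],
}
agent = ConversableAgent(name="assistant", llm_config=llm_config)
# Extra kwargs (here, device) are passed to the client's initializer.
agent.register_model_client(model_client_cls=CustomModelClient, device="cpu")
```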

register_hook #

register_hook(hookable_method, hook)

Registers a hook to be called by a hookable method, in order to add a capability to the agent. Registered hooks are kept in lists (one per hookable method), and are called in their order of registration.

PARAMETER DESCRIPTION
hookable_method

A hookable method name implemented by ConversableAgent.

TYPE: str

hook

A method implemented by a subclass of AgentCapability.

TYPE: Callable

Source code in autogen/agentchat/conversable_agent.py
def register_hook(self, hookable_method: str, hook: Callable):
    """Registers a hook to be called by a hookable method, in order to add a capability to the agent.
    Registered hooks are kept in lists (one per hookable method), and are called in their order of registration.

    Args:
        hookable_method: A hookable method name implemented by ConversableAgent.
        hook: A method implemented by a subclass of AgentCapability.
    """
    assert hookable_method in self.hook_lists, f"{hookable_method} is not a hookable method."
    hook_list = self.hook_lists[hookable_method]
    assert hook not in hook_list, f"{hook} is already registered as a hook."
    hook_list.append(hook)
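
A minimal sketch using the "process_last_received_message" hookable method (the agent setup and hook body are illustrative):

```
from autogen import ConversableAgent

agent = ConversableAgent(name="assistant", llm_config=False)  # config illustrative

# A "process_last_received_message" hook receives the last message's content
# and must return the (possibly modified) content.
def prefix_with_note(user_content: str) -> str:
    return f"[forwarded] {user_content}"

agent.register_hook("process_last_received_message", prefix_with_note)
```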

update_agent_state_before_reply #

update_agent_state_before_reply(messages)

Calls any registered capability hooks to update the agent's state. Primarily used to update context variables. May modify the messages.

Source code in autogen/agentchat/conversable_agent.py
def update_agent_state_before_reply(self, messages: list[dict[str, Any]]) -> None:
    """Calls any registered capability hooks to update the agent's state.
    Primarily used to update context variables.
    May modify the messages.
    """
    hook_list = self.hook_lists["update_agent_state"]

    # Call each hook (in order of registration) to process the messages.
    for hook in hook_list:
        hook(self, messages)

process_all_messages_before_reply #

process_all_messages_before_reply(messages)

Calls any registered capability hooks to process all messages, potentially modifying the messages.

Source code in autogen/agentchat/conversable_agent.py
def process_all_messages_before_reply(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Calls any registered capability hooks to process all messages, potentially modifying the messages."""
    hook_list = self.hook_lists["process_all_messages_before_reply"]
    # If no hooks are registered, or if there are no messages to process, return the original message list.
    if len(hook_list) == 0 or messages is None:
        return messages

    # Call each hook (in order of registration) to process the messages.
    processed_messages = messages
    for hook in hook_list:
        processed_messages = hook(processed_messages)
    return processed_messages

process_last_received_message #

process_last_received_message(messages)

Calls any registered capability hooks to use and potentially modify the text of the last message, as long as the last message is not a function call or exit command.

Source code in autogen/agentchat/conversable_agent.py
def process_last_received_message(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Calls any registered capability hooks to use and potentially modify the text of the last message,
    as long as the last message is not a function call or exit command.
    """
    # If any required condition is not met, return the original message list.
    hook_list = self.hook_lists["process_last_received_message"]
    if len(hook_list) == 0:
        return messages  # No hooks registered.
    if messages is None:
        return None  # No message to process.
    if len(messages) == 0:
        return messages  # No message to process.
    last_message = messages[-1]
    if "function_call" in last_message:
        return messages  # Last message is a function call.
    if "context" in last_message:
        return messages  # Last message contains a context key.
    if "content" not in last_message:
        return messages  # Last message has no content.

    user_content = last_message["content"]
    if not isinstance(user_content, str) and not isinstance(user_content, list):
        # if the user_content is a string, it is for regular LLM
        # if the user_content is a list, it should follow the multimodal LMM format.
        return messages
    if user_content == "exit":
        return messages  # Last message is an exit command.

    # Call each hook (in order of registration) to process the user's message.
    processed_user_content = user_content
    for hook in hook_list:
        processed_user_content = hook(processed_user_content)

    if processed_user_content == user_content:
        return messages  # No hooks actually modified the user's message.

    # Replace the last user message with the expanded one.
    messages = messages.copy()
    messages[-1]["content"] = processed_user_content
    return messages

print_usage_summary #

print_usage_summary(mode=['actual', 'total'])

Print the usage summary.

Source code in autogen/agentchat/conversable_agent.py
def print_usage_summary(self, mode: Union[str, list[str]] = ["actual", "total"]) -> None:
    """Print the usage summary."""
    iostream = IOStream.get_default()
    if self.client is None:
        iostream.send(ConversableAgentUsageSummaryNoCostIncurredMessage(recipient=self))
    else:
        iostream.send(ConversableAgentUsageSummaryMessage(recipient=self))

    if self.client is not None:
        self.client.print_usage_summary(mode)

get_actual_usage #

get_actual_usage()

Get the actual usage summary.

Source code in autogen/agentchat/conversable_agent.py
def get_actual_usage(self) -> Union[None, dict[str, int]]:
    """Get the actual usage summary."""
    if self.client is None:
        return None
    else:
        return self.client.actual_usage_summary

get_total_usage #

get_total_usage()

Get the total usage summary.

Source code in autogen/agentchat/conversable_agent.py
def get_total_usage(self) -> Union[None, dict[str, int]]:
    """Get the total usage summary."""
    if self.client is None:
        return None
    else:
        return self.client.total_usage_summary
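
A minimal sketch of inspecting usage (the agent setup is illustrative; the exact summary shape depends on the configured client):

```
from autogen import ConversableAgent

agent = ConversableAgent(name="assistant", llm_config=False)  # config illustrative

# With no client configured, the getters return None and the summary
# reports that no cost was incurred.
agent.print_usage_summary(mode=["actual", "total"])
total = agent.get_total_usage()
if total is not None:
    print(total)  # cumulative usage summary for this agent's client
```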

run #

run(message, *, tools=None, executor_kwargs=None, max_turns=None, msg_to='agent', clear_history=False, user_input=True, summary_method=DEFAULT_SUMMARY_METHOD)

Run a chat with the agent using the given message.

A second agent will be created to represent the user; this agent will be known by the name 'user'. It does not have code execution enabled by default; if needed, pass the code execution config in via the executor_kwargs parameter.

The user can terminate the conversation when prompted, or the conversation will terminate if the agent's reply contains 'TERMINATE'.

PARAMETER DESCRIPTION
message

the message to be processed.

TYPE: str

tools

the tools to be used by the agent.

TYPE: Optional[Union[Tool, Iterable[Tool]]] DEFAULT: None

executor_kwargs

the keyword arguments for the executor.

TYPE: Optional[dict[str, Any]] DEFAULT: None

max_turns

maximum number of turns (a turn is equivalent to both agents having replied); defaults to None, which means unlimited. The original message is included.

TYPE: Optional[int] DEFAULT: None

msg_to

which agent is receiving the message and will be the first to reply, defaults to the agent.

TYPE: Literal['agent', 'user'] DEFAULT: 'agent'

clear_history

whether to clear the chat history.

TYPE: bool DEFAULT: False

user_input

the user will be asked for input at their turn.

TYPE: bool DEFAULT: True

summary_method

the method to summarize the chat.

TYPE: Optional[Union[str, Callable[..., Any]]] DEFAULT: DEFAULT_SUMMARY_METHOD

Source code in autogen/agentchat/conversable_agent.py
def run(
    self,
    message: str,
    *,
    tools: Optional[Union[Tool, Iterable[Tool]]] = None,
    executor_kwargs: Optional[dict[str, Any]] = None,
    max_turns: Optional[int] = None,
    msg_to: Literal["agent", "user"] = "agent",
    clear_history: bool = False,
    user_input: bool = True,
    summary_method: Optional[Union[str, Callable[..., Any]]] = DEFAULT_SUMMARY_METHOD,
) -> ChatResult:
    """Run a chat with the agent using the given message.

    A second agent will be created to represent the user; this agent will be known by the name 'user'. It does not have code execution enabled by default; if needed, pass the code execution config in via the executor_kwargs parameter.

    The user can terminate the conversation when prompted, or the conversation will terminate if the agent's reply contains 'TERMINATE'.

    Args:
        message: the message to be processed.
        tools: the tools to be used by the agent.
        executor_kwargs: the keyword arguments for the executor.
        max_turns: maximum number of turns (a turn is equivalent to both agents having replied); defaults to None, which means unlimited. The original message is included.
        msg_to: which agent is receiving the message and will be the first to reply, defaults to the agent.
        clear_history: whether to clear the chat history.
        user_input: the user will be asked for input at their turn.
        summary_method: the method to summarize the chat.
    """
    with self._create_or_get_executor(
        executor_kwargs=executor_kwargs,
        tools=tools,
        agent_name="user",
        agent_human_input_mode="ALWAYS" if user_input else "NEVER",
    ) as executor:
        if msg_to == "agent":
            return executor.initiate_chat(
                self,
                message=message,
                clear_history=clear_history,
                max_turns=max_turns,
                summary_method=summary_method,
            )
        else:
            return self.initiate_chat(
                executor,
                message=message,
                clear_history=clear_history,
                max_turns=max_turns,
                summary_method=summary_method,
            )
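
A minimal sketch (the agent's config and message are illustrative; with user_input=False the auto-created 'user' agent never prompts):

```
from autogen import ConversableAgent

agent = ConversableAgent(name="assistant", llm_config=False)  # config illustrative

result = agent.run(
    "Draft a two-line project status update.",
    max_turns=2,
    user_input=False,
)
print(result.summary)  # ChatResult also carries the chat history
```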

a_run async #

a_run(message, *, tools=None, executor_kwargs=None, max_turns=None, msg_to='agent', clear_history=False, user_input=True, summary_method=DEFAULT_SUMMARY_METHOD)

Run a chat asynchronously with the agent using the given message.

A second agent will be created to represent the user; this agent will be known by the name 'user'.

The user can terminate the conversation when prompted, or the conversation will terminate if the agent's reply contains 'TERMINATE'.

PARAMETER DESCRIPTION
message

the message to be processed.

TYPE: str

tools

the tools to be used by the agent.

TYPE: Optional[Union[Tool, Iterable[Tool]]] DEFAULT: None

executor_kwargs

the keyword arguments for the executor.

TYPE: Optional[dict[str, Any]] DEFAULT: None

max_turns

maximum number of turns (a turn is equivalent to both agents having replied); defaults to None, which means unlimited. The original message is included.

TYPE: Optional[int] DEFAULT: None

msg_to

which agent is receiving the message and will be the first to reply, defaults to the agent.

TYPE: Literal['agent', 'user'] DEFAULT: 'agent'

clear_history

whether to clear the chat history.

TYPE: bool DEFAULT: False

user_input

the user will be asked for input at their turn.

TYPE: bool DEFAULT: True

summary_method

the method to summarize the chat.

TYPE: Optional[Union[str, Callable[..., Any]]] DEFAULT: DEFAULT_SUMMARY_METHOD

Source code in autogen/agentchat/conversable_agent.py
async def a_run(
    self,
    message: str,
    *,
    tools: Optional[Union[Tool, Iterable[Tool]]] = None,
    executor_kwargs: Optional[dict[str, Any]] = None,
    max_turns: Optional[int] = None,
    msg_to: Literal["agent", "user"] = "agent",
    clear_history: bool = False,
    user_input: bool = True,
    summary_method: Optional[Union[str, Callable[..., Any]]] = DEFAULT_SUMMARY_METHOD,
) -> ChatResult:
    """Run a chat asynchronously with the agent using the given message.

    A second agent will be created to represent the user; this agent will be known by the name 'user'.

    The user can terminate the conversation when prompted, or the conversation will terminate if the agent's reply contains 'TERMINATE'.

    Args:
        message: the message to be processed.
        tools: the tools to be used by the agent.
        executor_kwargs: the keyword arguments for the executor.
        max_turns: maximum number of turns (a turn is equivalent to both agents having replied); defaults to None, which means unlimited. The original message is included.
        msg_to: which agent is receiving the message and will be the first to reply, defaults to the agent.
        clear_history: whether to clear the chat history.
        user_input: the user will be asked for input at their turn.
        summary_method: the method to summarize the chat.
    """
    with self._create_or_get_executor(
        executor_kwargs=executor_kwargs,
        tools=tools,
        agent_name="user",
        agent_human_input_mode="ALWAYS" if user_input else "NEVER",
    ) as executor:
        if msg_to == "agent":
            return await executor.a_initiate_chat(
                self,
                message=message,
                clear_history=clear_history,
                max_turns=max_turns,
                summary_method=summary_method,
            )
        else:
            return await self.a_initiate_chat(
                executor,
                message=message,
                clear_history=clear_history,
                max_turns=max_turns,
                summary_method=summary_method,
            )
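
A minimal sketch of the async variant, awaited inside an event loop (the setup is illustrative):

```
import asyncio

from autogen import ConversableAgent

agent = ConversableAgent(name="assistant", llm_config=False)  # config illustrative

async def main() -> None:
    result = await agent.a_run(
        "Draft a two-line project status update.",
        max_turns=1,
        user_input=False,
    )
    print(result.summary)

asyncio.run(main())
```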