Skip to content

DeepResearchTool

autogen.tools.experimental.DeepResearchTool #

DeepResearchTool(llm_config, max_web_steps=30)

Bases: Tool

A tool that delegates a web research task to the subteams of agents.

Initialize the DeepResearchTool.

PARAMETER DESCRIPTION
llm_config

The LLM configuration.

TYPE: dict[str, Any]

max_web_steps

The maximum number of web steps. Defaults to 30.

TYPE: int DEFAULT: 30

Source code in autogen/tools/experimental/deep_research/deep_research.py
def __init__(
    self,
    llm_config: dict[str, Any],
    max_web_steps: int = 30,
):
    """Initialize the DeepResearchTool.

    Builds a summarizer/critic agent pair and registers a single
    ``delegate_research_task`` function as this tool's callable entry point.

    Args:
        llm_config (dict[str, Any]): The LLM configuration.
        max_web_steps (int, optional): The maximum number of web steps. Defaults to 30.
    """
    self.llm_config = llm_config

    self.summarizer_agent = ConversableAgent(
        name="SummarizerAgent",
        system_message=(
            # Fixed prompt: added the missing sentence separators and corrected
            # the garbled "sintesize the answers the original question".
            "You are an agent with a task of answering the question provided by the user. "
            "First you need to split the question into subquestions by calling the 'split_question_and_answer_subquestions' method. "
            "Then you need to synthesize the answer to the original question by combining the answers to the subquestions."
        ),
        # Stop the chat once a message carries the confirmed final answer.
        is_termination_msg=lambda x: x.get("content", "")
        and x.get("content", "").startswith(self.ANSWER_CONFIRMED_PREFIX),
        llm_config=llm_config,
        human_input_mode="NEVER",
    )

    self.critic_agent = ConversableAgent(
        name="CriticAgent",
        system_message=(
            "You are a critic agent responsible for evaluating the answer provided by the summarizer agent.\n"
            "Your task is to assess the quality of the answer based on its coherence, relevance, and completeness.\n"
            "Provide constructive feedback on how the answer can be improved.\n"
            # Fixed: the tool registered below is named 'confirm_summary'; the
            # prompt previously referenced a non-existent 'confirm_answer'.
            "If the answer is satisfactory, call the 'confirm_summary' method to end the task.\n"
        ),
        is_termination_msg=lambda x: x.get("content", "")
        and x.get("content", "").startswith(self.ANSWER_CONFIRMED_PREFIX),
        llm_config=llm_config,
        human_input_mode="NEVER",
    )

    def delegate_research_task(
        task: Annotated[str, "The task to perform a research on."],
        llm_config: Annotated[dict[str, Any], Depends(on(llm_config))],
        max_web_steps: Annotated[int, Depends(on(max_web_steps))],
    ) -> str:
        """Delegate a research task to the agent.

        Args:
            task (str): The task to perform a research on.
            llm_config (dict[str, Any]): The LLM configuration.
            max_web_steps (int): The maximum number of web steps.

        Returns:
            str: The answer to the research task.
        """

        # The critic proposes confirmation (register_for_llm) and the summarizer
        # executes it; its return value starts with ANSWER_CONFIRMED_PREFIX, the
        # termination marker both agents watch for.
        @self.summarizer_agent.register_for_execution()
        @self.critic_agent.register_for_llm(description="Call this method to confirm the final answer.")
        def confirm_summary(answer: str, reasoning: str) -> str:
            return f"{self.ANSWER_CONFIRMED_PREFIX}" + answer + "\nReasoning: " + reasoning

        split_question_and_answer_subquestions = DeepResearchTool._get_split_question_and_answer_subquestions(
            llm_config=llm_config,
            max_web_steps=max_web_steps,
        )

        # Mirror-image registration: the summarizer proposes the split, the
        # critic executes it.
        self.summarizer_agent.register_for_llm(description="Split the question into subquestions and get answers.")(
            split_question_and_answer_subquestions
        )
        self.critic_agent.register_for_execution()(split_question_and_answer_subquestions)

        result = self.critic_agent.initiate_chat(
            self.summarizer_agent,
            message="Please answer the following question: " + task,
            # This outer chat should preserve the history of the conversation
            clear_history=False,
        )

        return result.summary

    super().__init__(
        name=delegate_research_task.__name__,
        description="Delegate a research task to the deep research agent.",
        func_or_tool=delegate_research_task,
    )

name property #

name

description property #

description

func property #

func

tool_schema property #

tool_schema

Get the schema for the tool.

This is the preferred way of handling function calls with OpenAI and compatible frameworks.

function_schema property #

function_schema

Get the schema for the function.

This is the old way of handling function calls with OpenAI and compatible frameworks. It is provided for backward compatibility.

realtime_tool_schema property #

realtime_tool_schema

Get the schema for the tool.

This is the preferred way of handling function calls with OpenAI and compatible frameworks.

ANSWER_CONFIRMED_PREFIX class-attribute instance-attribute #

ANSWER_CONFIRMED_PREFIX = 'Answer confirmed:'

llm_config instance-attribute #

llm_config = llm_config

summarizer_agent instance-attribute #

summarizer_agent = ConversableAgent(name='SummarizerAgent', system_message="You are an agent with a task of answering the question provided by the user.First you need to split the question into subquestions by calling the 'split_question_and_answer_subquestions' method.Then you need to synthesize the answer to the original question by combining the answers to the subquestions.", is_termination_msg=lambda x: get('content', '') and startswith(ANSWER_CONFIRMED_PREFIX), llm_config=llm_config, human_input_mode='NEVER')

critic_agent instance-attribute #

critic_agent = ConversableAgent(name='CriticAgent', system_message="You are a critic agent responsible for evaluating the answer provided by the summarizer agent.\nYour task is to assess the quality of the answer based on its coherence, relevance, and completeness.\nProvide constructive feedback on how the answer can be improved.\nIf the answer is satisfactory, call the 'confirm_answer' method to end the task.\n", is_termination_msg=lambda x: get('content', '') and startswith(ANSWER_CONFIRMED_PREFIX), llm_config=llm_config, human_input_mode='NEVER')

SUBQUESTIONS_ANSWER_PREFIX class-attribute instance-attribute #

SUBQUESTIONS_ANSWER_PREFIX = 'Subquestions answered:'

register_for_llm #

register_for_llm(agent)

Registers the tool for use with a ConversableAgent's language model (LLM).

This method registers the tool so that it can be invoked by the agent during interactions with the language model.

PARAMETER DESCRIPTION
agent

The agent to which the tool will be registered.

TYPE: ConversableAgent

Source code in autogen/tools/tool.py
def register_for_llm(self, agent: "ConversableAgent") -> None:
    """Make this tool proposable by *agent*'s language model.

    Once registered, the agent may suggest calls to this tool during its
    interactions with the LLM.

    Args:
        agent (ConversableAgent): The agent to which the tool will be registered.
    """
    register = agent.register_for_llm()
    register(self)

register_for_execution #

register_for_execution(agent)

Registers the tool for direct execution by a ConversableAgent.

This method registers the tool so that it can be executed by the agent, typically outside of the context of an LLM interaction.

PARAMETER DESCRIPTION
agent

The agent to which the tool will be registered.

TYPE: ConversableAgent

Source code in autogen/tools/tool.py
def register_for_execution(self, agent: "ConversableAgent") -> None:
    """Allow *agent* to execute this tool directly.

    Execution registration lets the agent run the tool itself, typically
    outside of an LLM interaction.

    Args:
        agent (ConversableAgent): The agent to which the tool will be registered.
    """
    executor = agent.register_for_execution()
    executor(self)

register_tool #

register_tool(agent)

Register a tool to be both proposed and executed by an agent.

Equivalent to calling both register_for_llm and register_for_execution with the same agent.

Note: This will not make the agent recommend and execute the call in the one step. If the agent recommends the tool, it will need to be the next agent to speak in order to execute the tool.

PARAMETER DESCRIPTION
agent

The agent to which the tool will be registered.

TYPE: ConversableAgent

Source code in autogen/tools/tool.py
def register_tool(self, agent: "ConversableAgent") -> None:
    """Register this tool with *agent* for both proposal and execution.

    Equivalent to calling both `register_for_llm` and `register_for_execution`
    with the same agent.

    Note: This will not make the agent recommend and execute the call in the
    one step. If the agent recommends the tool, it will need to be the next
    agent to speak in order to execute the tool.

    Args:
        agent (ConversableAgent): The agent to which the tool will be registered.
    """
    # LLM registration first, then execution — same order as before.
    for registrar in (self.register_for_llm, self.register_for_execution):
        registrar(agent)