Skip to content

AgentBuilder

autogen.agentchat.contrib.captainagent.AgentBuilder #

AgentBuilder(config_file_or_env='OAI_CONFIG_LIST', config_file_location='', builder_model=[], agent_model=[], builder_model_tags=[], agent_model_tags=[], max_agents=5)

AgentBuilder can help users build an automatic task-solving process powered by a multi-agent system. Specifically, our building pipeline includes initialize and build.

(These APIs are experimental and may change in the future.)

PARAMETER DESCRIPTION
config_file_or_env

Path to the config file or name of the environment variable containing the OpenAI API configurations. Defaults to "OAI_CONFIG_LIST".

TYPE: Optional[str] DEFAULT: 'OAI_CONFIG_LIST'

config_file_location

Location of the config file if not in the current directory. Defaults to "".

TYPE: Optional[str] DEFAULT: ''

builder_model

Model identifier(s) to use as the builder/manager model that coordinates agent creation. Can be a string or list of strings. Filters the config list to match these models. Defaults to [].

TYPE: Optional[Union[str, list]] DEFAULT: []

agent_model

Model identifier(s) to use for the generated participant agents. Can be a string or list of strings. Defaults to [].

TYPE: Optional[Union[str, list]] DEFAULT: []

builder_model_tags

Tags to filter which models from the config can be used as builder models. Defaults to [].

TYPE: Optional[list] DEFAULT: []

agent_model_tags

Tags to filter which models from the config can be used as agent models. Defaults to [].

TYPE: Optional[list] DEFAULT: []

max_agents

Maximum number of agents to create for each task. Defaults to 5.

TYPE: Optional[int] DEFAULT: 5

Source code in autogen/agentchat/contrib/captainagent/agent_builder.py
def __init__(
    self,
    config_file_or_env: Optional[str] = "OAI_CONFIG_LIST",
    config_file_location: Optional[str] = "",
    builder_model: Optional[Union[str, list]] = None,
    agent_model: Optional[Union[str, list]] = None,
    builder_model_tags: Optional[list] = None,
    agent_model_tags: Optional[list] = None,
    max_agents: Optional[int] = 5,
):
    """(These APIs are experimental and may change in the future.)

    Args:
        config_file_or_env (Optional[str], optional): Path to the config file or name of the environment
            variable containing the OpenAI API configurations. Defaults to "OAI_CONFIG_LIST".
        config_file_location (Optional[str], optional): Location of the config file if not in the
            current directory. Defaults to "".
        builder_model (Optional[Union[str, list]], optional): Model identifier(s) to use as the
            builder/manager model that coordinates agent creation. Can be a string or list of strings.
            Filters the config list to match these models. Defaults to None (treated as []).
        agent_model (Optional[Union[str, list]], optional): Model identifier(s) to use for the
            generated participant agents. Can be a string or list of strings. Defaults to None
            (treated as []).
        builder_model_tags (Optional[list], optional): Tags to filter which models from the config
            can be used as builder models. Defaults to None (treated as []).
        agent_model_tags (Optional[list], optional): Tags to filter which models from the config
            can be used as agent models. Defaults to None (treated as []).
        max_agents (Optional[int], optional): Maximum number of agents to create for each task.
            Defaults to 5.

    Raises:
        RuntimeError: if no config entry matches the requested builder model/tags.
    """
    # `None` defaults avoid the shared mutable-default-argument pitfall; they
    # are normalized here so the [] semantics of the old signature still hold.
    builder_model = [] if builder_model is None else builder_model
    builder_model = builder_model if isinstance(builder_model, list) else [builder_model]
    builder_model_tags = [] if builder_model_tags is None else builder_model_tags
    agent_model = [] if agent_model is None else agent_model
    agent_model_tags = [] if agent_model_tags is None else agent_model_tags

    # Build a filter for the config list: restrict by model name and/or tags
    # only when the caller actually supplied them.
    builder_filter_dict = {}
    if len(builder_model) != 0:
        builder_filter_dict.update({"model": builder_model})
    if len(builder_model_tags) != 0:
        builder_filter_dict.update({"tags": builder_model_tags})
    builder_config_list = config_list_from_json(
        config_file_or_env, file_location=config_file_location, filter_dict=builder_filter_dict
    )
    if len(builder_config_list) == 0:
        raise RuntimeError(
            f"Fail to initialize build manager: {builder_model}{builder_model_tags} does not exist in {config_file_or_env}. "
            f'If you want to change this model, please specify the "builder_model" in the constructor.'
        )
    # The build manager LLM used to generate agent names/system messages.
    self.builder_model = OpenAIWrapper(config_list=builder_config_list)

    self.agent_model = agent_model if isinstance(agent_model, list) else [agent_model]
    self.agent_model_tags = agent_model_tags
    self.config_file_or_env = config_file_or_env
    self.config_file_location = config_file_location

    # Set when build()/build_from_library() is called.
    self.building_task: Optional[str] = None
    self.agent_configs: list[dict[str, Any]] = []
    self.open_ports: list[str] = []
    # server_id -> (process, port); agent_name -> (agent, server_id)
    self.agent_procs: dict[str, tuple[sp.Popen, str]] = {}
    self.agent_procs_assign: dict[str, tuple[ConversableAgent, str]] = {}
    self.cached_configs: dict = {}

    self.max_agents = max_agents

online_server_name class-attribute instance-attribute #

online_server_name = 'online'

DEFAULT_PROXY_AUTO_REPLY class-attribute instance-attribute #

DEFAULT_PROXY_AUTO_REPLY = 'There is no code from the last 1 message for me to execute. Group chat manager should let other participants to continue the conversation. If the group chat manager want to end the conversation, you should let other participant reply me only with "TERMINATE"'

GROUP_CHAT_DESCRIPTION class-attribute instance-attribute #

GROUP_CHAT_DESCRIPTION = ' # Group chat instruction\nYou are now working in a group chat with different expert and a group chat manager.\nYou should refer to the previous message from other participant members or yourself, follow their topic and reply to them.\n\n**Your role is**: {name}\nGroup chat members: {members}{user_proxy_desc}\n\nWhen the task is complete and the result has been carefully verified, after obtaining agreement from the other members, you can end the conversation by replying only with "TERMINATE".\n\n# Your profile\n{sys_msg}\n'

DEFAULT_DESCRIPTION class-attribute instance-attribute #

DEFAULT_DESCRIPTION = "## Your role\n[Complete this part with expert's name and skill description]\n\n## Task and skill instructions\n- [Complete this part with task description]\n- [Complete this part with skill description]\n- [(Optional) Complete this part with other information]\n"

CODING_AND_TASK_SKILL_INSTRUCTION class-attribute instance-attribute #

CODING_AND_TASK_SKILL_INSTRUCTION = "## Useful instructions for task-solving\n- Solve the task step by step if you need to.\n- When you find an answer, verify the answer carefully. Include verifiable evidence with possible test case in your response if possible.\n- All your reply should be based on the provided facts.\n\n## How to verify?\n**You have to keep believing that everyone else's answers are wrong until they provide clear enough evidence.**\n- Verifying with step-by-step backward reasoning.\n- Write test cases according to the general task.\n\n## How to use code?\n- Suggest python code (in a python coding block) or shell script (in a sh coding block) for the Computer_terminal to execute.\n- If missing python packages, you can install the package by suggesting a `pip install` code in the ```sh ... ``` block.\n- When using code, you must indicate the script type in the coding block.\n- Do not the coding block which requires users to modify.\n- Do not suggest a coding block if it's not intended to be executed by the Computer_terminal.\n- The Computer_terminal cannot modify your code.\n- **Use 'print' function for the output when relevant**.\n- Check the execution result returned by the Computer_terminal.\n- Do not ask Computer_terminal to copy and paste the result.\n- If the result indicates there is an error, fix the error and output the code again. "

CODING_PROMPT class-attribute instance-attribute #

CODING_PROMPT = 'Does the following task need programming (i.e., access external API or tool by coding) to solve,\nor coding may help the following task become easier?\n\nTASK: {task}\n\nAnswer only YES or NO.\n'

AGENT_NAME_PROMPT class-attribute instance-attribute #

AGENT_NAME_PROMPT = '# Your task\nSuggest no more than {max_agents} experts with their name according to the following user requirement.\n\n## User requirement\n{task}\n\n# Task requirement\n- Expert\'s name should follow the format: [skill]_Expert.\n- Only reply the names of the experts, separated by ",".\n- If coding skills are required, they should be limited to Python and Shell.\nFor example: Python_Expert, Math_Expert, ... '

AGENT_SYS_MSG_PROMPT class-attribute instance-attribute #

AGENT_SYS_MSG_PROMPT = '# Your goal\n- According to the task and expert name, write a high-quality description for the expert by filling the given template.\n- Ensure that your description are clear and unambiguous, and include all necessary information.\n\n# Task\n{task}\n\n# Expert name\n{position}\n\n# Template\n{default_sys_msg}\n'

AGENT_DESCRIPTION_PROMPT class-attribute instance-attribute #

AGENT_DESCRIPTION_PROMPT = "# Your goal\nSummarize the following expert's description in a sentence.\n\n# Expert name\n{position}\n\n# Expert's description\n{sys_msg}\n"

AGENT_SEARCHING_PROMPT class-attribute instance-attribute #

AGENT_SEARCHING_PROMPT = '# Your goal\nConsidering the following task, what experts should be involved to the task?\n\n# TASK\n{task}\n\n# EXPERT LIST\n{agent_list}\n\n# Requirement\n- You should consider if the experts\' name and profile match the task.\n- Considering the effort, you should select less then {max_agents} experts; less is better.\n- Separate expert names by commas and use "_" instead of space. For example, Product_manager,Programmer\n- Only return the list of expert names.\n'

AGENT_SELECTION_PROMPT class-attribute instance-attribute #

AGENT_SELECTION_PROMPT = '# Your goal\nMatch roles in the role set to each expert in expert set.\n\n# Skill set\n{skills}\n\n# Expert pool (formatting with name: description)\n{expert_pool}\n\n# Answer format\n```json\n{{\n    "skill_1 description": "expert_name: expert_description", // if there exists an expert that suitable for skill_1\n    "skill_2 description": "None", // if there is no experts that suitable for skill_2\n    ...\n}}\n```\n'

builder_model instance-attribute #

builder_model = OpenAIWrapper(config_list=builder_config_list)

agent_model instance-attribute #

agent_model = agent_model if isinstance(agent_model, list) else [agent_model]

agent_model_tags instance-attribute #

agent_model_tags = agent_model_tags

config_file_or_env instance-attribute #

config_file_or_env = config_file_or_env

config_file_location instance-attribute #

config_file_location = config_file_location

building_task instance-attribute #

building_task = None

agent_configs instance-attribute #

agent_configs = []

open_ports instance-attribute #

open_ports = []

agent_procs instance-attribute #

agent_procs = {}

agent_procs_assign instance-attribute #

agent_procs_assign = {}

cached_configs instance-attribute #

cached_configs = {}

max_agents instance-attribute #

max_agents = max_agents

set_builder_model #

set_builder_model(model)
Source code in autogen/agentchat/contrib/captainagent/agent_builder.py
def set_builder_model(self, model: str):
    """Replace the build-manager model with *model* (a model identifier string)."""
    self.builder_model = model

set_agent_model #

set_agent_model(model)
Source code in autogen/agentchat/contrib/captainagent/agent_builder.py
def set_agent_model(self, model: str):
    """Replace the model used for newly generated participant agents."""
    self.agent_model = model

clear_agent #

clear_agent(agent_name, recycle_endpoint=True)

Clear a specific agent by name.

PARAMETER DESCRIPTION
agent_name

the name of agent.

TYPE: str

recycle_endpoint

Whether to recycle the endpoint server. If True, the endpoint will be recycled when no remaining agent depends on it.

TYPE: Optional[bool] DEFAULT: True

Source code in autogen/agentchat/contrib/captainagent/agent_builder.py
def clear_agent(self, agent_name: str, recycle_endpoint: Optional[bool] = True):
    """Clear a specific agent by name.

    Args:
        agent_name: the name of the agent to remove.
        recycle_endpoint: whether to recycle the endpoint server. If True, the
            endpoint is terminated once no remaining agent depends on it.
    """
    _, server_id = self.agent_procs_assign.pop(agent_name)
    if recycle_endpoint:
        # Online endpoints are shared and never torn down here; the
        # confirmation message is also skipped on this path.
        if server_id == self.online_server_name:
            return
        # Keep the endpoint alive while any other agent still uses it
        # (message skipped on this path as well).
        if any(server_id == assigned_sid for _, assigned_sid in self.agent_procs_assign.values()):
            return
        self.agent_procs[server_id][0].terminate()
        self.open_ports.append(server_id.split("_")[-1])
    print(colored(f"Agent {agent_name} has been cleared.", "yellow"), flush=True)

clear_all_agents #

clear_all_agents(recycle_endpoint=True)

Clear all cached agents.

Source code in autogen/agentchat/contrib/captainagent/agent_builder.py
def clear_all_agents(self, recycle_endpoint: Optional[bool] = True):
    """Clear all cached agents.

    Args:
        recycle_endpoint: forwarded to `clear_agent`; if True, each endpoint
            server is recycled once no remaining agent depends on it.
    """
    # Iterate over a snapshot of the keys: clear_agent mutates the dict.
    for agent_name in list(self.agent_procs_assign):
        self.clear_agent(agent_name, recycle_endpoint)
    print(colored("All agents have been cleared.", "yellow"), flush=True)

build #

build(building_task, default_llm_config, coding=None, code_execution_config=None, use_oai_assistant=False, user_proxy=None, max_agents=None, **kwargs)

Auto build agents based on the building task.

PARAMETER DESCRIPTION
building_task

instruction that helps build manager (gpt-4) to decide what agent should be built.

TYPE: str

default_llm_config

specific configs for LLM (e.g., config_list, seed, temperature, ...).

TYPE: dict[str, Any]

coding

use to identify if the user proxy (a code interpreter) should be added.

TYPE: Optional[bool] DEFAULT: None

code_execution_config

specific configs for user proxy (e.g., last_n_messages, work_dir, ...).

TYPE: Optional[dict[str, Any]] DEFAULT: None

use_oai_assistant

use OpenAI assistant api instead of self-constructed agent.

TYPE: Optional[bool] DEFAULT: False

user_proxy

user proxy's class that can be used to replace the default user proxy.

TYPE: Optional[ConversableAgent] DEFAULT: None

max_agents

Maximum number of agents to create for the task. If None, uses the value from self.max_agents.

TYPE: Optional[int], default=None DEFAULT: None

**kwargs

Additional arguments to pass to _build_agents. - agent_configs: Optional list of predefined agent configurations to use.

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
agent_list

a list of agents.

TYPE: list[ConversableAgent]

cached_configs

cached configs.

TYPE: dict[str, Any]

Source code in autogen/agentchat/contrib/captainagent/agent_builder.py
def build(
    self,
    building_task: str,
    default_llm_config: dict[str, Any],
    coding: Optional[bool] = None,
    code_execution_config: Optional[dict[str, Any]] = None,
    use_oai_assistant: Optional[bool] = False,
    user_proxy: Optional[ConversableAgent] = None,
    max_agents: Optional[int] = None,
    **kwargs: Any,
) -> tuple[list[ConversableAgent], dict[str, Any]]:
    """Auto build agents based on the building task.

    Args:
        building_task: instruction that helps build manager (gpt-4) to decide what agent should be built.
        default_llm_config: specific configs for LLM (e.g., config_list, seed, temperature, ...).
        coding: use to identify if the user proxy (a code interpreter) should be added.
            If None, the build manager is asked whether the task needs coding.
        code_execution_config: specific configs for user proxy (e.g., last_n_messages, work_dir, ...).
        use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
        user_proxy: user proxy's class that can be used to replace the default user proxy.
        max_agents (Optional[int], default=None): Maximum number of agents to create for the task. If None, uses the value from self.max_agents.
        **kwargs (Any): Additional arguments to pass to _build_agents.
            - agent_configs: Optional list of predefined agent configurations to use.

    Returns:
        agent_list: a list of agents.
        cached_configs: cached configs.
    """
    if code_execution_config is None:
        code_execution_config = {
            "last_n_messages": 1,
            "work_dir": "groupchat",
            "use_docker": False,
            "timeout": 10,
        }

    if max_agents is None:
        max_agents = self.max_agents

    # NOTE: the same list object is forwarded to _build_agents via **kwargs
    # below, so generated configs are appended in place rather than to a copy.
    agent_configs = kwargs.get("agent_configs", [])
    self.building_task = building_task

    # Step 1: ask the build manager for a comma-separated list of expert names.
    print(colored("==> Generating agents...", "green"), flush=True)
    resp_agent_name = (
        self.builder_model.create(
            messages=[
                {
                    "role": "user",
                    "content": self.AGENT_NAME_PROMPT.format(task=building_task, max_agents=max_agents),
                }
            ]
        )
        .choices[0]
        .message.content
    )
    agent_name_list = [agent_name.strip().replace(" ", "_") for agent_name in resp_agent_name.split(",")]
    print(f"{agent_name_list} are generated.", flush=True)

    # Step 2: generate a system message for each expert.
    print(colored("==> Generating system message...", "green"), flush=True)
    agent_sys_msg_list = []
    for name in agent_name_list:
        print(f"Preparing system message for {name}", flush=True)
        resp_agent_sys_msg = (
            self.builder_model.create(
                messages=[
                    {
                        "role": "user",
                        "content": self.AGENT_SYS_MSG_PROMPT.format(
                            task=building_task,
                            position=name,
                            default_sys_msg=self.DEFAULT_DESCRIPTION,
                        ),
                    }
                ]
            )
            .choices[0]
            .message.content
        )
        agent_sys_msg_list.append(resp_agent_sys_msg)

    # Step 3: summarize each system message into a one-sentence description.
    print(colored("==> Generating description...", "green"), flush=True)
    agent_description_list = []
    for name, sys_msg in zip(agent_name_list, agent_sys_msg_list):
        print(f"Preparing description for {name}", flush=True)
        resp_agent_description = (
            self.builder_model.create(
                messages=[
                    {
                        "role": "user",
                        "content": self.AGENT_DESCRIPTION_PROMPT.format(position=name, sys_msg=sys_msg),
                    }
                ]
            )
            .choices[0]
            .message.content
        )
        agent_description_list.append(resp_agent_description)

    for name, sys_msg, description in zip(agent_name_list, agent_sys_msg_list, agent_description_list):
        agent_configs.append({
            "name": name,
            "model": self.agent_model,
            "tags": self.agent_model_tags,
            "system_message": sys_msg,
            "description": description,
        })

    # If the caller did not decide, ask the build manager whether coding is needed.
    if coding is None:
        resp = (
            self.builder_model.create(
                messages=[{"role": "user", "content": self.CODING_PROMPT.format(task=building_task)}]
            )
            .choices[0]
            .message.content
        )
        # Strip whitespace so replies like "YES\n" are still recognized.
        coding = resp.strip() == "YES"

    self.cached_configs.update({
        "building_task": building_task,
        "agent_configs": agent_configs,
        "coding": coding,
        "default_llm_config": default_llm_config,
        "code_execution_config": code_execution_config,
    })
    _config_check(self.cached_configs)
    return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs)

build_from_library #

build_from_library(building_task, library_path_or_json, default_llm_config, top_k=3, coding=None, code_execution_config=None, use_oai_assistant=False, embedding_model='all-mpnet-base-v2', user_proxy=None, **kwargs)

Build agents from a library. The library is a list of agent configs, which contains the name and system_message for each agent. We use a build manager to decide which agents in that library should be involved in the task.

PARAMETER DESCRIPTION
building_task

instruction that helps build manager (gpt-4) to decide what agent should be built.

TYPE: str

library_path_or_json

path or JSON string config of agent library.

TYPE: str

default_llm_config

specific configs for LLM (e.g., config_list, seed, temperature, ...).

TYPE: dict[str, Any]

top_k

number of results to return.

TYPE: int DEFAULT: 3

coding

use to identify if the user proxy (a code interpreter) should be added.

TYPE: Optional[bool] DEFAULT: None

code_execution_config

specific configs for user proxy (e.g., last_n_messages, work_dir, ...).

TYPE: Optional[dict[str, Any]] DEFAULT: None

use_oai_assistant

use OpenAI assistant api instead of self-constructed agent.

TYPE: Optional[bool] DEFAULT: False

embedding_model

a Sentence-Transformers model used for embedding similarity to select agents from the library. For reference, chromadb uses "all-mpnet-base-v2" as the default.

TYPE: Optional[str] DEFAULT: 'all-mpnet-base-v2'

user_proxy

user proxy's class that can be used to replace the default user proxy.

TYPE: Optional[ConversableAgent] DEFAULT: None

**kwargs

Additional arguments to pass to _build_agents.

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
agent_list

a list of agents.

TYPE: list[ConversableAgent]

cached_configs

cached configs.

TYPE: dict[str, Any]

Source code in autogen/agentchat/contrib/captainagent/agent_builder.py
def build_from_library(
    self,
    building_task: str,
    library_path_or_json: str,
    default_llm_config: dict[str, Any],
    top_k: int = 3,
    coding: Optional[bool] = None,
    code_execution_config: Optional[dict[str, Any]] = None,
    use_oai_assistant: Optional[bool] = False,
    embedding_model: Optional[str] = "all-mpnet-base-v2",
    user_proxy: Optional[ConversableAgent] = None,
    **kwargs: Any,
) -> tuple[list[ConversableAgent], dict[str, Any]]:
    """Build agents from a library.
    The library is a list of agent configs, which contains the name and system_message for each agent.
    We use a build manager to decide which agents in that library should be involved in the task.

    Args:
        building_task: instruction that helps build manager (gpt-4) to decide what agent should be built.
        library_path_or_json: path or JSON string config of agent library.
        default_llm_config: specific configs for LLM (e.g., config_list, seed, temperature, ...).
        top_k: number of results to return.
        coding: use to identify if the user proxy (a code interpreter) should be added.
        code_execution_config: specific configs for user proxy (e.g., last_n_messages, work_dir, ...).
        use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
        embedding_model: a Sentence-Transformers model used for embedding similarity to select agents from the library.
            As reference, chromadb uses "all-mpnet-base-v2" as default.
        user_proxy: user proxy's class that can be used to replace the default user proxy.
        **kwargs: Additional arguments to pass to _build_agents.

    Returns:
        agent_list: a list of agents.
        cached_configs: cached configs.
    """
    import sqlite3

    # Some systems ship an unexpectedly old SQLite library; fall back to
    # pysqlite3 if available. `sqlite3.sqlite_version` is the version of the
    # SQLite library itself; the previously used `sqlite3.version` was the
    # DB-API module version (always 2.x, removed in Python 3.14), which made
    # this check unconditionally true.
    if int(sqlite3.sqlite_version.split(".")[0]) < 3:
        try:
            __import__("pysqlite3")
            import sys

            sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
        except Exception:
            raise
    import chromadb
    from chromadb.utils import embedding_functions

    if code_execution_config is None:
        code_execution_config = {
            "last_n_messages": 1,
            "work_dir": "groupchat",
            "use_docker": False,
            "timeout": 120,
        }

    # Accept either an inline JSON string or a path to a JSON file.
    try:
        agent_library = json.loads(library_path_or_json)
    except json.decoder.JSONDecodeError:
        with open(library_path_or_json) as f:
            agent_library = json.load(f)

    print(colored("==> Looking for suitable agents in the library...", "green"), flush=True)
    # One skill per non-empty line; blank lines would otherwise trigger
    # meaningless similarity queries against the collection.
    skills = [line for line in building_task.replace(":", " ").split("\n") if line.strip()]
    if len(skills) == 0:
        skills = [building_task]

    # Embed the library descriptions and recall top_k candidates per skill.
    chroma_client = chromadb.Client()
    collection = chroma_client.create_collection(
        name="agent_list",
        embedding_function=embedding_functions.SentenceTransformerEmbeddingFunction(model_name=embedding_model),
    )
    collection.add(
        documents=[agent["description"] for agent in agent_library],
        metadatas=[{"source": "agent_profile"} for _ in range(len(agent_library))],
        ids=[f"agent_{i}" for i in range(len(agent_library))],
    )
    agent_desc_list = set()
    for skill in skills:
        recall = set(collection.query(query_texts=[skill], n_results=top_k)["documents"][0])
        agent_desc_list = agent_desc_list.union(recall)

    agent_config_list = []
    for description in list(agent_desc_list):
        for agent in agent_library:
            if agent["description"] == description:
                agent_config_list.append(agent.copy())
                break
    chroma_client.delete_collection(collection.name)

    # double recall from the searching result: let the build manager match
    # each required skill to an expert (or "None") and retry until it
    # returns parseable JSON.
    expert_pool = [f"{agent['name']}: {agent['description']}" for agent in agent_config_list]
    while True:
        skill_agent_pair_json = (
            self.builder_model.create(
                messages=[
                    {
                        "role": "user",
                        "content": self.AGENT_SELECTION_PROMPT.format(
                            skills=building_task, expert_pool=expert_pool, max_agents=self.max_agents
                        ),
                    }
                ]
            )
            .choices[0]
            .message.content
        )
        try:
            skill_agent_pair_json = _retrieve_json(skill_agent_pair_json)
            skill_agent_pair = json.loads(skill_agent_pair_json)
            break
        except Exception as e:
            print(e, flush=True)
            time.sleep(5)
            continue

    recalled_agent_config_list = []
    recalled_name_desc = []
    for skill, agent_profile in skill_agent_pair.items():
        # If no suitable agent, generate an agent
        if agent_profile == "None":
            _, agent_config_temp = self.build(
                building_task=skill,
                default_llm_config=default_llm_config.copy(),
                coding=False,
                use_oai_assistant=use_oai_assistant,
                max_agents=1,
            )
            self.clear_agent(agent_config_temp["agent_configs"][0]["name"])
            recalled_agent_config_list.append(agent_config_temp["agent_configs"][0])
        else:
            if agent_profile in recalled_name_desc:
                # prevent identical agents
                continue
            recalled_name_desc.append(agent_profile)
            # Split only on the first colon so descriptions containing ":" survive intact.
            name, _, desc = agent_profile.partition(":")
            name = name.strip()
            desc = desc.strip()
            for agent in agent_config_list:
                if name == agent["name"] and desc == agent["description"]:
                    recalled_agent_config_list.append(agent.copy())

    print(f"{[agent['name'] for agent in recalled_agent_config_list]} are selected.", flush=True)

    # If the caller did not decide, ask the build manager whether coding is needed.
    if coding is None:
        resp = (
            self.builder_model.create(
                messages=[{"role": "user", "content": self.CODING_PROMPT.format(task=building_task)}]
            )
            .choices[0]
            .message.content
        )
        # Strip whitespace so replies like "YES\n" are still recognized.
        coding = resp.strip() == "YES"

    self.cached_configs.update({
        "building_task": building_task,
        "agent_configs": recalled_agent_config_list,
        "coding": coding,
        "default_llm_config": default_llm_config,
        "code_execution_config": code_execution_config,
    })
    _config_check(self.cached_configs)

    return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs)

save #

save(filepath=None)

Save building configs. If the filepath is not specified, this function will create a filename by hashing the building_task string with md5, prefixed with "save_config_", and save the config to that local path.

PARAMETER DESCRIPTION
filepath

save path.

TYPE: Optional[str] DEFAULT: None

Return

filepath: the path the config was saved to.

Source code in autogen/agentchat/contrib/captainagent/agent_builder.py
def save(self, filepath: Optional[str] = None) -> str:
    """Persist the cached building configs as a JSON file.

    When *filepath* is omitted, a filename is derived from the md5 digest of
    the building task, prefixed with "save_config_", in the current directory.

    Args:
        filepath: destination path for the JSON file.

    Return:
        filepath: the path the config was written to.
    """
    if filepath is None:
        task_digest = hashlib.md5(self.building_task.encode("utf-8")).hexdigest()
        filepath = f"./save_config_{task_digest}.json"
    with open(filepath, "w") as save_file:
        json.dump(self.cached_configs, save_file, indent=4)
    print(colored(f"Building config saved to {filepath}", "green"), flush=True)

    return filepath

load #

load(filepath=None, config_json=None, use_oai_assistant=False, **kwargs)

Load building configs and call the build function to complete building without calling online LLMs' api.

PARAMETER DESCRIPTION
filepath

filepath or JSON string for the save config.

TYPE: Optional[str] DEFAULT: None

config_json

JSON string for the save config.

TYPE: Optional[str] DEFAULT: None

use_oai_assistant

use OpenAI assistant api instead of self-constructed agent.

TYPE: Optional[bool] DEFAULT: False

**kwargs

Additional arguments to pass to _build_agents: - code_execution_config (Optional[dict[str, Any]]): If provided, overrides the code execution configuration from the loaded config.

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
agent_list

a list of agents.

TYPE: list[ConversableAgent]

cached_configs

cached configs.

TYPE: dict[str, Any]

Source code in autogen/agentchat/contrib/captainagent/agent_builder.py
def load(
    self,
    filepath: Optional[str] = None,
    config_json: Optional[str] = None,
    use_oai_assistant: Optional[bool] = False,
    **kwargs: Any,
) -> tuple[list[ConversableAgent], dict[str, Any]]:
    """Load building configs and call the build function to complete building without calling online LLMs' api.

    Args:
        filepath: filepath for the save config; takes precedence over `config_json` when both are given.
        config_json: JSON string for the save config.
        use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
        **kwargs (Any): Additional arguments to pass to _build_agents:
            - code_execution_config (Optional[dict[str, Any]]): If provided, overrides the
            code execution configuration from the loaded config.

    Returns:
        agent_list: a list of agents.
        cached_configs: cached configs.

    Raises:
        ValueError: if neither `filepath` nor `config_json` is provided.
    """
    if filepath is None and config_json is None:
        # Previously this path died with an UnboundLocalError; fail clearly instead.
        raise ValueError("Either 'filepath' or 'config_json' must be provided.")

    # load json string.
    if config_json is not None:
        print(colored("Loading config from JSON...", "green"), flush=True)
        cached_configs = json.loads(config_json)

    # load from path (overrides config_json when both are given).
    if filepath is not None:
        print(colored(f"Loading config from {filepath}", "green"), flush=True)
        with open(filepath) as f:
            cached_configs = json.load(f)

    _config_check(cached_configs)

    agent_configs = cached_configs["agent_configs"]
    default_llm_config = cached_configs["default_llm_config"]
    coding = cached_configs["coding"]

    if kwargs.get("code_execution_config") is not None:
        # An explicit override replaces the stored config; pop it so it is
        # not forwarded to _build_agents a second time.
        code_execution_config = kwargs.pop("code_execution_config")
    else:
        code_execution_config = cached_configs["code_execution_config"]

    self.cached_configs.update({
        "building_task": cached_configs["building_task"],
        "agent_configs": agent_configs,
        "coding": coding,
        "default_llm_config": default_llm_config,
        "code_execution_config": code_execution_config,
    })
    return self._build_agents(use_oai_assistant, **kwargs)