Skip to main content
View the source code for this module on GitHub: https://github.com/mcp-use/mcp-use/blob/main/mcp_use/agents/mcpagent.py
MCP: Main integration module with customizable system prompt. This module provides the main MCPAgent class that integrates all components to provide a simple interface for using MCP tools with different LLMs.

MCPAgent

from mcp_use.agents.mcpagent import MCPAgent

method init

Initialize a new MCPAgent instance.

Parameters
llm
langchain_core.language_models.base.BaseLanguageModel | None
default:"None"
The LangChain LLM to use. Not required if agent_id is provided for remote execution.
client
mcp_use.client.MCPClient | None
default:"None"
The MCPClient to use. If provided, connector is ignored.
connectors
list[mcp_use.connectors.base.BaseConnector] | None
default:"None"
A list of MCP connectors to use if client is not provided.
max_steps
int
default:"5"
The maximum number of steps to take.
auto_initialize
bool
default:"False"
Whether to automatically initialize the agent when run is called.
memory_enabled
bool
default:"True"
Whether to maintain conversation history for context.
system_prompt
str | None
default:"None"
Complete system prompt to use (overrides template if provided).
system_prompt_template
str | None
default:"None"
Template for system prompt with placeholder.
additional_instructions
str | None
default:"None"
Extra instructions to append to the system prompt.
disallowed_tools
list[str] | None
default:"None"
List of tool names that should not be available to the agent.
tools_used_names
list[str] | None
default:"None"
List of tool names used by the agent (presumably for usage tracking — confirm against the implementation).
use_server_manager
bool
default:"False"
Whether to use server manager mode instead of exposing all tools.
server_manager
mcp_use.managers.base.BaseServerManager | None
default:"None"
Custom server manager instance to use instead of the default one.
verbose
bool
default:"False"
Enable debug/verbose mode
agent_id
str | None
default:"None"
Remote agent ID for remote execution. If provided, creates a remote agent.
api_key
str | None
default:"None"
API key for remote execution. If None, checks MCP_USE_API_KEY env var.
base_url
str
default:"https://cloud.mcp-use.com"
Base URL for remote API calls.
callbacks
list | None
default:"None"
List of LangChain callbacks to use. If None and Langfuse is configured, uses langfuse_handler.
chat_id
str | None
default:"None"
Optional chat session identifier.
retry_on_error
bool
default:"True"
Whether to retry tool calls that fail due to validation errors.
max_retries_per_step
int
default:"2"
Maximum number of retries for validation errors per step.
Signature
def __init__(llm: langchain_core.language_models.base.BaseLanguageModel | None = None, client: mcp_use.client.MCPClient | None = None, connectors: list[mcp_use.connectors.base.BaseConnector] | None = None, max_steps: int = 5, auto_initialize: bool = False, memory_enabled: bool = True, system_prompt: str | None = None, system_prompt_template: str | None = None, additional_instructions: str | None = None, disallowed_tools: list[str] | None = None, tools_used_names: list[str] | None = None, use_server_manager: bool = False, server_manager: mcp_use.managers.base.BaseServerManager | None = None, verbose: bool = False, agent_id: str | None = None, api_key: str | None = None, base_url: str = "https://cloud.mcp-use.com", callbacks: list | None = None, chat_id: str | None = None, retry_on_error: bool = True, max_retries_per_step: int = 2):

method add_to_history

Add a message to the conversation history.

Parameters
message
langchain_core.messages.base.BaseMessage
required
The message to add.
Signature
def add_to_history(message: langchain_core.messages.base.BaseMessage):

method clear_conversation_history

Clear the conversation history.

Signature
def clear_conversation_history():

method close

Close the MCP connection with improved error handling.

Signature
def close():

method get_conversation_history

Get the current conversation history.

Returns
returns
list[langchain_core.messages.base.BaseMessage]
The list of conversation messages.
Signature
def get_conversation_history():

method get_disallowed_tools

Get the list of tools that are not available to the agent.

Returns
returns
list[str]
List of tool names that are not available.
Signature
def get_disallowed_tools():

method get_system_message

Get the current system message.

Returns
returns
langchain_core.messages.system.SystemMessage | None
The current system message, or None if not set.
Signature
def get_system_message():

method initialize

Initialize the MCP client and agent.

Signature
def initialize():

method run

Run a query using the MCP tools and return the final result. This method uses the streaming implementation internally and returns the final result after consuming all intermediate steps. If output_schema is provided, the agent will be schema-aware and return structured output.

Example:
# Regular usage
result = await agent.run("What's the weather like?")

# Structured output usage
from pydantic import BaseModel, Field

class WeatherInfo(BaseModel):
    temperature: float = Field(description="Temperature in Celsius")
    condition: str = Field(description="Weather condition")

weather: WeatherInfo = await agent.run(
    "What's the weather like?",
    output_schema=WeatherInfo
)
Parameters
query
str
required
The query to run.
max_steps
int | None
default:"None"
Optional maximum number of steps to take.
manage_connector
bool
default:"True"
Whether to handle the connector lifecycle internally.
external_history
list[langchain_core.messages.base.BaseMessage] | None
default:"None"
Optional external history to use instead of the internal conversation history.
output_schema
type[~T] | None
default:"None"
Optional Pydantic BaseModel class for structured output.
Returns
returns
str | mcp_use.agents.mcpagent.T
The result of running the query as a string, or if output_schema is provided, an instance of the specified Pydantic model.
Signature
def run(
query: str,
    max_steps: int | None = None,
    manage_connector: bool = True,
    external_history: list[langchain_core.messages.base.BaseMessage] | None = None,
    output_schema: type[~T] | None = None
):

method set_disallowed_tools

Set the list of tools that should not be available to the agent. This will take effect the next time the agent is initialized.

Parameters
disallowed_tools
list[str]
required
List of tool names that should not be available.
Signature
def set_disallowed_tools(disallowed_tools: list[str]):

method set_system_message

Set a new system message.

Parameters
message
str
required
The new system message content.
Signature
def set_system_message(message: str):

method stream

Run the agent and yield intermediate steps as an async generator.

Yields: Intermediate steps as (AgentAction, str) tuples, followed by the final result. If output_schema is provided, yields structured output as an instance of the schema.

Parameters
query
str
required
The query to run.
max_steps
int | None
default:"None"
Optional maximum number of steps to take.
manage_connector
bool
default:"True"
Whether to handle the connector lifecycle internally.
external_history
list[langchain_core.messages.base.BaseMessage] | None
default:"None"
Optional external history to use instead of the internal conversation history.
track_execution
bool
default:"True"
Whether to track execution for telemetry.
output_schema
type[~T] | None
default:"None"
Optional Pydantic BaseModel class for structured output.
Returns
returns
AsyncGenerator
Signature
def stream(
query: str,
    max_steps: int | None = None,
    manage_connector: bool = True,
    external_history: list[langchain_core.messages.base.BaseMessage] | None = None,
    track_execution: bool = True,
    output_schema: type[~T] | None = None
):

method stream_events

Asynchronous streaming interface.

Example:
    async for chunk in agent.astream("hello"):
        print(chunk, end="|", flush=True)

Parameters
query
str
required
Query string or input
max_steps
int | None
default:"None"
Optional maximum number of steps to take.
manage_connector
bool
default:"True"
Whether to handle the connector lifecycle internally.
external_history
list[langchain_core.messages.base.BaseMessage] | None
default:"None"
Optional external history to use instead of the internal conversation history.
Returns
returns
AsyncIterator
Signature
def stream_events(
query: str,
    max_steps: int | None = None,
    manage_connector: bool = True,
    external_history: list[langchain_core.messages.base.BaseMessage] | None = None
):
I