# Sampling
Sampling allows MCP tools to request LLM completions during their execution: when a tool asks for a completion, the request is forwarded to the client, which runs it through its configured LLM and returns the result to the server.
## Configuration
To enable sampling, provide a `sampling_callback` function when initializing the `MCPClient`:
```python
from mcp_use.client import MCPClient
from mcp.client.session import ClientSession
from mcp.types import (
    CreateMessageRequestParams,
    CreateMessageResult,
    ErrorData,
    TextContent,
)

async def sampling_callback(
    context: ClientSession,
    params: CreateMessageRequestParams,
) -> CreateMessageResult | ErrorData:
    """
    Your sampling callback implementation.

    This function receives a prompt and returns an LLM response.
    """
    # Integrate with your LLM of choice (OpenAI, Anthropic, etc.)
    response = await your_llm.complete(params.messages[-1].content.text)
    return CreateMessageResult(
        content=TextContent(text=response, type="text"),
        model="your-model-name",
        role="assistant",
    )

# Initialize client with sampling support
client = MCPClient(
    config="config.json",
    sampling_callback=sampling_callback,
)
```
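The `your_llm.complete()` call above is a placeholder. As one hedged sketch of a concrete implementation, here is what a callback backed by the official `openai` SDK might look like; the model name and the JSON-RPC error code are assumptions for illustration, not requirements of mcp_use:

```python
from openai import AsyncOpenAI
from mcp.client.session import ClientSession
from mcp.types import (
    CreateMessageRequestParams,
    CreateMessageResult,
    ErrorData,
    TextContent,
)

openai_client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment

async def openai_sampling_callback(
    context: ClientSession,
    params: CreateMessageRequestParams,
) -> CreateMessageResult | ErrorData:
    # Forward only text messages; image content is skipped for simplicity.
    messages = [
        {"role": m.role, "content": m.content.text}
        for m in params.messages
        if isinstance(m.content, TextContent)
    ]
    try:
        completion = await openai_client.chat.completions.create(
            model="gpt-4o-mini",  # assumption: any chat model you have access to
            messages=messages,
            max_tokens=params.maxTokens,
        )
    except Exception as exc:
        # Surface failures to the server as a JSON-RPC internal error.
        return ErrorData(code=-32603, message=str(exc))
    return CreateMessageResult(
        content=TextContent(
            text=completion.choices[0].message.content or "",
            type="text",
        ),
        model="gpt-4o-mini",
        role="assistant",
    )
```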
## Server-Side Usage

On the server side, a tool requests a completion from the client's LLM through its context. For example, with FastMCP, `ctx.sample()` forwards the prompt to the client's `sampling_callback`:

```python
from fastmcp import Context, FastMCP

mcp = FastMCP(name="MyServer")

@mcp.tool
async def analyze_sentiment(text: str, ctx: Context) -> str:
    """Analyze the sentiment of text using the client's LLM."""
    prompt = f"""Analyze the sentiment of the following text as positive, negative, or neutral.
Just output a single word - 'positive', 'negative', or 'neutral'.

Text to analyze: {text}"""

    # Request LLM analysis through sampling
    response = await ctx.sample(prompt)
    return response.text.strip()
```
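To see both halves together, here is a hedged sketch of calling the tool from a client configured with the callback above, assuming `config.json` points at `MyServer` and using mcp_use's session helpers:

```python
import asyncio

from mcp_use.client import MCPClient

async def main():
    client = MCPClient(
        config="config.json",
        sampling_callback=sampling_callback,
    )
    await client.create_all_sessions()
    session = client.get_session("MyServer")

    # The server-side tool calls ctx.sample(), which round-trips
    # through sampling_callback on this client.
    result = await session.call_tool("analyze_sentiment", {"text": "I love this!"})
    print(result.content[0].text)  # e.g. "positive"

    await client.close_all_sessions()

asyncio.run(main())
```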
## Error Handling

If the client is initialized without a `sampling_callback`, any tool that requests sampling will return an error instead of a result:

```python
# Without sampling callback
client = MCPClient(config="config.json")  # No sampling_callback

# A tool that requires sampling will return an error
result = await session.call_tool("analyze_sentiment", {"text": "Hello"})
# result.isError will be True
```
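When sampling may be involved, it is worth checking `result.isError` before trusting the output; a minimal sketch:

```python
result = await session.call_tool("analyze_sentiment", {"text": "Hello"})
if result.isError:
    # The tool's ctx.sample() call failed, e.g. because no
    # sampling_callback was configured on the client.
    print("Tool failed:", result.content[0].text)
else:
    print("Sentiment:", result.content[0].text)
```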