Create interactive chat interfaces with persistent conversation memory using MCPAgent, maintaining context and memory across multiple queries.
import asyncio
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from mcp_use import MCPAgent, MCPClient
async def basic_chat_loop():
    """Run a simple console chat loop with an MCPAgent.

    Loads environment variables (e.g. OPENAI_API_KEY), starts the configured
    MCP servers (Playwright browser + filesystem rooted at /tmp), and reads
    user messages from stdin until the user quits. Memory is enabled on the
    agent so conversational context carries across turns; typing 'clear'
    resets the history. All MCP sessions are closed on exit.
    """
    # Load environment variables from a .env file (API keys, etc.).
    load_dotenv()

    # MCP server configuration.
    config = {
        "mcpServers": {
            "playwright": {
                "command": "npx",
                "args": ["@playwright/mcp@latest"],
                "env": {"DISPLAY": ":1"}
            },
            "filesystem": {
                "command": "npx",
                "args": ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"]
            }
        }
    }

    # Create client and agent.
    client = MCPClient.from_dict(config)
    llm = ChatOpenAI(model="gpt-4o")
    agent = MCPAgent(
        llm=llm,
        client=client,
        memory_enabled=True,  # Enable memory to track conversation history
        max_steps=20,
    )

    # Some initial messages.
    print("🤖 MCP Agent Chat")
    print("Type 'quit/exit' to exit the chat.")
    print("Type 'clear' to clear conversation history")

    try:
        while True:
            # Catch Ctrl-C / Ctrl-D at the prompt itself so the finally
            # block still runs and we exit cleanly instead of with a
            # traceback.
            try:
                # Strip so whitespace-only input counts as empty and
                # ' quit ' still quits.
                user_input = input("\nYou: ").strip()
            except (KeyboardInterrupt, EOFError):
                print("\n👋 Goodbye!")
                break

            if user_input.lower() in ('quit', 'exit'):
                print("👋 Goodbye!")
                break
            if user_input.lower() == 'clear':
                agent.clear_conversation_history()
                print("🧹 Conversation history cleared.")
                continue
            # Skip empty messages
            if not user_input:
                continue

            try:
                print("\n🤖 Assistant: ", end="", flush=True)
                response = await agent.run(user_input)
                print(response)
            except KeyboardInterrupt:  # Handle keyboard interrupt mid-response
                print("\n\n⏸️ Interrupted by user")
                break
            except Exception as e:
                print(f"\n❌ Error: {e}")
                print("Please try again or type 'exit' to quit.")
    finally:
        # Always shut down MCP server sessions, even on error or interrupt.
        await client.close_all_sessions()
# Entry point: run the basic chat loop when this script is executed directly.
if __name__ == "__main__":
    asyncio.run(basic_chat_loop())
import asyncio
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from mcp_use import MCPAgent, MCPClient
async def streaming_chat_loop():
    """Run a console chat loop with streaming MCPAgent responses.

    Loads environment variables (e.g. OPENAI_API_KEY), starts the configured
    Playwright MCP server, and reads user messages from stdin until the user
    quits. Responses are printed chunk-by-chunk as the agent streams them.
    Memory is enabled so context carries across turns; typing 'clear' resets
    the history. All MCP sessions are closed on exit.
    """
    # Load environment variables from a .env file (API keys, etc.).
    load_dotenv()

    # MCP server configuration.
    config = {
        "mcpServers": {
            "playwright": {
                "command": "npx",
                "args": ["@playwright/mcp@latest"],
                "env": {"DISPLAY": ":1"}
            }
        }
    }

    # Create client and agent.
    client = MCPClient.from_dict(config)
    llm = ChatOpenAI(model="gpt-4o")
    agent = MCPAgent(
        llm=llm,
        client=client,
        memory_enabled=True,  # Enable memory to track conversation history
        max_steps=20,
    )

    # Some initial messages.
    print("🤖 MCP Agent Chat (Streaming)")
    print("Type 'quit/exit' to exit the chat.")
    print("Type 'clear' to clear conversation history")

    try:
        while True:
            # Catch Ctrl-C / Ctrl-D at the prompt itself so the finally
            # block still runs and we exit cleanly instead of with a
            # traceback.
            try:
                # Strip so whitespace-only input counts as empty and
                # ' quit ' still quits.
                user_input = input("\nYou: ").strip()
            except (KeyboardInterrupt, EOFError):
                print("\n👋 Goodbye!")
                break

            if user_input.lower() in ('quit', 'exit'):
                print("👋 Goodbye!")
                break
            if user_input.lower() == 'clear':
                agent.clear_conversation_history()
                print("🧹 Conversation history cleared.")
                continue
            if not user_input:  # Skip empty messages
                continue

            try:
                print("\n🤖 Assistant: ", end="", flush=True)
                # Stream the response chunk by chunk.
                async for chunk in agent.stream(user_input):
                    print(chunk, end="", flush=True)
                print()
            except KeyboardInterrupt:  # Handle keyboard interrupt mid-stream
                print("\n\n⏸️ Interrupted by user")
                break
            except Exception as e:
                print(f"\n❌ Error: {e}")
                print("Please try again or type 'exit' to quit.")
    finally:
        # Always shut down MCP server sessions, even on error or interrupt.
        await client.close_all_sessions()
# Entry point: run the streaming chat loop when this script is executed directly.
if __name__ == "__main__":
    asyncio.run(streaming_chat_loop())
import asyncio
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI
from mcp_use import MCPAgent, MCPClient
from typing import Optional
class TaskRequest(BaseModel):
    """Structured analysis of a user task, produced by the agent.

    All fields default to None: in pydantic, annotating a field Optional[str]
    does not make it optional when Field(...) supplies no default — without
    default=None these fields would be required and validation would fail if
    the agent omitted any of them.
    """
    task_type: Optional[str] = Field(default=None, description="The type of task to perform")
    description: Optional[str] = Field(default=None, description="Detailed description of the task")
    priority: Optional[str] = Field(default=None, description="Priority level: low, medium, high")
async def structured_chat_loop():
    """Chat loop that can handle both natural language and structured inputs.

    Regular messages are passed straight to the agent. Typing 'task' prompts
    for a task description, asks the agent to analyze it into a TaskRequest
    (via output_schema), shows the analysis, and optionally executes the
    task. All MCP sessions are closed on exit.
    """
    # Load environment variables from a .env file (API keys, etc.).
    load_dotenv()

    # MCP server configuration.
    config = {
        "mcpServers": {
            "playwright": {
                "command": "npx",
                "args": ["@playwright/mcp@latest"],
                "env": {"DISPLAY": ":1"}
            }
        }
    }

    # Create client and agent.
    client = MCPClient.from_dict(config)
    llm = ChatOpenAI(model="gpt-4o")
    agent = MCPAgent(
        llm=llm,
        client=client,
        memory_enabled=True,  # Enable memory to track conversation history
        max_steps=20
    )

    # Initial messages.
    print("🤖 MCP Agent Chat (Structured)")
    print("You can chat naturally or request structured task analysis")
    print("Type 'task' to create a structured task request")

    try:
        while True:
            # Catch Ctrl-C / Ctrl-D at the prompt itself so the finally
            # block still runs and we exit cleanly instead of with a
            # traceback.
            try:
                # Strip so whitespace-only input counts as empty and
                # ' quit ' still quits.
                user_input = input("\nYou: ").strip()
            except (KeyboardInterrupt, EOFError):
                print("👋 Goodbye!")
                break

            if user_input.lower() in ['exit', 'quit']:
                print("👋 Goodbye!")
                break
            # Skip empty messages instead of sending them to the agent.
            if not user_input:
                continue

            try:
                if user_input.lower() == 'task':
                    print("\n📋 Creating structured task...")
                    task_description = input("Describe your task: ")
                    # Ask the agent to return a validated TaskRequest instead
                    # of free-form text.
                    task: TaskRequest = await agent.run(
                        f"Analyze a task with the following description: {task_description}",
                        output_schema=TaskRequest
                    )
                    # Print task analysis.
                    print("\n✅ Task Analysis:")
                    print(f"• Type: {task.task_type}")
                    print(f"• Description: {task.description}")
                    print(f"• Priority: {task.priority or 'low'}")

                    proceed = input("\nDo you want to proceed with this task? (y/n)")
                    if proceed.lower() == 'y':
                        response = await agent.run(
                            f"Execute the following task: {task.description}"
                        )
                        print(f"\n🤖 Assistant: {response}")
                else:
                    # Regular conversation.
                    response = await agent.run(user_input)
                    print(f"\n🤖 Assistant: {response}")
            except KeyboardInterrupt:
                print("\n👋 Goodbye!")
                break
            except Exception as e:
                print(f"❌ Error: {e}")
                print("Please try again or type 'exit' to quit.")
    finally:
        # Always shut down MCP server sessions, even on error or interrupt.
        await client.close_all_sessions()
# Entry point: run the structured chat loop when this script is executed directly.
if __name__ == "__main__":
    asyncio.run(structured_chat_loop())