Building a chat loop
With mcp-use you can build interactive interfaces where users can have conversations with your MCPAgent, maintaining context and memory across multiple queries.
Basic chat loop
Here’s a basic chat loop with conversation memory enabled:
import readline from 'node:readline'
import { config } from 'dotenv'
import { ChatOpenAI } from '@langchain/openai'
import { MCPAgent, MCPClient } from 'mcp-use'
config() // Load environment variables
/**
 * Interactive REPL-style chat with an MCPAgent.
 *
 * Memory is enabled on the agent, so conversational context carries
 * across turns. Recognized commands: 'quit'/'exit' to leave the loop,
 * 'clear' to reset the conversation history.
 */
async function basicChatLoop(): Promise<void> {
  // MCP server configuration: a Playwright browser server and a
  // filesystem server scoped to /tmp, both launched via npx.
  const configuration = {
    mcpServers: {
      playwright: {
        command: 'npx',
        args: ['@playwright/mcp@latest'],
        env: { DISPLAY: ':1' }
      },
      filesystem: {
        command: 'npx',
        args: ['-y', '@modelcontextprotocol/server-filesystem', '/tmp']
      }
    }
  }

  // Create client and agent
  const client = new MCPClient(configuration)
  const llm = new ChatOpenAI({ model: 'gpt-4o' })
  const agent = new MCPAgent({
    llm,
    client,
    memoryEnabled: true, // Enable memory to track conversation history
    maxSteps: 20
  })

  // Some initial messages
  console.log('🤖 MCP Agent Chat')
  console.log('Type \'quit/exit\' to exit the chat.')
  console.log('Type \'clear\' to clear conversation history')

  // Create readline interface
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout
  })

  // Promisified wrapper around rl.question so it can be awaited.
  const question = (prompt: string): Promise<string> => {
    return new Promise((resolve) => {
      rl.question(prompt, resolve)
    })
  }

  try {
    while (true) {
      // Trim so stray whitespace doesn't defeat command matching
      // ('  exit ' still quits) and whitespace-only lines are skipped.
      const userInput = (await question('\nYou: ')).trim()
      if (['quit', 'exit'].includes(userInput.toLowerCase())) {
        console.log('👋 Goodbye!')
        break
      }
      if (userInput.toLowerCase() === 'clear') {
        agent.clearConversationHistory()
        console.log('🧹 Conversation history cleared.')
        continue
      }
      // Skip empty messages
      if (!userInput) {
        continue
      }
      try {
        process.stdout.write('\n🤖 Assistant: ')
        const response = await agent.run(userInput)
        console.log(response)
      } catch (error) {
        console.error(`\n❌ Error: ${error}`)
        console.log('Please try again or type \'exit\' to quit.')
      }
    }
  } finally {
    // Always release the terminal and shut down MCP server subprocesses,
    // even if the loop exits via an uncaught error.
    rl.close()
    await client.closeAllSessions()
  }
}

basicChatLoop().catch(console.error)
Streaming Chat Loop
Here’s a chat loop with streaming responses enabled:
import readline from 'node:readline'
import { config } from 'dotenv'
import { ChatOpenAI } from '@langchain/openai'
import { MCPAgent, MCPClient } from 'mcp-use'
config() // Load environment variables
/**
 * Interactive chat loop that streams the agent's intermediate steps.
 *
 * Each step from agent.stream() exposes the tool invoked and its
 * observation; use streamEvents for token-level streaming instead.
 * Commands: 'quit'/'exit' to leave, 'clear' to reset history.
 */
async function streamingChatLoop(): Promise<void> {
  // MCP server configuration: a Playwright browser server launched via npx.
  const configuration = {
    mcpServers: {
      playwright: {
        command: 'npx',
        args: ['@playwright/mcp@latest'],
        env: { DISPLAY: ':1' }
      }
    }
  }

  // Create client and agent
  const client = new MCPClient(configuration)
  const llm = new ChatOpenAI({ model: 'gpt-4o' })
  const agent = new MCPAgent({
    llm,
    client,
    memoryEnabled: true, // Enable memory to track conversation history
    maxSteps: 20
  })

  // Some initial messages
  console.log('🤖 MCP Agent Chat (Streaming)')
  console.log('Type \'quit/exit\' to exit the chat.')
  console.log('Type \'clear\' to clear conversation history')

  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout
  })

  // Promisified wrapper around rl.question so it can be awaited.
  const question = (prompt: string): Promise<string> => {
    return new Promise((resolve) => {
      rl.question(prompt, resolve)
    })
  }

  try {
    while (true) {
      // Trim so stray whitespace doesn't defeat command matching
      // and whitespace-only lines are skipped.
      const userInput = (await question('\nYou: ')).trim()
      if (['quit', 'exit'].includes(userInput.toLowerCase())) {
        console.log('👋 Goodbye!')
        break
      }
      if (userInput.toLowerCase() === 'clear') {
        agent.clearConversationHistory()
        console.log('🧹 Conversation history cleared.')
        continue
      }
      if (!userInput) { // Skip empty messages
        continue
      }
      try {
        process.stdout.write('\n🤖 Assistant: ')
        // Stream the response
        for await (const step of agent.stream(userInput)) {
          // In TypeScript, stream returns steps rather than text chunks
          // You might want to use streamEvents for token-level streaming
          console.log(`\nTool: ${step.action.tool}`)
          console.log(`Result: ${step.observation}`)
        }
      } catch (error) {
        console.error(`\n❌ Error: ${error}`)
        console.log('Please try again or type \'exit\' to quit.')
      }
    }
  } finally {
    // Always release the terminal and shut down MCP server subprocesses.
    rl.close()
    await client.closeAllSessions()
  }
}

streamingChatLoop().catch(console.error)
Chat Loop with Structured I/O
It’s possible to create a chat loop that handles both natural language and structured inputs, allowing users to request specific tasks or analyses in a structured format. Here’s an example of how to implement this:
import readline from 'node:readline'
import { z } from 'zod'
import { config } from 'dotenv'
import { ChatOpenAI } from '@langchain/openai'
import { MCPAgent, MCPClient } from 'mcp-use'
config() // Load environment variables
// Define the schema using Zod.
// This schema is passed to agent.run() as the output schema so the agent
// returns a parsed object instead of free text. All fields are optional;
// the caller applies a default priority when none is returned.
const TaskRequest = z.object({
taskType: z.string().optional().describe('The type of task to perform'),
description: z.string().optional().describe('Detailed description of the task'),
priority: z.string().optional().describe('Priority level: low, medium, high')
})
// Companion type: values validated by the schema share the same name.
type TaskRequest = z.infer<typeof TaskRequest>
/**
 * Chat loop mixing free-form conversation with structured task requests.
 *
 * Typing 'task' prompts for a description, runs the agent with the
 * TaskRequest output schema to get a parsed analysis, then optionally
 * executes the analyzed task. Commands: 'quit'/'exit' to leave.
 */
async function structuredChatLoop(): Promise<void> {
  // MCP server configuration: a Playwright browser server launched via npx.
  const configuration = {
    mcpServers: {
      playwright: {
        command: 'npx',
        args: ['@playwright/mcp@latest'],
        env: { DISPLAY: ':1' }
      }
    }
  }

  // Create client and agent
  const client = new MCPClient(configuration)
  const llm = new ChatOpenAI({ model: 'gpt-4o' })
  const agent = new MCPAgent({
    llm,
    client,
    memoryEnabled: true, // Enable memory to track conversation history
    maxSteps: 20
  })

  // Initial messages
  console.log('🤖 MCP Agent Chat (Structured)')
  console.log('You can chat naturally or request structured task analysis')
  console.log('Type \'task\' to create a structured task request')

  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout
  })

  // Promisified wrapper around rl.question so it can be awaited.
  const question = (prompt: string): Promise<string> => {
    return new Promise((resolve) => {
      rl.question(prompt, resolve)
    })
  }

  try {
    while (true) {
      // Trim so stray whitespace doesn't defeat command matching.
      const userInput = (await question('\nYou: ')).trim()
      if (['exit', 'quit'].includes(userInput.toLowerCase())) {
        console.log('👋 Goodbye!')
        break
      }
      // Skip empty messages instead of sending an empty prompt to the agent.
      if (!userInput) {
        continue
      }
      try {
        if (userInput.toLowerCase() === 'task') {
          console.log('\n📋 Creating structured task...')
          const taskDescription = await question('Describe your task: ')
          // Positional arguments per MCPAgent.run signature; only the
          // output schema (last argument) is supplied here.
          const task = await agent.run(
            `Analyze a task with the following description: ${taskDescription}`,
            undefined, // maxSteps
            undefined, // manageConnector
            undefined, // externalHistory
            TaskRequest // output schema
          )
          // Print task analysis
          console.log('\n✅ Task Analysis:')
          console.log(`• Type: ${task.taskType}`)
          console.log(`• Description: ${task.description}`)
          console.log(`• Priority: ${task.priority ?? 'low'}`)
          // Trailing space keeps the cursor off the closing paren.
          const proceed = await question('\nDo you want to proceed with this task? (y/n) ')
          // Accept both 'y' and 'yes' as confirmation.
          if (['y', 'yes'].includes(proceed.trim().toLowerCase())) {
            const response = await agent.run(
              `Execute the following task: ${task.description}`
            )
            console.log(`\n🤖 Assistant: ${response}`)
          }
        } else {
          // Regular conversation
          const response = await agent.run(userInput)
          console.log(`\n🤖 Assistant: ${response}`)
        }
      } catch (error) {
        console.error(`❌ Error: ${error}`)
        console.log('Please try again or type \'exit\' to quit.')
      }
    }
  } finally {
    // Always release the terminal and shut down MCP server subprocesses.
    rl.close()
    await client.closeAllSessions()
  }
}

structuredChatLoop().catch(console.error)