Source code
import asyncio
import os

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from langchain_mcp_adapters.tools import load_mcp_tools
from langchain_community.llms.sparkllm import SparkLLM
from langchain.agents import AgentExecutor, StructuredChatAgent
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
os.environ["IFLYTEK_SPARK_APP_ID"] = "xxx"
os.environ["IFLYTEK_SPARK_API_KEY"] = "xxx"
os.environ["IFLYTEK_SPARK_API_SECRET"] = "xxx"
MCP_SERVER_SCRIPT = "/langchain_learn/mcp学习/base_mcp_tool_study2.py"
PYTHON_PATH = "/miniforge3/envs/langchain/bin/python"
model = SparkLLM(
    model="SparkLLM/Max-32K",
    api_url="wss://spark-api.xf-yun.com/chat/max-32k",
)
prompt_template = """
You must produce a response that strictly follows this JSON format:
{{
    "Thought": "your reasoning",
    "Action": "one tool name from {tool_names}",
    "Action Input": {{
        "param1": value,
        "param2": value,
        ...
    }}
}}
Available tools:
{tools}
Question:
{input}
Steps already executed (JSON array):
{agent_scratchpad}
"""
prompt = PromptTemplate.from_template(template=prompt_template)
async def run_agent():
    # Spawn the MCP server script as a stdio subprocess
    server_params = StdioServerParameters(
        command=PYTHON_PATH,
        args=[MCP_SERVER_SCRIPT],
    )
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # Expose the MCP server's tools as LangChain tools
            tools = await load_mcp_tools(session)
            agent = StructuredChatAgent.from_llm_and_tools(llm=model, tools=tools, prompt=prompt)
            memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
            agent_executor = AgentExecutor(
                agent=agent,
                tools=tools,
                memory=memory,
                handle_parsing_errors=True,
                max_iterations=5,
                verbose=True,
            )
            response = await agent_executor.arun({
                "input": "Please list the files in the /work/langchain_learn folder and compute the value of (15**2 - 3)/4"
            })
            return response

if __name__ == "__main__":
    result = asyncio.run(run_agent())
    print("Final result:", result)
Run result
