Using LangChain Tools
The prerequisite knowledge and principles behind tool use with large language models are covered in the article below:
1. Overview
To be continued.
2. Using LangChain Tools
First, define a tool in my_tools.py. The Pydantic model declares the tool's argument schema, and the @tool decorator turns the plain function into a LangChain tool with that schema and a description.

my_tools.py
from langchain.tools import tool
from pydantic import BaseModel, Field


class FieldInfo(BaseModel):
    """Argument schema for the temperature tool."""
    city: str = Field(description='城市')


@tool(args_schema=FieldInfo, description='根据城市名称获取温度')
def tp_tool(city: str) -> int | None:
    """Return a hard-coded temperature for the given city."""
    print('=======tp_tool=======')
    if city == '北京':
        return 12
    elif city == '武汉':
        return 23
    elif city == '沈阳':
        return -10
    elif city == '泉州':
        return 27
    else:
        return None


if __name__ == '__main__':
    print(tp_tool.invoke({'city': '沈阳'}))
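As a quick sanity check, the decorator's output can be inspected before wiring the tool into an agent; a minimal sketch, assuming the my_tools.py file above:

from my_tools import tp_tool

# Metadata generated by the @tool decorator and the FieldInfo schema
print(tp_tool.name)         # tp_tool
print(tp_tool.description)  # 根据城市名称获取温度
print(tp_tool.args)         # JSON-schema properties of the 'city' argument

# Invoke the tool directly, exactly as the agent will later
print(tp_tool.invoke({'city': '泉州'}))  # 27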
The agent classes used below are imported from the langchain_classic package, so install it first:

pip install langchain-classic

Next, build a tool-calling agent in test_tool2.py: the model decides when to call tp_tool and with which arguments, and AgentExecutor runs the tool and feeds the result back.

test_tool2.py
import os

from langchain.chat_models import init_chat_model
from langchain_classic.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate

from my_tools import tp_tool

llm = init_chat_model(
    model='deepseek-chat',
    model_provider='openai',
    api_key=os.getenv('DSKEY'),
    base_url='https://api.deepseek.com',
)

# Define the prompt; the agent_scratchpad placeholder holds intermediate tool calls and results
prompt = ChatPromptTemplate.from_messages([
    ("system", "你是一个天气查询助手"),
    ("human", "{input}"),
    ("placeholder", "{agent_scratchpad}"),
])

# Create the agent and its executor
agent = create_tool_calling_agent(llm, [tp_tool], prompt)
agent_executor = AgentExecutor(agent=agent, tools=[tp_tool], verbose=True)

# Run a query
result = agent_executor.invoke({"input": "泉州温度多少?"})
print('*****result = ', result)
Log output:
D:\PycharmProjects\langchain-test\.venv\Scripts\python.exe D:\PycharmProjects\langchain-test\test_tool2.py
> Entering new AgentExecutor chain...
Invoking: `tp_tool` with `{'city': '泉州'}`
responded: 我来帮您查询泉州的温度。
=======tp_tool=======
27根据查询结果,泉州当前的温度是 **27°C**。
> Finished chain.
*****result = {'input': '泉州温度多少?', 'output': '根据查询结果,泉州当前的温度是 **27°C**。'}
Process finished with exit code 0
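For reference, create_tool_calling_agent and AgentExecutor automate a loop that can also be driven by hand with bind_tools. The sketch below is illustrative rather than the executor's exact internals, and assumes the same my_tools.py and DeepSeek configuration as above; it shows the two halves of that loop: the model emits a tool call, and the caller executes it.

import os

from langchain.chat_models import init_chat_model

from my_tools import tp_tool

llm = init_chat_model(
    model='deepseek-chat',
    model_provider='openai',
    api_key=os.getenv('DSKEY'),
    base_url='https://api.deepseek.com',
)

# Expose the tool's schema to the model
llm_with_tools = llm.bind_tools([tp_tool])

# 1. The model replies with a tool call instead of a plain answer
ai_msg = llm_with_tools.invoke('泉州温度多少?')
print(ai_msg.tool_calls)  # e.g. [{'name': 'tp_tool', 'args': {'city': '泉州'}, ...}]

# 2. Run the requested tool ourselves with the arguments the model chose
for call in ai_msg.tool_calls:
    print(tp_tool.invoke(call['args']))  # 27

AgentExecutor goes one step further: it appends the tool result to the conversation via agent_scratchpad and calls the model again so it can phrase the final answer, which is what the log above shows.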