2. langgraph中的Tool Calling (How to handle tool calling errors)
1. 工具定义
from langchain_core.tools import tool


@tool
def get_weather(location: str):
    """Call to get the current weather."""
    # Only the properly capitalized city name succeeds. The lowercase form
    # raises on purpose so the demo can show how ToolNode feeds tool errors
    # back to the model for a retry.
    if location == "San Francisco":
        return "It's 60 degrees and foggy."
    if location == "san francisco":
        raise ValueError("Input queries must be proper nouns")
    raise ValueError("Invalid input.")
2. 无错误处理的graph
2.1 graph 定义
from typing import Literal
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.prebuilt import ToolNode

# Prebuilt ToolNode executes tool calls and has built-in error handling:
# a raised tool exception is converted into a ToolMessage for the model.
tool_node = ToolNode([get_weather])
from langchain_openai import ChatOpenAI

# temperature=0 keeps the demo deterministic; the base URL points at the
# Zhipu (GLM) OpenAI-compatible endpoint.
llm = ChatOpenAI(
    temperature=0,
    model="GLM-4",
    openai_api_key="your api key",
    openai_api_base="https://open.bigmodel.cn/api/paas/v4/"
)
# Bind the tool schema so the model can emit structured tool calls.
model_with_tools = llm.bind_tools([get_weather])
def should_continue(state: MessagesState):
    """Route to the tools node while the model keeps requesting tool calls."""
    last = state["messages"][-1]
    # No pending tool calls means the model produced a final answer.
    return "tools" if last.tool_calls else END
def call_model(state: MessagesState):
    """Run one model turn over the full history and append the reply."""
    reply = model_with_tools.invoke(state["messages"])
    return {"messages": [reply]}
workflow = StateGraph(MessagesState)
# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("tools", tool_node)
workflow.add_edge(START, "agent")
# After "agent", either execute tools or finish, per should_continue.
workflow.add_conditional_edges("agent", should_continue, ["tools", END])
# Tool results always flow back to the agent for another model turn.
workflow.add_edge("tools", "agent")
app = workflow.compile()
2.2 graph 可视化
from IPython.display import Image, display

# Render the graph topology as a Mermaid PNG (Jupyter notebooks only).
try:
    display(Image(app.get_graph().draw_mermaid_png()))
except Exception:
    # This requires some extra dependencies and is optional
    pass
2.3 错误处理
当你尝试调用工具时,可以看到模型用错误的输入调用了工具,导致工具抛出错误。预构建的ToolNode执行工具有一些内置的错误处理机制,它会捕获错误并将其传回模型,以便模型可以重试。
# The lowercase "san francisco" makes get_weather raise, so this run
# demonstrates ToolNode's built-in error handling: the error is sent back
# to the model, which retries with corrected input.
response = app.invoke(
    {"messages": [("human", "what is the weather in san francisco?")]},
)
for message in response["messages"]:
    string_representation = f"{message.type.upper()}: {message.content}\n"
    print(string_representation)
HUMAN: what is the weather in san francisco?
AI:
TOOL: Error: ValueError('Input queries must be proper nouns')
Please fix your mistakes.
AI:
TOOL: It's 60 degrees and foggy.
AI: The current weather in San Francisco is 60 degrees and foggy.
3. 具备错误处理的graph
3.1 自定义的回退策略
from langchain_core.output_parsers import StrOutputParser
from pydantic import BaseModel, Field


class HaikuRequest(BaseModel):
    # Exactly three topics are required (min_length == max_length == 3).
    # The strict schema makes it easy for the model to submit invalid
    # arguments, which is exactly what this error-handling demo needs.
    topic: list[str] = Field(
        max_length=3,
        min_length=3,
    )
@tool
def master_haiku_generator(request: HaikuRequest):
    """Generates a haiku based on the provided topics."""
    # A dedicated model instance, piped into a plain-string parser,
    # produces the poem text.
    model = ChatOpenAI(
        temperature=0,
        model="GLM-4",
        openai_api_key="your api key",
        openai_api_base="https://open.bigmodel.cn/api/paas/v4/"
    )
    chain = model | StrOutputParser()
    joined_topics = ", ".join(request.topic)
    return chain.invoke(f"Write a haiku about {joined_topics}")
# From here on the graph uses the stricter haiku tool instead of get_weather.
tool_node = ToolNode([master_haiku_generator])
llm = ChatOpenAI(
    temperature=0,
    model="GLM-4",
    openai_api_key="your api key",
    openai_api_base="https://open.bigmodel.cn/api/paas/v4/"
)
# Bind the tool schema so the model can emit structured tool calls.
model_with_tools = llm.bind_tools([master_haiku_generator])
def should_continue(state: MessagesState):
    """Keep looping into the tools node while tool calls are pending."""
    if state["messages"][-1].tool_calls:
        return "tools"
    # Final answer reached — terminate the graph.
    return END
def call_model(state: MessagesState):
    """Ask the tool-bound model for the next turn given the history so far."""
    return {"messages": [model_with_tools.invoke(state["messages"])]}
workflow = StateGraph(MessagesState)
# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("tools", tool_node)
workflow.add_edge(START, "agent")
# Same agent/tools loop shape as before, now with the haiku tool.
workflow.add_conditional_edges("agent", should_continue, ["tools", END])
workflow.add_edge("tools", "agent")
app = workflow.compile()
response = app.invoke(
    {"messages": [("human", "Write me an incredible haiku about water.")]},
    # Cap graph steps so repeated validation failures cannot loop forever.
    {"recursion_limit": 10},
)
for message in response["messages"]:
    string_representation = f"{message.type.upper()}: {message.content}\n"
    print(string_representation)
HUMAN: Write me an incredible haiku about water.
AI:
TOOL: Error: 1 validation error for master_haiku_generator
request.topic
Input should be a valid list [type=list_type, input_value={'Items': ['water', 'ocean', 'wave']}, input_type=dict]
For further information visit https://errors.pydantic.dev/2.8/v/list_type
Please fix your mistakes.
AI:
TOOL: Error: 1 validation error for master_haiku_generator
request.topic
Input should be a valid list [type=list_type, input_value={'Items': ['water', 'ocean', 'wave']}, input_type=dict]
For further information visit https://errors.pydantic.dev/2.8/v/list_type
Please fix your mistakes.
AI:
TOOL: Water's embrace vast,
Ocean's salty caress,
Waves dance with the moon.
AI: Here is an incredible haiku about water:
Water's embrace vast,
Ocean's salty caress,
Waves dance with the moon.
3.2 使用更好的模型
import json
from langchain_core.messages import AIMessage, ToolMessage
from langchain_core.messages.modifier import RemoveMessage
@tool
def master_haiku_generator(request: HaikuRequest):
    """Generates a haiku based on the provided topics."""
    from langchain_openai import ChatOpenAI

    # Chat model piped into a plain-string parser forms the generation chain.
    llm = ChatOpenAI(
        temperature=0,
        model="GLM-4",
        openai_api_key="your api key",
        openai_api_base="https://open.bigmodel.cn/api/paas/v4/"
    )
    chain = llm | StrOutputParser()
    topic_list = ", ".join(request.topic)
    return chain.invoke(f"Write a haiku about {topic_list}")
def call_tool(state: MessagesState):
    """Execute every tool call in the newest message, one ToolMessage each.

    On failure the ToolMessage has empty content but carries the exception
    in additional_kwargs["error"], which should_fallback later inspects to
    decide whether to divert into the cleanup/fallback path.
    """
    tools_by_name = {master_haiku_generator.name: master_haiku_generator}
    last_message = state["messages"][-1]
    output_messages = []
    for tool_call in last_message.tool_calls:
        name, call_id = tool_call["name"], tool_call["id"]
        try:
            result = tools_by_name[name].invoke(tool_call["args"])
        except Exception as exc:
            # Failed call: flag the error for the fallback router.
            output_messages.append(
                ToolMessage(
                    content="",
                    name=name,
                    tool_call_id=call_id,
                    additional_kwargs={"error": exc},
                )
            )
        else:
            output_messages.append(
                ToolMessage(
                    content=json.dumps(result),
                    name=name,
                    tool_call_id=call_id,
                )
            )
    return {"messages": output_messages}
llm = ChatOpenAI(
    temperature=0,
    model="GLM-4",
    openai_api_key="your api key",
    openai_api_base="https://open.bigmodel.cn/api/paas/v4/"
)
model_with_tools = llm.bind_tools([master_haiku_generator])
from langchain_openai import ChatOpenAI

# A stronger model, used only as a fallback after a failed tool call.
better_model = ChatOpenAI(
    temperature=0,
    model="GLM-4-plus",
    openai_api_key="your api key",
    openai_api_base="https://open.bigmodel.cn/api/paas/v4/"
)
better_model_with_tools = better_model.bind_tools([master_haiku_generator])
def should_continue(state: MessagesState):
    """Route to tools while the newest message still requests tool calls."""
    messages = state["messages"]
    pending = messages[-1].tool_calls
    return "tools" if pending else END
def should_fallback(
    state: MessagesState,
) -> Literal["agent", "remove_failed_tool_call_attempt"]:
    """Divert to the cleanup node if any tool invocation has failed.

    A ToolMessage whose additional_kwargs carries an "error" entry marks a
    failed call (set by call_tool); otherwise control returns to the agent.
    """
    for msg in state["messages"]:
        failed = (
            isinstance(msg, ToolMessage)
            and msg.additional_kwargs.get("error") is not None
        )
        if failed:
            return "remove_failed_tool_call_attempt"
    return "agent"
def call_model(state: MessagesState):
    """Primary model turn over the accumulated message history."""
    history = state["messages"]
    return {"messages": [model_with_tools.invoke(history)]}
def remove_failed_tool_call_attempt(state: MessagesState):
    """Drop the latest AI turn and everything after it from the state.

    Scans backwards for the newest AIMessage and emits RemoveMessage
    markers for it and all later messages, so the failed tool-call attempt
    disappears from the history before the fallback model runs.
    """
    messages = state["messages"]
    indexed = list(enumerate(messages))
    # Walk from the end to locate the most recent AIMessage.
    last_ai_index = next(
        idx for idx, msg in reversed(indexed) if isinstance(msg, AIMessage)
    )
    doomed = messages[last_ai_index:]
    return {"messages": [RemoveMessage(id=m.id) for m in doomed]}
# Fallback to a better model if a tool call fails
def call_fallback_model(state: MessagesState):
    """Same contract as call_model, but uses the stronger fallback model."""
    reply = better_model_with_tools.invoke(state["messages"])
    return {"messages": [reply]}
workflow = StateGraph(MessagesState)
workflow.add_node("agent", call_model)
workflow.add_node("tools", call_tool)
workflow.add_node("remove_failed_tool_call_attempt", remove_failed_tool_call_attempt)
workflow.add_node("fallback_agent", call_fallback_model)
workflow.add_edge(START, "agent")
workflow.add_conditional_edges("agent", should_continue, ["tools", END])
# After tools run, either return to the agent or reroute into cleanup.
workflow.add_conditional_edges("tools", should_fallback)
workflow.add_edge("remove_failed_tool_call_attempt", "fallback_agent")
# The fallback model's tool calls are executed by the same tools node.
workflow.add_edge("fallback_agent", "tools")
app = workflow.compile()
可视化
# Render the extended graph topology (Jupyter notebooks only).
try:
    display(Image(app.get_graph().draw_mermaid_png()))
except Exception:
    # This requires some extra dependencies and is optional
    pass
示例
from pprint import pprint

# Stream the run so each node's output is printed as it executes, making
# the tools -> remove_failed_tool_call_attempt -> fallback_agent path visible.
stream = app.stream(
    {"messages": [("human", "Write me an incredible haiku about water.")]},
    {"recursion_limit": 10},
)
for chunk in stream:
    for key, value in chunk.items():
        # Node
        pprint(f"Node '{key}':")
        pprint("\n---\n")
        pprint(value['messages'][0].content)
"Node 'agent':"
'\n---\n'
"Node 'tools':"
'\n---\n'
"Node 'remove_failed_tool_call_attempt':"
'\n---\n'
"Node 'fallback_agent':"
'\n---\n'
"Node 'tools':"
'\n---\n'
"Node 'agent':"
'\n---\n'
('"Ripples dance, still,\\nCurrents weave through silent streams,\\nPeace in '
'flow\'s embrace."')
参考链接:https://langchain-ai.github.io/langgraph/how-tos/tool-calling-errors/#custom-strategies
如果有任何问题,欢迎在评论区提问。