langchain

Compare original and translation side by side

🇺🇸

Original

English
🇨🇳

Translation

Chinese

LangChain & LangGraph

LangChain & LangGraph

Build sophisticated LLM applications with composable chains and agent graphs.
使用可组合的链式调用和Agent图构建复杂的LLM应用。

Quick Start

快速开始

bash
pip install langchain langchain-openai langchain-anthropic langgraph
python
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
bash
pip install langchain langchain-openai langchain-anthropic langgraph
python
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate

Simple chain

简单链式调用

llm = ChatAnthropic(model="claude-3-sonnet-20240229")
prompt = ChatPromptTemplate.from_template("Explain {topic} in simple terms.")
chain = prompt | llm
response = chain.invoke({"topic": "quantum computing"})
undefined
llm = ChatAnthropic(model="claude-3-sonnet-20240229")
prompt = ChatPromptTemplate.from_template("Explain {topic} in simple terms.")
chain = prompt | llm
response = chain.invoke({"topic": "quantum computing"})
undefined

LCEL (LangChain Expression Language)

LCEL(LangChain 表达式语言)

Compose chains with the pipe operator:
python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
使用管道操作符组合链式调用:
python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

Chain with parsing

带输出解析的链

chain = (
    {"topic": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
result = chain.invoke("machine learning")
undefined
chain = (
    {"topic": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
result = chain.invoke("machine learning")
undefined

RAG Pipeline

RAG 管道

python
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
python
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

Create vector store

创建向量存储

embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(documents, embeddings)
retriever = vectorstore.as_retriever(search_kwargs={"k": 4})
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(documents, embeddings)
retriever = vectorstore.as_retriever(search_kwargs={"k": 4})

RAG prompt

RAG 提示词

prompt = ChatPromptTemplate.from_template("""
Answer based on the following context: {context}
Question: {question}
""")
prompt = ChatPromptTemplate.from_template("""
Answer based on the following context: {context}
Question: {question}
""")

RAG chain

RAG 链

rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
answer = rag_chain.invoke("What is the refund policy?")
undefined
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
answer = rag_chain.invoke("What is the refund policy?")
undefined

LangGraph Agent

LangGraph Agent

python
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode
from langchain_core.tools import tool
from langchain_core.messages import HumanMessage
from typing import TypedDict, Annotated
import operator
python
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode
from langchain_core.tools import tool
from langchain_core.messages import HumanMessage
from typing import TypedDict, Annotated
import operator

Define state

定义状态

class AgentState(TypedDict):
    messages: Annotated[list, operator.add]
class AgentState(TypedDict):
    messages: Annotated[list, operator.add]

Define tools

定义工具

@tool
def search(query: str) -> str:
    """Search the web."""
    return f"Results for: {query}"

@tool
def calculator(expression: str) -> str:
    """Calculate mathematical expression."""
    # NOTE: eval() is for demonstration only — never use it on untrusted input.
    return str(eval(expression))

tools = [search, calculator]
@tool
def search(query: str) -> str:
    """Search the web."""
    return f"Results for: {query}"

@tool
def calculator(expression: str) -> str:
    """Calculate mathematical expression."""
    # 注意:eval() 仅用于演示——切勿对不可信输入使用。
    return str(eval(expression))

tools = [search, calculator]

Create graph

创建图

graph = StateGraph(AgentState)
graph = StateGraph(AgentState)

Add nodes

添加节点

graph.add_node("agent", call_model)
graph.add_node("tools", ToolNode(tools))
graph.add_node("agent", call_model)
graph.add_node("tools", ToolNode(tools))

Add edges

添加边

graph.set_entry_point("agent")
graph.add_conditional_edges(
    "agent",
    should_continue,
    {"continue": "tools", "end": END}
)
graph.add_edge("tools", "agent")
graph.set_entry_point("agent")
graph.add_conditional_edges(
    "agent",
    should_continue,
    {"continue": "tools", "end": END}
)
graph.add_edge("tools", "agent")

Compile

编译

app = graph.compile()
app = graph.compile()

Run

运行

result = app.invoke({"messages": [HumanMessage(content="What is 25 * 4?")]})
undefined
result = app.invoke({"messages": [HumanMessage(content="What is 25 * 4?")]})
undefined

Structured Output

结构化输出

python
from langchain_core.pydantic_v1 import BaseModel, Field

class Person(BaseModel):
    name: str = Field(description="Person's name")
    age: int = Field(description="Person's age")
    occupation: str = Field(description="Person's job")
python
from langchain_core.pydantic_v1 import BaseModel, Field

class Person(BaseModel):
    name: str = Field(description="Person's name")
    age: int = Field(description="Person's age")
    occupation: str = Field(description="Person's job")

Structured LLM

结构化 LLM

structured_llm = llm.with_structured_output(Person)
result = structured_llm.invoke("John is a 30 year old engineer")
structured_llm = llm.with_structured_output(Person)
result = structured_llm.invoke("John is a 30 year old engineer")

Person(name='John', age=30, occupation='engineer')

Person(name='John', age=30, occupation='engineer')

undefined
undefined

Memory

对话记忆

python
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
python
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory

Message history

消息历史

store = {}
def get_session_history(session_id: str):
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]
store = {}
def get_session_history(session_id: str):
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]

Chain with memory

带记忆的链

with_memory = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key="input",
    history_messages_key="history"
)
with_memory = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key="input",
    history_messages_key="history"
)

Use with session

按会话使用

response = with_memory.invoke(
    {"input": "My name is Alice"},
    config={"configurable": {"session_id": "user123"}}
)
undefined
response = with_memory.invoke(
    {"input": "My name is Alice"},
    config={"configurable": {"session_id": "user123"}}
)
undefined

Streaming

流式输出

python
undefined
python
undefined

Stream tokens

流式输出 token

async for chunk in chain.astream({"topic": "AI"}):
    print(chunk.content, end="", flush=True)
async for chunk in chain.astream({"topic": "AI"}):
    print(chunk.content, end="", flush=True)

Stream events (for debugging)

流式事件(用于调试)

async for event in chain.astream_events({"topic": "AI"}, version="v1"):
    print(event)
undefined
async for event in chain.astream_events({"topic": "AI"}, version="v1"):
    print(event)
undefined

LangSmith Tracing

LangSmith 追踪

python
import os
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "your-api-key"
os.environ["LANGCHAIN_PROJECT"] = "my-project"
python
import os
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "your-api-key"
os.environ["LANGCHAIN_PROJECT"] = "my-project"

All chains are now traced automatically

现在所有链都会自动被追踪

chain.invoke({"topic": "AI"})
undefined
chain.invoke({"topic": "AI"})
undefined

Resources

资源