# LangSmith Trace Skill
INVOKE THIS SKILL when working with LangSmith tracing OR querying traces. Covers adding tracing to applications and querying/exporting trace data. Uses the langsmith CLI tool.
npx skill4agent add langchain-ai/langsmith-skills langsmith-trace

LANGSMITH_API_KEY=lsv2_pt_your_api_key_here # Required
LANGSMITH_PROJECT=your-project-name # Optional: default project
LANGSMITH_WORKSPACE_ID=your-workspace-id # Optional: for org-scoped keys

Place these in a `.env` file. `LANGSMITH_PROJECT` selects the default project for new traces.

Install the CLI:

    curl -sSL https://raw.githubusercontent.com/langchain-ai/langsmith-cli/main/scripts/install.sh | sh

Enable tracing in your application environment:

    export LANGSMITH_TRACING=true
    export LANGSMITH_API_KEY=<your-api-key>
    export OPENAI_API_KEY=<your-openai-api-key> # or your LLM provider's key

Set `LANGSMITH_PROJECT` to route traces to a specific project. In serverless environments, set `LANGCHAIN_CALLBACKS_BACKGROUND=false` so traces are flushed before the process exits.
</python>
<typescript>
Use traceable() wrapper and wrapOpenAI() for automatic tracing.
```typescript
import { traceable } from "langsmith/traceable";
import { wrapOpenAI } from "langsmith/wrappers";
import OpenAI from "openai";
const client = wrapOpenAI(new OpenAI());
const myLlmPipeline = traceable(async (question: string): Promise<string> => {
const resp = await client.chat.completions.create({
model: "gpt-4o-mini",
messages: [{ role: "user", content: question }],
});
return resp.choices[0].message.content || "";
}, { name: "my_llm_pipeline" });
// Nested tracing example
const retrieveDocs = traceable(async (query: string): Promise<string[]> => {
return docs;
}, { name: "retrieve_docs" });
const generateAnswer = traceable(async (question: string, docs: string[]): Promise<string> => {
const resp = await client.chat.completions.create({
model: "gpt-4o-mini",
messages: [{ role: "user", content: `${question}\nContext: ${docs.join("\n")}` }],
});
return resp.choices[0].message.content || "";
}, { name: "generate_answer" });
const ragPipeline = traceable(async (question: string): Promise<string> => {
const docs = await retrieveDocs(question);
return await generateAnswer(question, docs);
}, { name: "rag_pipeline" });wrap_openai()wrapOpenAI()langsmithlangsmith
├── trace (operations on trace trees - USE THIS FIRST)
│ ├── list - List traces (filters apply to root run)
│ ├── get - Get single trace with full hierarchy
│ └── export - Export traces to JSONL files (one file per trace)
│
├── run (operations on individual runs - for specific analysis)
│ ├── list - List runs (flat, filters apply to any run)
│ ├── get - Get single run
│ └── export - Export runs to single JSONL file (flat)
│
├── dataset (dataset operations)
│ ├── list - List datasets
│ ├── get - Get dataset details
│ ├── create - Create empty dataset
│ ├── delete - Delete dataset
│ ├── export - Export dataset to file
│ └── upload - Upload local JSON as dataset
│
├── example (example operations)
│ ├── list - List examples in a dataset
│ ├── create - Add example to a dataset
│ └── delete - Delete an example
│
├── evaluator (evaluator operations)
│ ├── list - List evaluators
│ ├── upload - Upload evaluator
│ └── delete - Delete evaluator
│
├── experiment (experiment operations)
│ ├── list - List experiments
│ └── get - Get experiment results
│
├── thread (thread operations)
│ ├── list - List conversation threads
│ └── get - Get thread details
│
└── project (project operations)
    └── list - List tracing projects

|  | `trace` commands | `run` commands |
|---|---|---|
| Filters apply to | Root run only | Any matching run |
| `--run-type` filter | Not available | Available |
| Returns | Full hierarchy | Flat list |
| Export output | Directory (one file/trace) | Single file |
</command_structure>
# List recent traces (most common operation)
langsmith trace list --limit 10 --project my-project
# List traces with metadata (timing, tokens, costs)
langsmith trace list --limit 10 --include-metadata
# Filter traces by time
langsmith trace list --last-n-minutes 60
langsmith trace list --since 2025-01-20T10:00:00Z
# Get specific trace with full hierarchy
langsmith trace get <trace-id>
# List traces and show hierarchy inline
langsmith trace list --limit 5 --show-hierarchy
# Export traces to JSONL (one file per trace, includes all runs)
langsmith trace export ./traces --limit 20 --full
# Filter traces by performance
langsmith trace list --min-latency 5.0 --limit 10 # Slow traces (>= 5s)
langsmith trace list --error --last-n-minutes 60 # Failed traces
# List specific run types (flat list)
langsmith run list --run-type llm --limit 20

Common flags: `--trace-ids abc,def`, `--limit N`, `--project NAME`, `--last-n-minutes N`, `--since TIMESTAMP`, `--error` / `--no-error`, `--name PATTERN`, `--min-latency SECONDS` (default 5), `--max-latency SECONDS`, `--min-tokens N`, `--tags tag1,tag2`, `--filter QUERY`.

# Filter traces by feedback score using raw LangSmith query
langsmith trace list --filter 'and(eq(feedback_key, "correctness"), gte(feedback_score, 0.8))'

Exports are `.jsonl` — one JSON object per line:

    {"run_id": "...", "trace_id": "...", "name": "...", "run_type": "...", "parent_run_id": "...", "inputs": {...}, "outputs": {...}}

Use `--include-io` or `--full` to include inputs/outputs in the export.