# ethpandaops — Ethereum network data skill
Query Ethereum network data via ethpandaops CLI or MCP server. Use when analyzing blockchain data, block timing, attestations, validator performance, network health, or infrastructure metrics. Provides access to ClickHouse (blockchain data), Prometheus (metrics), Loki (logs), and Dora (explorer APIs).
npx skill4agent add ethpandaops/mcp

This skill exposes an MCP tool (`queryethpandaops`) and a CLI binary (`panda`).

# Discovery
panda datasources # List all datasources
panda datasources --type clickhouse # Filter by type
panda schema # List ClickHouse tables
panda schema beacon_api_eth_v1_events_block # Show table schema
panda docs # List Python API modules
panda docs clickhouse # Show module docs
# Search
panda search examples "block arrival time"
panda search examples "attestation" --category attestations --limit 5
panda search runbooks "finality delay"
panda search runbooks "validator" --tag performance
# Execute
panda execute --code 'from ethpandaops import clickhouse; print(clickhouse.list_datasources())'
panda execute --file script.py
panda execute --code '...' --session <id> # Reuse session
echo 'print("hello")' | panda execute
# Sessions
panda session list
panda session create
panda session destroy <session-id>

Add `--json` to any command for machine-readable output.

| Resource | Description |
|---|---|
| *(resource URI lost in extraction — TODO restore)* | All configured datasources |
| *(resource URI lost in extraction — TODO restore)* | ClickHouse clusters |
| *(resource URI lost in extraction — TODO restore)* | Prometheus instances |
| *(resource URI lost in extraction — TODO restore)* | Loki instances |
| *(resource URI lost in extraction — TODO restore)* | Active Ethereum networks |
| *(resource URI lost in extraction — TODO restore)* | Available tables |
| *(resource URI lost in extraction — TODO restore)* | Table schema details |
| `python://ethpandaops` | Python library API docs |
search_examples(query="block arrival time")
search_runbooks(query="network not finalizing")
execute_python(code="...")
manage_session(operation="list")

from ethpandaops import clickhouse
# List available clusters
clusters = clickhouse.list_datasources()
# Returns: [{"name": "xatu", "database": "default"}, {"name": "xatu-cbt", ...}]
# Query data (returns pandas DataFrame)
df = clickhouse.query("xatu-cbt", """
SELECT
slot,
avg(seen_slot_start_diff) as avg_arrival_ms
FROM mainnet.fct_block_first_seen_by_node
WHERE slot_start_date_time >= now() - INTERVAL 1 HOUR
GROUP BY slot
ORDER BY slot DESC
""")
# Parameterized queries
df = clickhouse.query("xatu", "SELECT * FROM blocks WHERE slot > {slot}", {"slot": 1000})

Tips: prefer the `xatu-cbt` cluster over `xatu` where a table exists in both. Always bound
queries in time with `slot_start_date_time >= now() - INTERVAL X HOUR`. On `xatu`, filter by
`meta_network_name = 'mainnet'`; on `xatu-cbt`, tables are network-prefixed (`mainnet.table_name`).

from ethpandaops import prometheus
# List instances
instances = prometheus.list_datasources()
# Instant query
result = prometheus.query("ethpandaops", "up")
# Range query
result = prometheus.query_range(
"ethpandaops",
"rate(http_requests_total[5m])",
start="now-1h",
end="now",
step="1m"
)

Time parameters accept `now` and relative offsets such as `now-1h` or `now-30m`.

from ethpandaops import loki
# Step 1: List instances
instances = loki.list_datasources()
# Step 2: Fetch all available labels
labels = loki.get_labels("ethpandaops")
print(labels)
# Example: ['app', 'cluster', 'ethereum_cl', 'ethereum_el', 'ethereum_network',
# 'instance', 'namespace', 'node', 'testnet', 'validator_client', ...]
# Step 3: Get values for a specific label to build your filter
networks = loki.get_label_values("ethpandaops", "testnet")
print(networks) # e.g. ['fusaka-devnet-3', 'hoodi', 'sepolia', ...]
cl_clients = loki.get_label_values("ethpandaops", "ethereum_cl")
print(cl_clients) # e.g. ['lighthouse', 'prysm', 'teku', 'nimbus', 'lodestar', 'grandine']
# Step 4: Query logs with label filters
logs = loki.query(
"ethpandaops",
'{testnet="hoodi", ethereum_cl="lighthouse"} |= "error"',
start="now-1h",
limit=100
)

Common labels: `testnet` (e.g. `hoodi`, `fusaka-devnet-3`), `ethereum_cl` (e.g. `lighthouse`,
`prysm`, `teku`), `ethereum_el` (e.g. `geth`, `nethermind`, `besu`), plus `ethereum_network`,
`instance`, and `validator_client`. Log level markers vary by client (`CRIT`, `ERR`, `ERROR`,
`WARN`, `INFO`, `DEBUG`) and format (`level=error`, `"level":"error"`, `"severity":"ERROR"`,
single letters like `E`/`W`/`C`), so match broadly with regex filters such as
`|~ "(?i)(CRIT|ERR)"` or `|~ "level=(error|fatal)"`.

The Dora API is documented at `<dora-url>/api/swagger/index.html`.

from ethpandaops import dora
base_url = dora.get_base_url("mainnet")
print(f"Swagger docs: {base_url}/api/swagger/index.html")

Use WebFetch on `{base_url}/api/swagger/index.html` to discover the available `dora` endpoints.

from ethpandaops import dora
# Get network health
overview = dora.get_network_overview("mainnet")
print(f"Current epoch: {overview['current_epoch']}")
print(f"Active validators: {overview['active_validator_count']}")
# Check finality
epochs_behind = overview['current_epoch'] - overview.get('finalized_epoch', 0)
if epochs_behind > 2:
print(f"Warning: {epochs_behind} epochs behind finality")
# Generate explorer links
link = dora.link_validator("mainnet", "12345")
link = dora.link_slot("mainnet", "9000000")
link = dora.link_epoch("mainnet", 280000)

from ethpandaops import dora
import httpx
base_url = dora.get_base_url("mainnet")
# Call any endpoint discovered from swagger
with httpx.Client(timeout=30) as client:
resp = client.get(f"{base_url}/api/v1/<endpoint>")
data = resp.json()

from ethpandaops import storage
# Save visualization
import matplotlib.pyplot as plt
plt.savefig("/workspace/chart.png")
# Upload for public URL
url = storage.upload("/workspace/chart.png")
print(f"Chart URL: {url}")
# List uploaded files
files = storage.list_files()

Files under `/workspace/` persist for the lifetime of a session; reuse a session with
`--session <id>` (CLI) or `session_id` (MCP) to share data between calls.

# Call 1: Query and save
from ethpandaops import clickhouse
df = clickhouse.query("xatu-cbt", "SELECT ...")
df.to_parquet("/workspace/data.parquet")

# Call 2: Load and visualize (reuse session from Call 1)
import pandas as pd
import matplotlib.pyplot as plt
from ethpandaops import storage
df = pd.read_parquet("/workspace/data.parquet")
plt.figure(figsize=(12, 6))
plt.plot(df["slot"], df["value"])
plt.savefig("/workspace/chart.png")
url = storage.upload("/workspace/chart.png")
print(f"Chart: {url}")

Key reminders: always bound ClickHouse queries with
`slot_start_date_time >= now() - INTERVAL X HOUR`; look for worked examples with
`panda search examples "..."`; persist intermediate data under `/workspace/`; keep the
`slot_start_date_time` filter even when grouping; prefer `xatu-cbt` over `xatu` where
possible; consult `panda docs` (also exposed as the `python://ethpandaops` MCP resource);
share artifacts via `storage.upload()`.