Use the `bigquery` CLI (instead of `bq`) for all Google BigQuery and GCP data warehouse operations, including SQL query execution, data ingestion (streaming insert, bulk load, JSONL/CSV/Parquet), data extraction/export, dataset/table/view management, external tables, schema operations, query templates, cost estimation with dry-run, authentication with gcloud, data pipelines, ETL workflows, and MCP/LSP server integration for AI-assisted querying and editor support. It is a modern Rust-based replacement for the Python `bq` CLI with faster startup, better cost awareness, and streaming support. It handles both small-scale streaming inserts (<1000 rows) and large-scale bulk loading (>10MB files), with support for Cloud Storage integration.
npx skill4agent add lanej/dotfiles bigquery
# Check if authenticated and verify required scopes
bigquery auth check
# Will show:
# - Authentication status
# - Active account
# - BigQuery scopes availability
# Authenticate with gcloud including all required BigQuery scopes
bigquery auth login
# This will:
# 1. Run gcloud auth login
# 2. Ensure all necessary BigQuery scopes are granted
# 3. Verify authentication succeeded
bigquery auth check
# Basic query execution (interactive cost confirmation)
bigquery query "SELECT * FROM dataset.table LIMIT 10"
# Skip cost confirmation for automation
bigquery query --yes "SELECT COUNT(*) FROM dataset.table"
# JSON output (default)
bigquery query "SELECT * FROM dataset.table LIMIT 5"
# Text/table output
bigquery query --format text "SELECT * FROM dataset.table LIMIT 5"
Pass --yes to skip the cost confirmation prompt when running non-interactively.
# JSON output (default, machine-readable)
bigquery query "SELECT * FROM dataset.table"
bigquery query --format json "SELECT * FROM dataset.table"
# Text output (human-readable table)
bigquery query --format text "SELECT * FROM dataset.table"
# Estimate cost without executing
bigquery dry-run "SELECT * FROM large_dataset.table WHERE date >= '2025-01-01'"
# Returns:
# - Bytes that would be processed
# - Estimated cost
# - No actual data
# List datasets in current project (text format, default)
bigquery datasets list my-project
# JSON output
bigquery datasets list my-project --format json
# Example output shows:
# - Dataset ID
# - Location
# - Creation time
# - Labels (if any)
Dataset commands take a bare project; table commands take a project.dataset reference.
# List tables in a dataset (text format, first 10)
bigquery tables list my-project.my-dataset
# JSON output
bigquery tables list my-project.my-dataset --format json
# Limit results
bigquery tables list my-project.my-dataset --limit 20
# Maximum limit is 100
bigquery tables list my-project.my-dataset --limit 100
# Show table schema and metadata (text format)
bigquery tables describe my-project.my-dataset.my-table
# JSON output
bigquery tables describe my-project.my-dataset.my-table --format json
# Output includes:
# - Column names and types
# - Nullability (NULLABLE, REQUIRED, REPEATED)
# - Mode information
# - Table metadata
# Create sample JSONL file
cat > users.jsonl <<EOF
{"id": "1", "name": "Alice Johnson", "email": "alice@example.com", "age": 30}
{"id": "2", "name": "Bob Smith", "email": "bob@example.com", "age": 25}
{"id": "3", "name": "Charlie Brown", "email": "charlie@example.com", "age": 35}
EOF
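Before inserting, it can help to confirm that every line parses as JSON (this assumes jq is installed):
# Validate the JSONL file; jq exits non-zero on the first malformed line
jq -c . users.jsonl > /dev/null && echo "users.jsonl is valid JSONL"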
# Insert from JSONL file
bigquery tables insert my-project.dataset.users \
--data users.jsonl --format json
# Stream from command output
echo '{"id": "1", "name": "Alice", "email": "alice@example.com"}' | \
bigquery tables insert my-project.dataset.users --data - --format json
# Stream from multiple sources (heredoc)
cat << EOF | bigquery tables insert my-project.dataset.users --data - --format json
{"id": "1", "name": "Alice", "email": "alice@example.com", "age": 30}
{"id": "2", "name": "Bob", "email": "bob@example.com", "age": 25}
{"id": "3", "name": "Charlie", "email": "charlie@example.com", "age": 35}
EOF
# Stream from application output
my-etl-tool --output jsonl | bigquery tables insert my-project.dataset.events --data -
# Stream from compressed file
gunzip -c logs.jsonl.gz | bigquery tables insert my-project.dataset.logs --data -
# Stream from jq transformation
cat raw_data.json | jq -c '.records[]' | \
bigquery tables insert my-project.dataset.processed --data -
JSONL input is one JSON object per line, e.g. {"field1":"value1","field2":"value2"} followed by a newline (\n).
# Create sample CSV file
cat > users.csv <<EOF
id,name,email,age
1,Alice Johnson,alice@example.com,30
2,Bob Smith,bob@example.com,25
3,"Charlie Brown, Jr.",charlie@example.com,35
EOF
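Optionally, count the data rows (excluding the header) before inserting:
# Expect 3 data rows for the sample file above
tail -n +2 users.csv | wc -l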
# Insert from CSV file
bigquery tables insert my-project.dataset.users \
--data users.csv --format csv
# Stream from heredoc
cat << EOF | bigquery tables insert my-project.dataset.users --data - --format csv
id,name,email,age
1,Alice Johnson,alice@example.com,30
2,Bob Smith,bob@example.com,25
3,Charlie Brown,charlie@example.com,35
EOF
# Stream from application output
./generate_report.sh | bigquery tables insert my-project.dataset.reports --data - --format csv
# Stream from compressed CSV
gunzip -c data.csv.gz | bigquery tables insert my-project.dataset.imports --data - --format csv
# Stream from curl/API response
curl -s https://api.example.com/export.csv | \
bigquery tables insert my-project.dataset.api_data --data - --format csv
# Transform and stream CSV
cat raw.csv | tail -n +2 | awk '{print tolower($0)}' | \
bigquery tables insert my-project.dataset.cleaned --data - --format csv
# Insert inline JSON (single object)
bigquery tables insert my-project.dataset.users \
--json '{"id": "1", "name": "Alice", "email": "alice@example.com"}'
# Insert inline JSON array
bigquery tables insert my-project.dataset.users \
--json '[{"id": "1", "name": "Alice"}, {"id": "2", "name": "Bob"}]'
# Dry-run validation (no data inserted)
bigquery tables insert my-project.dataset.users \
--data users.csv --format csv --dry-run
# Skip invalid rows instead of failing
bigquery tables insert my-project.dataset.users \
--data users.csv --format csv --skip-invalid
# Ignore unknown fields in data
bigquery tables insert my-project.dataset.users \
--data users.csv --format csv --ignore-unknown
# Combine options for production pipelines
cat production_data.jsonl | \
bigquery tables insert my-project.dataset.production \
--data - --format json \
--skip-invalid \
--ignore-unknown
Key options for tables insert: --json <JSON>, --data <PATH> (use - for stdin), --format <FORMAT>, --dry-run, --skip-invalid, --ignore-unknown, --yes.
For large files (>10MB), prefer bulk loading with bigquery tables load gs://... over tables insert.
# Load from Cloud Storage URI (RECOMMENDED - no bucket config needed)
bigquery tables load my-project.dataset.users \
gs://my-bucket/data.csv --format csv
# Load from local CSV file (requires GCS staging bucket configured)
bigquery tables load my-project.dataset.users data.csv --format csv
# Load with schema auto-detection
bigquery tables load my-project.dataset.new_table data.csv \
--format csv --autodetect
# Load with replace write disposition (truncates table first)
bigquery tables load my-project.dataset.users data.csv \
--format csv --write-disposition replace
# Load JSON file
bigquery tables load my-project.dataset.events events.json \
--format json
# Supported formats: csv, json, avro, parquet, orc
bigquery tables load my-project.dataset.table data.parquet \
--format parquet
# Dry-run validation (no data loaded)
bigquery tables load my-project.dataset.users data.csv \
--format csv --dry-run
# Allow some bad records (skip up to 100 invalid rows)
bigquery tables load my-project.dataset.users data.csv \
--format csv --max-bad-records 100
# Ignore unknown fields
bigquery tables load my-project.dataset.users data.csv \
--format csv --ignore-unknown
# Skip confirmation prompts (for automation/CI)
bigquery tables load my-project.dataset.users data.csv \
--format csv --write-disposition replace --yes
Write dispositions: append adds rows to the existing table; replace truncates it first. Use --max-bad-records to tolerate bad rows in bulk loads (tables insert uses --skip-invalid instead).
Key options for tables load: --format <FORMAT>, --write-disposition <DISPOSITION>, --autodetect, --dry-run, --max-bad-records <N>, --ignore-unknown, --yes.
# Extract table to Cloud Storage as CSV
bigquery tables extract my-project.dataset.users \
gs://my-bucket/exports/users.csv --format csv
# Extract as JSON
bigquery tables extract my-project.dataset.events \
gs://my-bucket/exports/events-*.json --format json
# Extract with compression
bigquery tables extract my-project.dataset.large_table \
gs://my-bucket/exports/data-*.csv.gz --format csv --compression gzip
# Extract as Avro with Snappy compression
bigquery tables extract my-project.dataset.events \
gs://my-bucket/exports/events-*.avro --format avro --compression snappy
# Extract as Parquet
bigquery tables extract my-project.dataset.analytics \
gs://my-bucket/exports/analytics.parquet --format parquet
# CSV with custom delimiter and header
bigquery tables extract my-project.dataset.data \
gs://my-bucket/data.csv \
--format csv \
--field-delimiter "|" \
--print-header
# Dry-run to validate configuration
bigquery tables extract my-project.dataset.users \
gs://my-bucket/users.csv --format csv --dry-run
# Skip confirmation prompt
bigquery tables extract my-project.dataset.large \
gs://my-bucket/export.csv --format csv --yes
# Create CSV external table
bigquery tables create-external my-project.dataset.external_table \
--source-uri gs://bucket/data.csv \
--format csv \
--schema "id:INTEGER,name:STRING,created_at:TIMESTAMP"
# Create with auto-detected schema
bigquery tables create-external my-project.dataset.external_table \
--source-uri gs://bucket/data.csv \
--format csv \
--autodetect
# Multiple source URIs (comma-separated)
bigquery tables create-external my-project.dataset.external_table \
--source-uri "gs://bucket/file1.csv,gs://bucket/file2.csv" \
--format csv \
--autodetect
# Multiple source URIs (multiple flags)
bigquery tables create-external my-project.dataset.external_table \
--source-uri gs://bucket/file1.csv \
--source-uri gs://bucket/file2.csv \
--format csv \
--autodetect
# CSV-specific options
bigquery tables create-external my-project.dataset.external_table \
--source-uri gs://bucket/data.csv \
--format csv \
--schema "id:INTEGER,name:STRING" \
--field-delimiter "," \
--skip-leading-rows 1
# Other formats (Parquet, JSON, Avro, ORC)
bigquery tables create-external my-project.dataset.parquet_table \
--source-uri gs://bucket/data.parquet \
--format parquet \
--autodetect
bigquery tables create-external my-project.dataset.json_table \
--source-uri gs://bucket/data.jsonl \
--format json \
--autodetect
Key options for tables create-external: --source-uri <URI>, --format <FORMAT>, --schema <SCHEMA>, --autodetect, --field-delimiter <DELIMITER>, --skip-leading-rows <N>.
# Update source URIs
bigquery tables update-external my-project.dataset.external_table \
--source-uri gs://bucket/new-data.csv
# Update schema
bigquery tables update-external my-project.dataset.external_table \
--schema "id:INTEGER,name:STRING,email:STRING"
# Update CSV options
bigquery tables update-external my-project.dataset.external_table \
--field-delimiter "|" \
--skip-leading-rows 2
# Update multiple properties
bigquery tables update-external my-project.dataset.external_table \
--source-uri gs://bucket/new-data.csv \
--schema "id:INTEGER,name:STRING,updated_at:TIMESTAMP" \
--skip-leading-rows 1
# List all available templates (text format)
bigquery templates list
# JSON output
bigquery templates list --format json
# Shows:
# - Template name
# - Description
# - Parameters
# - Query preview
# Search by name or description
bigquery templates search "customer"
bigquery templates search "daily metrics"
# JSON output
bigquery templates search "analytics" --format json# Validate template for parameter consistency
bigquery templates validate my-template
# Checks:
# - Parameter definitions match query placeholders
# - Required parameters are defined
# - Parameter types are valid
# Run template with default parameters
bigquery templates run my-template
# Override parameters
bigquery templates run daily-report \
--param date=2025-01-15 \
--param region=US
# Multiple parameters
bigquery templates run customer-analysis \
--param customer_id=CUST123 \
--param start_date=2025-01-01 \
--param end_date=2025-01-31
# JSON output
bigquery templates run my-template --format json
# Skip cost confirmation
bigquery templates run expensive-query --yes
Key options for templates run: --param <KEY=VALUE> (repeatable), --format <FORMAT>, --yes.
# 1. Search for templates
bigquery templates search "revenue"
# 2. Validate template before running
bigquery templates validate monthly-revenue
# 3. Run with parameters
bigquery templates run monthly-revenue \
--param month=2025-01 \
--param min_amount=1000
# 4. Run in automation (skip confirmation)
bigquery templates run monthly-revenue \
--param month=2025-01 \
--yes \
--format json > output.json
# Start MCP server in stdio mode
bigquery mcp stdio
# Server will:
# - Accept MCP protocol messages on stdin
# - Send responses on stdout
# - Expose BigQuery tools to MCP clients
# Start HTTP MCP server on default port 8080
bigquery mcp http
# Specify custom port
bigquery mcp http --port 3000
# Server provides:
# - HTTP endpoint for MCP protocol
# - JSON-RPC over HTTP
# - Remote access to BigQuery tools
Add the stdio server to your MCP client configuration (e.g. .claude/mcp.json):
{
"mcpServers": {
"bigquery": {
"command": "bigquery",
"args": ["mcp", "stdio"],
"env": {
"GOOGLE_CLOUD_PROJECT": "my-project"
}
}
}
}
For the HTTP server, point the client at the URL instead:
{
"mcpServers": {
"bigquery": {
"url": "http://localhost:8080",
"transport": "http"
}
}
}"Find all tables containing customer purchase data from the last 30 days"
→ MCP translates to appropriate SQL query
"What columns are in the analytics.events table?"
→ MCP returns schema information
"Show me total revenue by region for Q1 2025"
→ MCP generates and executes SQL
"Run the monthly revenue template for January 2025"
→ MCP executes template with parameters
# Start LSP server
bigquery lsp
# Server provides:
# - Language Server Protocol communication
# - SQL syntax validation
# - Schema-aware completions
# - Query formatting
# - Hover documentation
-- In nvim/lua/bigquery-lsp.lua or init.lua
vim.api.nvim_create_autocmd("FileType", {
  pattern = { "sql", "bq", "bigquery" },
  callback = function()
    vim.lsp.start({
      name = "bigquery-lsp",
      cmd = { "bigquery", "lsp" },
      root_dir = vim.fn.getcwd(),
    })
  end,
})
VS Code: add the server to settings.json:
{
"bigquery-lsp": {
"command": "bigquery",
"args": ["lsp"],
"filetypes": ["sql", "bq", "bigquery"]
}
}
Helix: configure the server in languages.toml:
[[language]]
name = "sql"
language-servers = ["bigquery-lsp"]
[language-server.bigquery-lsp]
command = "bigquery"
args = ["lsp"]# 1. Verify authentication
bigquery auth check
# 2. List available datasets
bigquery datasets list my-project
# 3. List tables in dataset
bigquery tables list my-project.analytics
# 4. Check table schema
bigquery tables describe my-project.analytics.events
# 5. Preview data (text format for readability)
bigquery query --format text \
"SELECT * FROM my-project.analytics.events LIMIT 10"
# 6. Get row count
bigquery query "SELECT COUNT(*) as total FROM my-project.analytics.events"
# 7. Check data distribution
bigquery query --format text "
SELECT
DATE(timestamp) as date,
COUNT(*) as events
FROM my-project.analytics.events
GROUP BY date
ORDER BY date DESC
LIMIT 30
"# 1. Dry run to estimate cost
bigquery dry-run "
SELECT *
FROM my-project.large_dataset.table
WHERE date >= '2025-01-01'
"
# 2. If cost is acceptable, run query
bigquery query "
SELECT *
FROM my-project.large_dataset.table
WHERE date >= '2025-01-01'
"
# 3. For automation, skip confirmation
bigquery query --yes "
SELECT *
FROM my-project.large_dataset.table
WHERE date >= '2025-01-01'
" > results.json# 1. Search for relevant templates
bigquery templates search "daily"
# 2. Validate template
bigquery templates validate daily-metrics
# 3. Run template with parameters
bigquery templates run daily-metrics \
--param date=$(date +%Y-%m-%d) \
--param region=US \
--format json > daily-report.json
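Before wiring this into a schedule, it is worth failing loudly if the report comes back empty:
# Abort if the report file is missing or empty
[ -s daily-report.json ] || { echo "daily-report.json is empty" >&2; exit 1; }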
# 4. Schedule in cron or CI/CD
# In a crontab entry, escape the % signs:
# 0 1 * * * bigquery templates run daily-metrics --param date=$(date +\%Y-\%m-\%d) --yes
# 1. Create external table pointing to GCS
bigquery tables create-external my-project.staging.raw_logs \
--source-uri gs://logs-bucket/2025-01-*.json \
--format json \
--autodetect
# 2. Query external table
bigquery query "
SELECT
timestamp,
user_id,
action
FROM my-project.staging.raw_logs
WHERE action = 'purchase'
LIMIT 100
"
# 3. Update external table when new files arrive
bigquery tables update-external my-project.staging.raw_logs \
--source-uri gs://logs-bucket/2025-02-*.json
# 1. Load initial data
bigquery tables load my-project.dataset.events \
gs://bucket/events-2025-01-01.csv \
--format csv \
--write-disposition replace
# 2. Append incremental data
bigquery tables load my-project.dataset.events \
gs://bucket/events-2025-01-02.csv \
--format csv \
--write-disposition append
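To backfill several days at once, a loop over a gsutil listing works (sketch; assumes gsutil is installed and authenticated):
# Append every daily file for January
for uri in $(gsutil ls 'gs://bucket/events-2025-01-*.csv'); do
  bigquery tables load my-project.dataset.events "$uri" \
    --format csv --write-disposition append --yes
done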
# 3. Verify data loaded
bigquery query "
SELECT
DATE(timestamp) as date,
COUNT(*) as count
FROM my-project.dataset.events
GROUP BY date
ORDER BY date
"# 1. Insert single event (inline JSON)
bigquery tables insert my-project.dataset.events \
--json '{"user_id": "U123", "event": "click", "timestamp": "2025-01-15T10:00:00Z"}'
# 2. Stream JSONL from application
my-app --output jsonl | bigquery tables insert my-project.dataset.events --data - --format json
# 3. Insert batch from JSONL file
bigquery tables insert my-project.dataset.events \
--data events.jsonl --format json
# 4. Stream with transformation and error handling
cat raw_events.json | jq -c '.events[]' | \
bigquery tables insert my-project.dataset.events \
--data - --format json \
--skip-invalid \
--ignore-unknown
Tips:
- Estimate cost with bigquery dry-run before running expensive queries; use --yes only once the cost is known.
- Use --format text for human review and --format json for machine-readable output.
- Run bigquery auth check at the start of scripts and re-authenticate with bigquery auth login when it fails.
- Validate templates with bigquery templates validate before running them.
- Use insert for small or streaming data and load for bulk files; stream from stdin with --data -.
- Validate inputs with --dry-run, tolerate bad rows with --max-bad-records, and choose the replace or append write disposition deliberately.
# Set default project
export GOOGLE_CLOUD_PROJECT=my-project
# Set credentials (for service accounts)
export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json
# Add to ~/.zshrc or ~/.bashrc for persistence
echo 'export GOOGLE_CLOUD_PROJECT=my-project' >> ~/.zshrc
# Option 1: interactive user authentication
bigquery auth login
# Opens browser for Google authentication
# Option 2: service account credentials
export GOOGLE_APPLICATION_CREDENTIALS=/path/to/sa-key.json
bigquery auth check
# Option 3: application default credentials
gcloud auth application-default login
bigquery auth check
# Check current auth status
bigquery auth check
# Re-authenticate if needed
bigquery auth login
# Verify gcloud is set to correct project
gcloud config get-value project
# Set project if needed
gcloud config set project my-project
# Wrong - missing project/dataset
bigquery query "SELECT * FROM table"
# Correct - fully qualified
bigquery query "SELECT * FROM my-project.my-dataset.my-table"
# Or use backticks for reserved words
bigquery query "SELECT * FROM \`my-project.my-dataset.my-table\`"# Check estimated cost
bigquery dry-run "SELECT * FROM large_table WHERE date >= '2025-01-01'"
# Optimize with partition filters
bigquery dry-run "
SELECT * FROM large_table
WHERE _PARTITIONDATE = '2025-01-15'
"# List all templates
bigquery templates list
# Search for template
bigquery templates search "keyword"
# Use exact template name
bigquery templates run exact-template-name
# Small local files can be streamed with tables insert
bigquery tables insert my-project.dataset.table \
--data /tmp/data.jsonl \
--format json
# Large files: stage to Cloud Storage with gsutil, then bulk load with bigquery tables load
gsutil cp /tmp/large-file.jsonl gs://my-bucket/
bigquery tables load my-project.dataset.table \
gs://my-bucket/large-file.jsonl \
--format json
# Schema format: column:type,column:type,...
bigquery tables create-external my-project.dataset.table \
--source-uri gs://bucket/file.csv \
--format csv \
--schema "id:INTEGER,name:STRING,created_at:TIMESTAMP"
# Or use autodetect
bigquery tables create-external my-project.dataset.table \
--source-uri gs://bucket/file.csv \
--format csv \
--autodetect
# For stdio mode, ensure client is using stdio transport
bigquery mcp stdio
# For HTTP mode, check port and firewall
bigquery mcp http --port 8080
# Test HTTP endpoint
curl http://localhost:8080
# Check bigquery is in PATH
which bigquery
# Test LSP manually
bigquery lsp
# Verify editor configuration points to correct command
# Neovim: check cmd = { "bigquery", "lsp" }
# VS Code: check "command": "bigquery", "args": ["lsp"]
# Authentication
bigquery auth check # Check auth status
bigquery auth login # Login with gcloud
# Queries
bigquery query "SELECT ..." # Execute query
bigquery query --yes "SELECT ..." # Skip confirmation
bigquery query --format text "SELECT ..." # Table output
bigquery dry-run "SELECT ..." # Estimate cost
# Datasets
bigquery datasets list PROJECT # List datasets
# Tables
bigquery tables list PROJECT.DATASET # List tables
bigquery tables describe PROJECT.DATASET.TABLE # Show schema
bigquery tables insert TABLE --json '{"id": 1}' # Insert rows (inline)
bigquery tables insert TABLE --data file.jsonl --format json # Insert from JSONL
cat data.jsonl | bigquery tables insert TABLE --data - # Stream insert
bigquery tables load TABLE file.csv # Load data (bulk)
bigquery tables load TABLE gs://bucket/file.csv # Load from GCS
bigquery tables extract TABLE gs://bucket/output.csv # Extract to GCS
bigquery tables create-external TABLE --source-uri ... # External table
bigquery tables update-external TABLE --source-uri ... # Update external
# Templates
bigquery templates list # List templates
bigquery templates search "keyword" # Search templates
bigquery templates validate TEMPLATE # Validate template
bigquery templates run TEMPLATE --param key=value # Run template
# MCP Server
bigquery mcp stdio # Start MCP (stdio mode)
bigquery mcp http # Start MCP (HTTP mode)
bigquery mcp http --port 3000 # Custom port
# LSP Server
bigquery lsp # Start LSP server
#!/bin/bash
# daily-etl.sh
# Authenticate with service account
export GOOGLE_APPLICATION_CREDENTIALS=/secrets/sa-key.json
bigquery auth check || exit 1
# Run daily ETL template
bigquery templates run daily-etl \
--param date=$(date +%Y-%m-%d) \
--yes \
--format json > /tmp/etl-result.json
# Check result
if [ $? -eq 0 ]; then
echo "ETL completed successfully"
else
echo "ETL failed"
exit 1
fi
#!/bin/bash
# check-data-quality.sh
# Run data quality template
RESULT=$(bigquery templates run data-quality-check \
--param table=my-project.dataset.table \
--yes \
--format json)
# Parse result and check quality metrics
INVALID_ROWS=$(echo "$RESULT" | jq '.invalid_rows')
if [ "$INVALID_ROWS" -gt 100 ]; then
echo "Data quality check failed: $INVALID_ROWS invalid rows"
exit 1
else
echo "Data quality check passed"
fi
#!/bin/bash
# generate-report.sh
# Generate weekly report
bigquery templates run weekly-revenue-report \
--param week_start=$(date -d "last monday" +%Y-%m-%d) \
--param week_end=$(date -d "next sunday" +%Y-%m-%d) \
--yes \
--format json > /reports/weekly-$(date +%Y-%m-%d).json
# Upload to GCS
gsutil cp /reports/weekly-*.json gs://reports-bucket/
Command groups: bigquery auth {check,login}, bigquery query, bigquery dry-run, bigquery datasets list, bigquery tables {list,describe,insert,load,extract,create-external,update-external}, bigquery templates {list,search,validate,run}, bigquery mcp {stdio,http}, bigquery lsp.
Remember to stream with --data -, run auth check and dry-run before automated jobs, reserve --yes for automation, and choose insert for streaming rows versus load for bulk files.