Loading...
Loading...
Build with D1 serverless SQLite database on Cloudflare's edge. Use when: creating databases, writing SQL migrations, querying D1 from Workers, handling relational data, or troubleshooting D1_ERROR, statement too long, migration failures, or query performance issues. Prevents 14 documented errors.
npx skill4agent add jezweb/claude-skills cloudflare-d1

# Create a new D1 database
npx wrangler d1 create my-database
# Output includes database_id - save this!
# ✅ Successfully created DB 'my-database'
#
# [[d1_databases]]
# binding = "DB"
# database_name = "my-database"
# database_id = "<UUID>"

wrangler.jsonc:

{
"name": "my-worker",
"main": "src/index.ts",
"compatibility_date": "2025-10-11",
"d1_databases": [
{
"binding": "DB", // Available as env.DB in your Worker
"database_name": "my-database", // Name from wrangler d1 create
"database_id": "<UUID>", // ID from wrangler d1 create
"preview_database_id": "local-db" // For local development
}
]
}bindingenv.DBdatabase_idpreview_database_iddatabase_id# Create migration file
npx wrangler d1 migrations create my-database create_users_table
# This creates: migrations/0001_create_users_table.sql-- migrations/0001_create_users_table.sql
DROP TABLE IF EXISTS users;
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY AUTOINCREMENT,
email TEXT NOT NULL UNIQUE,
username TEXT NOT NULL,
created_at INTEGER NOT NULL,
updated_at INTEGER
);
-- Create index for common queries
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
-- Optimize database
PRAGMA optimize;# Apply locally first (for testing)
npx wrangler d1 migrations apply my-database --local
# Apply to production when ready
npx wrangler d1 migrations apply my-database --remote

// src/index.ts
import { Hono } from 'hono';

// Bindings declared in wrangler.jsonc ("binding": "DB" → env.DB)
type Bindings = {
  DB: D1Database;
};

const app = new Hono<{ Bindings: Bindings }>();

/**
 * GET /api/users/:email — fetch a single user row by email.
 * Responds 404 when no row matches, 500 on any D1 failure.
 */
app.get('/api/users/:email', async (c) => {
  const email = c.req.param('email');
  try {
    // ALWAYS use prepared statements with bind() — never interpolate
    // user input into the SQL string.
    const result = await c.env.DB.prepare(
      'SELECT * FROM users WHERE email = ?'
    )
      .bind(email)
      .first();
    if (!result) {
      return c.json({ error: 'User not found' }, 404);
    }
    return c.json(result);
  } catch (error: unknown) {
    // Catch variables are untyped; narrow before reading .message so a
    // non-Error throw (string, null, …) cannot crash the handler.
    const message = error instanceof Error ? error.message : String(error);
    console.error('D1 Error:', message);
    return c.json({ error: 'Database error' }, 500);
  }
});
export default app;

# 1. Create migration
npx wrangler d1 migrations create <DATABASE_NAME> <MIGRATION_NAME>
# 2. List unapplied migrations
npx wrangler d1 migrations list <DATABASE_NAME> --local
npx wrangler d1 migrations list <DATABASE_NAME> --remote
# 3. Apply migrations
npx wrangler d1 migrations apply <DATABASE_NAME> --local # Test locally
npx wrangler d1 migrations apply <DATABASE_NAME> --remote # Deploy to productionmigrations/
├── 0000_initial_schema.sql
├── 0001_add_users_table.sql
├── 0002_add_posts_table.sql
└── 0003_add_indexes.sqld1_migrations{
"d1_databases": [
{
"binding": "DB",
"database_name": "my-database",
"database_id": "<UUID>",
"migrations_dir": "db/migrations", // Custom directory (default: migrations/)
"migrations_table": "schema_migrations" // Custom tracking table (default: d1_migrations)
}
]
}-- Use IF NOT EXISTS to make migrations idempotent
CREATE TABLE IF NOT EXISTS users (...);
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
-- Run PRAGMA optimize after schema changes
PRAGMA optimize;
-- Use UPPERCASE BEGIN/END in triggers (lowercase fails remotely)
CREATE TRIGGER update_timestamp
AFTER UPDATE ON users
FOR EACH ROW
BEGIN
UPDATE users SET updated_at = unixepoch() WHERE user_id = NEW.user_id;
END;
-- Use transactions for data migrations
BEGIN TRANSACTION;
UPDATE users SET updated_at = unixepoch() WHERE updated_at IS NULL;
COMMIT;-- DON'T include BEGIN TRANSACTION at start of migration file (D1 handles this)
BEGIN TRANSACTION; -- ❌ Remove this
-- DON'T use lowercase begin/end in triggers (works locally, FAILS remotely)
CREATE TRIGGER my_trigger
AFTER INSERT ON table
begin -- ❌ Use BEGIN (uppercase)
UPDATE ...;
end; -- ❌ Use END (uppercase)
-- DON'T use MySQL/PostgreSQL syntax
ALTER TABLE users MODIFY COLUMN email VARCHAR(255); -- ❌ Not SQLite
-- DON'T create tables without IF NOT EXISTS
CREATE TABLE users (...); -- ❌ Fails if table exists-- Temporarily disable foreign key checks during schema changes
PRAGMA defer_foreign_keys = true;
-- Make schema changes that would violate foreign keys
ALTER TABLE posts DROP COLUMN author_id;
ALTER TABLE posts ADD COLUMN user_id INTEGER REFERENCES users(user_id);
-- Foreign keys re-enabled automatically at end of migrationinterface Env { DB: D1Database; }
type Bindings = { DB: D1Database; };
const app = new Hono<{ Bindings: Bindings }>();const user = await env.DB.prepare('SELECT * FROM users WHERE email = ?')
.bind(email).first();.all(){ results, meta }.first().first('column').run(){ success, meta }const results = await env.DB.batch([
env.DB.prepare('SELECT * FROM users WHERE user_id = ?').bind(1),
env.DB.prepare('SELECT * FROM posts WHERE user_id = ?').bind(1)
]);await env.DB.exec('SELECT * FROM users;'); // Only for migrations/maintenance// CREATE
const { meta } = await env.DB.prepare(
'INSERT INTO users (email, username, created_at) VALUES (?, ?, ?)'
).bind(email, username, Date.now()).run();
const newUserId = meta.last_row_id;
// READ (single)
const user = await env.DB.prepare('SELECT * FROM users WHERE user_id = ?')
.bind(userId).first();
// READ (multiple)
const { results } = await env.DB.prepare('SELECT * FROM users LIMIT ?')
.bind(10).all();
// UPDATE
const { meta } = await env.DB.prepare('UPDATE users SET username = ? WHERE user_id = ?')
.bind(newUsername, userId).run();
const rowsAffected = meta.rows_written;
// DELETE
await env.DB.prepare('DELETE FROM users WHERE user_id = ?').bind(userId).run();
// COUNT
const count = await env.DB.prepare('SELECT COUNT(*) as total FROM users').first('total');
// EXISTS check
const exists = await env.DB.prepare('SELECT 1 FROM users WHERE email = ? LIMIT 1')
.bind(email).first();const page = parseInt(c.req.query('page') || '1');
const limit = 20;
const offset = (page - 1) * limit;
const [countResult, usersResult] = await c.env.DB.batch([
c.env.DB.prepare('SELECT COUNT(*) as total FROM users'),
c.env.DB.prepare('SELECT * FROM users ORDER BY created_at DESC LIMIT ? OFFSET ?')
.bind(limit, offset)
]);
return c.json({
users: usersResult.results,
pagination: { page, limit, total: countResult.results[0].total }
});// D1 doesn't support multi-statement transactions, but batch() provides sequential execution
await env.DB.batch([
env.DB.prepare('UPDATE users SET credits = credits - ? WHERE user_id = ?').bind(amount, fromUserId),
env.DB.prepare('UPDATE users SET credits = credits + ? WHERE user_id = ?').bind(amount, toUserId),
env.DB.prepare('INSERT INTO transactions (from_user, to_user, amount) VALUES (?, ?, ?)').bind(fromUserId, toUserId, amount)
]);
// If any statement fails, batch stops (transaction-like behavior)

Common D1 error codes: `D1_ERROR`, `D1_EXEC_ERROR`, `D1_TYPE_ERROR`, `D1_COLUMN_NOTFOUND`.

| Error | Cause | Solution |
|---|---|---|
| Statement too long | Large INSERT with 1000+ rows | Break into batches of 100-250 using `batch()` |
| Network connection lost | Transient failure or large import | Implement retry logic (see below) or break into smaller chunks |
| Too many requests queued | Individual queries in loop | Use `batch()` instead of per-row queries |
| D1_TYPE_ERROR | Using `undefined` as a bound value | Use `null` instead |
| Transaction conflicts | BEGIN TRANSACTION in migration | Remove BEGIN/COMMIT (D1 handles automatically) |
| Foreign key violations | Schema changes break constraints | Use `PRAGMA defer_foreign_keys = true` |
| D1_EXEC_ERROR: incomplete input | Multi-line SQL in D1Database.exec() | Use prepared statements or external .sql files (Issue #9133) |
D1_ERROR: Network connection lostD1 DB storage operation exceeded timeout which caused object to be resetInternal error while starting up D1 DB storage caused object to be resetD1 DB's isolate exceeded its memory limit and was resetasync function queryWithRetry<T>(
fn: () => Promise<T>,
maxRetries = 3,
baseDelay = 100
): Promise<T> {
for (let i = 0; i < maxRetries; i++) {
try {
return await fn();
} catch (error: any) {
const isTransient = error.message?.includes('Network connection lost') ||
error.message?.includes('exceeded timeout') ||
error.message?.includes('exceeded its memory limit');
if (!isTransient || i === maxRetries - 1) throw error;
// Exponential backoff
await new Promise(r => setTimeout(r, baseDelay * Math.pow(2, i)));
}
}
throw new Error('Max retries exceeded');
}
// Usage
const user = await queryWithRetry(() =>
env.DB.prepare('SELECT * FROM users WHERE email = ?').bind(email).first()
);meta.total_attemptsCREATE INDEX idx_users_email ON users(email)CREATE INDEX idx_posts_user_id ON posts(user_id)CREATE INDEX idx_posts_created_at ON posts(created_at DESC)CREATE INDEX idx_posts_user_published ON posts(user_id, published)CREATE INDEX idx_users_active ON users(email) WHERE deleted = 0EXPLAIN QUERY PLAN SELECT ...CREATE INDEX idx_users_email ON users(email);
PRAGMA optimize; -- Run after schema changesSELECT *WHERE LOWER(email)# Local database (automatic creation)
npx wrangler d1 migrations apply my-database --local
npx wrangler d1 execute my-database --local --command "SELECT * FROM users"
# Remote database
npx wrangler d1 execute my-database --remote --command "SELECT * FROM users"
# Remote bindings (wrangler@4.37.0+) - connect local Worker to deployed D1
# Add to wrangler.jsonc: { "binding": "DB", "remote": true }{ "remote": true }D1_ERROR: Failed to parse body as JSON, got: error code: 1031// Keep connection alive with periodic query (optional)
setInterval(async () => {
try {
await env.DB.prepare('SELECT 1').first();
} catch (e) {
console.log('Connection keepalive failed:', e);
}
}, 30 * 60 * 1000); // Every 30 minuteswrangler dev--persist-to# Apply worker2 migrations to worker1's persistence path
cd worker2
npx wrangler d1 migrations apply DB --local --persist-to=../worker1/.wrangler/state
# Now both workers can access D1
cd ../worker1
npx wrangler dev # Both workers share the same D1 data.wrangler/state/v3/d1/miniflare-D1DatabaseObject/<database_id>.sqlitenpx wrangler d1 execute my-database --local --file=seed.sql// Hash user ID to shard number
function getShardId(userId: string): number {
  // Rolling ×31 hash over the user ID's characters:
  // (acc << 5) - acc === acc * 31, with 32-bit coercion at the shift.
  let acc = 0;
  for (const ch of userId) {
    acc = ((acc << 5) - acc) + ch.charCodeAt(0);
  }
  // Fold the (possibly negative) hash onto one of the 10 shards.
  return Math.abs(acc) % 10;
}
// wrangler.jsonc - Define 10 database shards
{
"d1_databases": [
{ "binding": "DB_SHARD_0", "database_id": "..." },
{ "binding": "DB_SHARD_1", "database_id": "..." },
{ "binding": "DB_SHARD_2", "database_id": "..." },
// ... up to DB_SHARD_9
]
}
// Get correct shard for user
// Resolves the D1 binding that owns this user's data, using the hash
// computed by getShardId (defined earlier in this file).
// NOTE(review): indexing env with a template string assumes Env has an
// index signature (or equivalent) covering DB_SHARD_0..DB_SHARD_9 — confirm.
function getUserDb(env: Env, userId: string): D1Database {
const shardId = getShardId(userId);
return env[`DB_SHARD_${shardId}`];
}
// Query user's data from correct shard
const db = getUserDb(env, userId);
const user = await db.prepare('SELECT * FROM users WHERE user_id = ?')
.bind(userId).first();database row size exceeded maximum allowed size// 1. Store large content in R2
const contentKey = `pages/${crypto.randomUUID()}.html`;
await env.R2_BUCKET.put(contentKey, largeHtmlContent);
// 2. Store metadata in D1
await env.DB.prepare(`
INSERT INTO pages (url, r2_key, size, created_at)
VALUES (?, ?, ?, ?)
`).bind(url, contentKey, largeHtmlContent.length, Date.now()).run();
// 3. Retrieve content
const page = await env.DB.prepare('SELECT * FROM pages WHERE url = ?')
.bind(url).first();
if (page) {
const content = await env.R2_BUCKET.get(page.r2_key);
const html = await content.text();
}-- Use lowercase for portability
CREATE TABLE users (user_id INTEGER, email TEXT);
CREATE INDEX idx_users_email ON users(email);
-- NOT: CREATE TABLE Users (UserId INTEGER, Email TEXT);-- Correct
CREATE VIRTUAL TABLE search_index USING fts5(
title,
content,
tokenize = 'porter unicode61'
);
-- Query the index
SELECT * FROM search_index WHERE search_index MATCH 'query terms';wrangler d1 exportbatch().wranglerrm -rf .wrangler
npx wrangler d1 execute db-name --file=database.sql.bind().batch()PRAGMA optimizeIF NOT EXISTSnullundefinedmeta.rows_written.exec()database_idundefinedLIMITSELECT *BEGIN TRANSACTION| Issue # | Error/Issue | Description | How to Avoid | Source |
|---|---|---|---|---|
| #1 | Statement too long | Large INSERT statements exceed D1 limits | Break into batches of 100-250 rows using `batch()` | Existing |
| #2 | Transaction conflicts | `BEGIN TRANSACTION` in migration files conflicts with D1's implicit transaction handling | Remove BEGIN/COMMIT (D1 handles automatically) | Existing |
| #3 | Foreign key violations | Schema changes break foreign key constraints | Use `PRAGMA defer_foreign_keys = true` | Existing |
| #4 | Rate limiting / queue overload | Too many individual queries | Use `batch()` to combine queries | Existing |
| #5 | Memory limit exceeded | Query loads too much data into memory | Add LIMIT, paginate results, shard queries | Existing |
| #6 | Type mismatch errors | Using `undefined` in `bind()` | Always use `null` instead | Existing |
| #7 | Lowercase BEGIN in triggers | Triggers with lowercase `begin`/`end` work locally but fail remotely | Use uppercase `BEGIN`/`END` | TIER 1 |
| #8 | Remote bindings timeout | Connection times out after 1 hour of inactivity | Restart dev server or implement keepalive pattern (Issue #10801) | TIER 1 |
| #9 | Service bindings D1 access | Auxiliary worker can't access D1 in multi-worker dev | Use `--persist-to` to share one local persistence path | TIER 1 |
| #10 | Transient network errors | Random "Network connection lost" failures | Implement exponential backoff retry logic (D1 FAQ) | TIER 1 |
| #11 | FTS5 breaks export | Databases with FTS5 virtual tables can't export | Drop virtual tables before export, recreate after import (Issue #9519) | TIER 1 |
| #12 | Multi-line SQL in exec() | D1Database.exec() fails on multi-line SQL | Use prepared statements or external .sql files (Issue #9133) | TIER 1 |
| #13 | 10 GB database limit | Single database limited to 10 GB | Implement sharding across multiple databases (Community) | TIER 2 |
| #14 | 2 MB row size limit | Rows exceeding 2 MB fail | Use hybrid D1 + R2 storage pattern (Community) | TIER 2 |
# Database management
wrangler d1 create <DATABASE_NAME>
wrangler d1 list
wrangler d1 delete <DATABASE_NAME>
wrangler d1 info <DATABASE_NAME>
# Migrations
wrangler d1 migrations create <DATABASE_NAME> <MIGRATION_NAME>
wrangler d1 migrations list <DATABASE_NAME> --local|--remote
wrangler d1 migrations apply <DATABASE_NAME> --local|--remote
# Execute queries
wrangler d1 execute <DATABASE_NAME> --local|--remote --command "SELECT * FROM users"
wrangler d1 execute <DATABASE_NAME> --local|--remote --file=./query.sql
# Time Travel (view historical data)
wrangler d1 time-travel info <DATABASE_NAME> --timestamp "2025-10-20"
wrangler d1 time-travel restore <DATABASE_NAME> --timestamp "2025-10-20"