Loading...
Loading...
Compare original and translation side by side
Migration files follow the naming convention `YYYYMMDDHHMMSS_descriptive_name`, e.g. `20240315143022_add_status_to_orders.sql`. Use action verbs such as `add`, `remove`, `create`, `drop`, `rename`, and `alter` in the name, guard DDL with `IF NOT EXISTS` / `IF EXISTS`, and keep all migrations in a dedicated `migrations/` directory:
20240301100000_create_users_table.sql
20240305120000_add_email_index_to_users.sql
20240310090000_create_orders_table.sql
20240315143022_add_status_to_orders.sqlYYYYMMDDHHMMSS_描述性名称20240315143022_add_status_to_orders.sqladdremovecreatedroprenamealterIF NOT EXISTSIF EXISTSmigrations/
20240301100000_create_users_table.sql
20240305120000_add_email_index_to_users.sql
20240310090000_create_orders_table.sql
20240315143022_add_status_to_orders.sql-- Migration: 20240315143022_add_status_to_orders.sql
-- UP
-- Adds a status column (default 'pending') to orders and indexes it.
ALTER TABLE orders ADD COLUMN status VARCHAR(50) DEFAULT 'pending';
CREATE INDEX idx_orders_status ON orders (status);
-- DOWN
-- Mirrors UP in reverse order; IF EXISTS keeps the rollback idempotent.
DROP INDEX IF EXISTS idx_orders_status;
ALTER TABLE orders DROP COLUMN IF EXISTS status;-- Migration file: 20240315143022_add_status_to_orders.sql
-- Apply (UP)
ALTER TABLE orders ADD COLUMN status VARCHAR(50) DEFAULT 'pending';
CREATE INDEX idx_orders_status ON orders (status);
-- Rollback (DOWN)
DROP INDEX IF EXISTS idx_orders_status;
ALTER TABLE orders DROP COLUMN IF EXISTS status;| Scenario | Rollback Strategy |
|---|---|
| Add column | Drop column |
| Add index | Drop index |
| Create table | Drop table |
| Add constraint | Drop constraint |
| Rename column | Rename back |
| Drop column | Cannot auto-rollback — must restore from backup or use prior migration to re-add |
| Data backfill | Reverse backfill or accept data state |
| 场景 | 回滚策略 |
|---|---|
| 添加列 | 删除列 |
| 添加索引 | 删除索引 |
| 创建表 | 删除表 |
| 添加约束 | 删除约束 |
| 重命名列 | 改回原名称 |
| 删除列 | 无法自动回滚 —— 必须从备份恢复或使用之前的迁移重新添加 |
| 数据回填 | 反向回填或接受当前数据状态 |
ALTER TABLE ... RENAME COLUMNALTER TABLE users ADD COLUMN full_name VARCHAR(255);// Write to both columns
await db.query(
'UPDATE users SET name = $1, full_name = $1 WHERE id = $2',
[name, userId]
);UPDATE users SET full_name = name WHERE full_name IS NULL;// Read from new column
const user = await db.query('SELECT full_name FROM users WHERE id = $1', [userId]);ALTER TABLE users DROP COLUMN name;ALTER TABLE ... RENAME COLUMNALTER TABLE users ADD COLUMN full_name VARCHAR(255);// 同时写入两列
await db.query(
'UPDATE users SET name = $1, full_name = $1 WHERE id = $2',
[name, userId]
);UPDATE users SET full_name = name WHERE full_name IS NULL;// 从新列读取
const user = await db.query('SELECT full_name FROM users WHERE id = $1', [userId]);ALTER TABLE users DROP COLUMN name;NOT NULLNOT VALIDALTER TABLE orders ADD CONSTRAINT orders_status_not_null
CHECK (status IS NOT NULL) NOT VALID;UPDATE orders SET status = 'unknown' WHERE status IS NULL;ALTER TABLE orders VALIDATE CONSTRAINT orders_status_not_null;NOT NULLALTER TABLE orders ALTER COLUMN status SET NOT NULL;
ALTER TABLE orders DROP CONSTRAINT orders_status_not_null;NOT NULLNOT VALIDALTER TABLE orders ADD CONSTRAINT orders_status_not_null
CHECK (status IS NOT NULL) NOT VALID;UPDATE orders SET status = 'unknown' WHERE status IS NULL;ALTER TABLE orders VALIDATE CONSTRAINT orders_status_not_null;NOT NULLALTER TABLE orders ALTER COLUMN status SET NOT NULL;
ALTER TABLE orders DROP CONSTRAINT orders_status_not_null;| Operation | Risk | Safe Alternative |
|---|---|---|
| Rename column | Breaks queries referencing old name | Expand-and-contract pattern (add new, migrate, drop old) |
| Change column type | May require full table rewrite, long lock | Add new column with new type, backfill, swap |
| Drop column | Irreversible data loss | Verify no code references, back up data, then drop |
| Drop table | Irreversible data and schema loss | Rename to a `_deprecated` name first; drop after an observation period |
| Add `NOT NULL` constraint | Fails if nulls exist; full table scan | Add as nullable, backfill, then add constraint |
| Add column with default | Full table rewrite on older PostgreSQL | Add nullable column, set default, backfill |
| Create index | Blocks writes on table (non-concurrent) | Use `CREATE INDEX CONCURRENTLY` |
| Add CHECK/FK constraint | Validates all existing rows, long lock | Add as `NOT VALID`, then `VALIDATE CONSTRAINT` later |
| Table-rewriting `ALTER TABLE` | Exclusive lock blocks all access | Minimize lock duration, run during low traffic |
| 操作 | 风险 | 安全替代方案 |
|---|---|---|
| 破坏引用旧列名的查询 | 扩展-收缩模式(添加新列、迁移数据、删除旧列) |
| 可能需要全表重写,锁定时间长 | 添加带新类型的新列、回填数据、切换使用 |
| 不可逆的数据丢失 | 确认无代码引用、备份数据后再删除 |
| 不可逆的数据和模式丢失 | 先重命名为 |
| 若存在空值则失败;全表扫描 | 先添加可为空的列、回填数据,再添加约束 |
| 旧版PostgreSQL会触发全表重写 | 添加可为空列、设置默认值、回填数据 |
| 锁定表,阻止写入(非并发) | 使用 |
| 验证所有现有行,锁定时间长 | 先添加为 |
| 排他锁阻止所有访问 | 最小化锁定时长,在低流量时段执行 |
// Backfill orders.status in limited batches so the table is never locked
// wholesale and the working set stays bounded. `db` must expose a
// node-postgres-style async query(sql, params) returning { rowCount }.
async function backfillStatus(db, batchSize = 1000) {
  let processed = 0;
  let batchCount;
  do {
    const res = await db.query(`
UPDATE orders
SET status = 'pending'
WHERE id IN (
SELECT id FROM orders
WHERE status IS NULL
LIMIT $1
FOR UPDATE SKIP LOCKED
)
RETURNING id
`, [batchSize]);
    batchCount = res.rowCount;
    processed += batchCount;
    console.log(`Backfilled ${processed} rows...`);
    // Brief pause between batches keeps load on the database low.
    await new Promise(done => setTimeout(done, 100));
  } while (batchCount === batchSize);
  console.log(`Backfill complete. Total rows updated: ${processed}`);
}// Batch backfill to avoid locking and memory issues
// Chinese-localized duplicate of backfillStatus above: identical batch-update
// logic; only the console log messages are localized.
async function backfillStatus(db, batchSize = 1000) {
let totalUpdated = 0;
let updated;
do {
const result = await db.query(`
UPDATE orders
SET status = 'pending'
WHERE id IN (
SELECT id FROM orders
WHERE status IS NULL
LIMIT $1
FOR UPDATE SKIP LOCKED
)
RETURNING id
`, [batchSize]);
updated = result.rowCount;
totalUpdated += updated;
console.log(`已回填 ${totalUpdated} 行...`);
// Pause between batches to reduce load
await new Promise(resolve => setTimeout(resolve, 100));
} while (updated === batchSize);
console.log(`回填完成。更新总行数:${totalUpdated}`);
}-- 1. Create the new table with desired schema
CREATE TABLE users_v2 (
-- NOTE(review): GENERATED ALWAYS AS IDENTITY assigns fresh ids, so original
-- users.id values are NOT carried over — confirm no other table references
-- users.id by foreign key before relying on this swap.
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
full_name VARCHAR(255) NOT NULL,
email VARCHAR(255) NOT NULL UNIQUE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- 2. Copy data in batches
INSERT INTO users_v2 (full_name, email, created_at)
SELECT
-- first_name || ' ' || last_name is NULL when either part is NULL, so
-- COALESCE falls back to first_name alone, then to the literal 'Unknown'.
COALESCE(first_name || ' ' || last_name, first_name, 'Unknown'),
email,
created_at
FROM users
WHERE id BETWEEN 1 AND 10000;
-- Repeat for remaining batches
-- 3. Set up triggers or dual-write for new data during migration
-- 4. Swap tables atomically
ALTER TABLE users RENAME TO users_deprecated;
ALTER TABLE users_v2 RENAME TO users;
-- 5. Drop old table after observation period
-- DROP TABLE users_deprecated;-- 1. 创建具有所需模式的新表
CREATE TABLE users_v2 (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
full_name VARCHAR(255) NOT NULL,
email VARCHAR(255) NOT NULL UNIQUE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- 2. Copy data in batches
INSERT INTO users_v2 (full_name, email, created_at)
SELECT
COALESCE(first_name || ' ' || last_name, first_name, 'Unknown'),
email,
created_at
FROM users
WHERE id BETWEEN 1 AND 10000;
-- Repeat for remaining batches
-- 3. Set up triggers or dual-write for new data during migration
-- 4. Swap tables atomically
ALTER TABLE users RENAME TO users_deprecated;
ALTER TABLE users_v2 RENAME TO users;
-- 5. Drop old table after observation period
-- DROP TABLE users_deprecated;undefinedundefinedundefinedundefined// seeds/001_users.js
// Seed: resets the users table to three known fixture rows.
// del() clears existing rows first so reseeding is idempotent.
exports.seed = async function(knex) {
await knex('users').del();
await knex('users').insert([
{ id: 1, name: 'Alice Developer', email: 'alice@example.com', role: 'admin' },
{ id: 2, name: 'Bob Tester', email: 'bob@example.com', role: 'user' },
{ id: 3, name: 'Carol Manager', email: 'carol@example.com', role: 'manager' },
]);
};
// seeds/002_orders.js
// Seed: resets orders; user_id values reference the users seeded above,
// so this file must run after 001_users.js (Knex runs seeds in filename order).
exports.seed = async function(knex) {
await knex('orders').del();
await knex('orders').insert([
{ id: 1, user_id: 1, status: 'completed', total: 99.99 },
{ id: 2, user_id: 2, status: 'pending', total: 49.50 },
{ id: 3, user_id: 1, status: 'shipped', total: 150.00 },
]);
};// seeds/001_users.js
// Duplicate of the seed files above (repeated for the translated section).
// Seed: resets the users table to three known fixture rows.
exports.seed = async function(knex) {
await knex('users').del();
await knex('users').insert([
{ id: 1, name: 'Alice Developer', email: 'alice@example.com', role: 'admin' },
{ id: 2, name: 'Bob Tester', email: 'bob@example.com', role: 'user' },
{ id: 3, name: 'Carol Manager', email: 'carol@example.com', role: 'manager' },
]);
};
// seeds/002_orders.js
// Seed: resets orders; user_id values reference the users seeded above.
exports.seed = async function(knex) {
await knex('orders').del();
await knex('orders').insert([
{ id: 1, user_id: 1, status: 'completed', total: 99.99 },
{ id: 2, user_id: 2, status: 'pending', total: 49.50 },
{ id: 3, user_id: 1, status: 'shipped', total: 150.00 },
]);
};| Tool | Language | Migration Format | Rollback | Auto-Generate | Key Feature |
|---|---|---|---|---|---|
| Prisma Migrate | JS/TS | SQL files from schema diff | Limited (reset-based) | Yes, from `schema.prisma` | Declarative schema, drift detection |
| Knex.js | JS/TS | JavaScript files | Manual | No (manual) | Flexible, raw SQL support |
| Alembic | Python | Python files | Manual | Yes, from SQLAlchemy models | Branching support, auto-detect |
| ActiveRecord | Ruby | Ruby DSL files | Automatic | No (manual) | Reversible DSL methods |
| Diesel | Rust | SQL files | Manual | Yes, from schema diff | Compile-time schema verification |
| Flyway | Java/JVM | SQL or Java files | Paid feature (undo) | No (manual) | Convention-based, polyglot |
| golang-migrate | Go | SQL files | Manual | No (manual) | CLI-first, driver-agnostic |
| 工具 | 语言 | 迁移格式 | 回滚支持 | 自动生成 | 核心特性 |
|---|---|---|---|---|---|
| Prisma Migrate | JS/TS | 基于模式差异生成的SQL文件 | 有限(基于重置) | 是,从 `schema.prisma` 生成 | 声明式模式、漂移检测 |
| Knex.js | JS/TS | JavaScript文件 | 手动 | 否(手动编写) | 灵活、支持原生SQL |
| Alembic | Python | Python文件 | 手动 | 是,从SQLAlchemy模型生成 | 分支支持、自动检测 |
| ActiveRecord | Ruby | Ruby DSL文件 | 自动可逆 | 否(手动编写) | 可逆DSL方法 |
| Diesel | Rust | SQL文件 | 手动 | 是,基于模式差异生成 | 编译时模式验证 |
| Flyway | Java/JVM | SQL或Java文件 | 付费功能(撤销) | 否(手动编写) | 基于约定、多语言支持 |
| golang-migrate | Go | SQL文件 | 手动 | 否(手动编写) | 命令行优先、驱动无关 |
| DDL Operation | Lock Acquired | Blocks Reads | Blocks Writes | Duration |
|---|---|---|---|---|
| | No | Yes | Duration of index build |
| | No | No | Longer build, but non-blocking |
| | Yes | Yes | Near-instant (metadata only) |
| | Yes | Yes | Near-instant (virtual default) |
| | Yes | Yes | Near-instant (marks as dropped) |
| | Yes | Yes | Full table rewrite |
| | Yes | Yes | Full table scan to validate |
| | No | Partially | Near-instant |
| | No | No | Scans table, non-blocking |
| DDL操作 | 获取的锁 | 阻塞读 | 阻塞写 | 持续时间 |
|---|---|---|---|---|
| | 否 | 是 | 索引构建期间 |
| | 否 | 否 | 构建时间更长,但非阻塞 |
| | 是 | 是 | 几乎即时(仅元数据操作) |
| | 是 | 是 | 几乎即时(虚拟默认值) |
| | 是 | 是 | 几乎即时(标记为已删除) |
| | 是 | 是 | 全表重写期间 |
| | 是 | 是 | 全表扫描验证期间 |
| | 否 | 部分阻塞 | 几乎即时 |
| | 否 | 否 | 表扫描期间,非阻塞 |
-- Set a lock timeout to fail fast instead of waiting indefinitely
SET lock_timeout = '5s';
-- Retry the operation if it times out
-- Application code should handle this and retry with backoff
-- Cancel long-running queries that block migrations
-- NOTE(review): pg_cancel_backend cancels the backend's *current query* but
-- leaves the session connected; pg_terminate_backend is needed to drop the
-- session entirely. Confirm which behavior the runbook intends.
SELECT pg_cancel_backend(pid)
FROM pg_stat_activity
WHERE state = 'active'
AND query_start < NOW() - INTERVAL '5 minutes'
-- Excludes this monitoring query itself, whose text contains pg_stat_activity.
AND query NOT LIKE '%pg_stat_activity%';-- Set a lock timeout to fail fast instead of waiting indefinitely
SET lock_timeout = '5s';
-- Retry the operation if it times out
-- Application code should handle this and retry with backoff
-- Cancel long-running queries that block migrations
SELECT pg_cancel_backend(pid)
FROM pg_stat_activity
WHERE state = 'active'
AND query_start < NOW() - INTERVAL '5 minutes'
AND query NOT LIKE '%pg_stat_activity%';