# Convex Performance Patterns
Guide for Convex performance optimization including denormalization, index design, avoiding N+1 queries, OCC (Optimistic Concurrency Control), and handling hot spots. Use when optimizing query performance, designing data models, handling high-contention writes, or troubleshooting OCC errors. Activates for performance issues, index optimization, denormalization patterns, or concurrency control tasks.
npx skill4agent add fluid-tools/claude-skills convex-performance-patternsany@typescript-eslint/no-explicit-anyanyexport const getTeamWithMembers = query({
args: { teamId: v.id("teams") },
returns: v.null(),
handler: async (ctx, args) => {
const team = await ctx.db.get(args.teamId);
if (!team) return null;
// ❌ This triggers N additional reads, each causing re-renders
const members = await Promise.all(
team.memberIds.map((id) => ctx.db.get(id))
);
return { team, members };
},
});// Schema: teams.members: v.array(v.object({ userId: v.id("users"), name: v.string(), avatar: v.string() }))
export const getTeamWithMembers = query({
args: { teamId: v.id("teams") },
returns: v.union(
v.object({
_id: v.id("teams"),
_creationTime: v.number(),
name: v.string(),
members: v.array(
v.object({
userId: v.id("users"),
name: v.string(),
avatar: v.string(),
})
),
}),
v.null()
),
handler: async (ctx, args) => {
return await ctx.db.get(args.teamId); // Single read, includes members
},
});.collect()const messages = await ctx.db
.query("messages")
.withIndex("by_channel", (q) => q.eq("channelId", channelId))
.collect();
const count = messages.length;const messages = await ctx.db
.query("messages")
.withIndex("by_channel", (q) => q.eq("channelId", channelId))
.take(100);
const count = messages.length === 100 ? "99+" : String(messages.length);// Maintain a separate "channelStats" table with messageCount field
// Update it in the same mutation that inserts messages
// Reads one channelStats document instead of counting all messages.
export const getMessageCount = query({
  args: { channelId: v.id("channels") },
  returns: v.number(),
  handler: async (ctx, args) => {
    const stats = await ctx.db
      .query("channelStats")
      .withIndex("by_channel", (q) => q.eq("channelId", args.channelId))
      .unique();
    // No stats document yet means no messages have been recorded.
    return stats?.messageCount ?? 0;
  },
});
// Insert a message and keep the denormalized channelStats count in sync.
// Both writes happen in the same mutation, so they commit atomically.
export const addMessage = mutation({
  args: { channelId: v.id("channels"), content: v.string() },
  returns: v.id("messages"),
  handler: async (ctx, args) => {
    const messageId = await ctx.db.insert("messages", {
      channelId: args.channelId,
      content: args.content,
    });
    // Update denormalized count
    const stats = await ctx.db
      .query("channelStats")
      .withIndex("by_channel", (q) => q.eq("channelId", args.channelId))
      .unique();
    if (stats) {
      await ctx.db.patch(stats._id, { messageCount: stats.messageCount + 1 });
    } else {
      // First message in the channel: create the stats row lazily.
      await ctx.db.insert("channelStats", {
        channelId: args.channelId,
        messageCount: 1,
      });
    }
    return messageId;
  },
});

// Schema
// isImportant is derived from tags at write time so reads can use an index
// instead of scanning and filtering on the tags array.
export default defineSchema({
  posts: defineTable({
    body: v.string(),
    tags: v.array(v.string()),
    // Denormalized: computed on write
    isImportant: v.boolean(),
  }).index("by_important", ["isImportant"]),
});
// Mutation: compute on write
export const createPost = mutation({
  args: { body: v.string(), tags: v.array(v.string()) },
  returns: v.id("posts"),
  handler: async (ctx, args) => {
    return await ctx.db.insert("posts", {
      body: args.body,
      tags: args.tags,
      // Computed here so queries never need to inspect the tags array.
      isImportant: args.tags.includes("important"), // Denormalize!
    });
  },
});
// Query: O(log n) lookup
export const getImportantPosts = query({
  args: {},
  returns: v.array(
    v.object({
      _id: v.id("posts"),
      _creationTime: v.number(),
      body: v.string(),
      // Documents returned by .collect() include every schema field;
      // the validator must list tags or returns validation fails.
      tags: v.array(v.string()),
      isImportant: v.boolean(),
    })
  ),
  handler: async (ctx) => {
    // Indexed equality lookup — no table scan, no post-filtering.
    return await ctx.db
      .query("posts")
      .withIndex("by_important", (q) => q.eq("isImportant", true))
      .collect();
  },
});

// Schema
// Compound index: queries that use a leftmost prefix of the fields
// can all be served by this one index.
export default defineSchema({
  messages: defineTable({
    channelId: v.id("channels"),
    authorId: v.id("users"),
    content: v.string(),
    isDeleted: v.boolean(),
  })
    // ✅ This single index serves THREE query patterns:
    // 1. All messages in channel: .eq("channelId", id)
    // 2. Messages by author in channel: .eq("channelId", id).eq("authorId", id)
    // 3. Non-deleted messages by author: .eq("channelId", id).eq("authorId", id).eq("isDeleted", false)
    .index("by_channel_author_deleted", ["channelId", "authorId", "isDeleted"]),
});
// ❌ REDUNDANT: Don't create by_channel if you have by_channel_author_deleted
// The compound index can serve channel-only queries by partial prefix match.
// Name compound indexes after their fields, e.g. by_field1_and_field2_and_field3:
.index("by_channel", ["channelId"])
.index("by_channel_and_author", ["channelId", "authorId"])
.index("by_user_and_status_and_createdAt", ["userId", "status", "createdAt"])

// ❌ BAD: .filter() scans every document in the table
const activeUsers = await ctx.db
  .query("users")
  .filter((q) => q.eq(q.field("status"), "active"))
  .collect();

// ✅ GOOD: an index narrows the read before any documents are fetched
const activeUsers = await ctx.db
.query("users")
.withIndex("by_status", (q) => q.eq("status", "active"))
.collect();// Only if the dataset is bounded!
const allUsers = await ctx.db.query("users").take(1000);
const filtered = allUsers.filter(
(u) => u.status === "active" && u.role !== "bot"
);export const incrementCounter = mutation({
args: {},
returns: v.null(),
handler: async (ctx) => {
const counter = await ctx.db.query("counters").unique();
await ctx.db.patch(counter!._id, { count: counter!.count + 1 });
return null;
},
});
// If 100 users click at once, 99 will retry → cascading OCC errors// Schema: counterShards table
// Writes land on separate shard rows, so concurrent increments
// rarely touch the same document (avoids OCC conflicts).
export default defineSchema({
  counterShards: defineTable({
    shardId: v.number(),
    delta: v.number(),
  }).index("by_shard", ["shardId"]),
});
// On write: pick random shard
export const incrementCounter = mutation({
  args: {},
  returns: v.null(),
  handler: async (ctx) => {
    // Random shard in [0, 10): spreads writes so concurrent calls
    // insert into different rows instead of patching one hot document.
    const shardId = Math.floor(Math.random() * 10);
    await ctx.db.insert("counterShards", { shardId, delta: 1 });
    return null;
  },
});
// On read: sum all shards
export const getCount = query({
  args: {},
  returns: v.number(),
  handler: async (ctx) => {
    // NOTE(review): each increment inserts a new row, so this .collect()
    // grows unbounded unless a periodic job consolidates shard rows
    // (the doc mentions such a job later) — verify that job exists.
    const shards = await ctx.db.query("counterShards").collect();
    return shards.reduce((sum, s) => sum + s.delta, 0);
  },
});

import { Workpool } from "@convex-dev/workpool";
import { components } from "./_generated/api";

// Alternative: serialize contended writes through a Workpool.
const counterPool = new Workpool(components.counterWorkpool, {
  maxParallelism: 1, // Serialize all counter updates
});

export const incrementCounter = mutation({
  args: {},
  returns: v.null(),
  handler: async (ctx) => {
    // Enqueue instead of writing directly: the pool runs updates one
    // at a time, so no two mutations race on the counter document.
    await counterPool.enqueueMutation(ctx, internal.counters.doIncrement, {});
    return null;
  },
});

import { Aggregate } from "@convex-dev/aggregate";
// Atomic increments without OCC conflicts
await aggregate.insert(ctx, "pageViews", 1);
const total = await aggregate.sum(ctx);

// Actions: prefer one combined ctx.runQuery (or ctx.scheduler for follow-up
// work) over chained ctx.runQuery calls — each call runs in its own
// transaction and may see a different snapshot.

// ❌ BAD: two ctx.runQuery calls see different snapshots
export const processTeam = action({
args: { teamId: v.id("teams") },
returns: v.null(),
handler: async (ctx, args) => {
const team = await ctx.runQuery(internal.teams.getTeam, {
teamId: args.teamId,
});
const owner = await ctx.runQuery(internal.users.getUser, {
userId: team.ownerId,
});
// Owner might have changed between the two queries!
return null;
},
});export const processTeam = action({
args: { teamId: v.id("teams") },
returns: v.null(),
handler: async (ctx, args) => {
const teamWithOwner = await ctx.runQuery(internal.teams.getTeamWithOwner, {
teamId: args.teamId,
});
// Team and owner fetched atomically
return null;
},
});export const createUsers = action({
args: { users: v.array(v.object({ name: v.string() })) },
returns: v.null(),
handler: async (ctx, args) => {
for (const user of args.users) {
await ctx.runMutation(internal.users.insert, { user });
}
// If third insert fails, first two still exist!
return null;
},
});export const createUsers = mutation({
args: { users: v.array(v.object({ name: v.string() })) },
returns: v.array(v.id("users")),
handler: async (ctx, args) => {
const ids: Id<"users">[] = [];
for (const user of args.users) {
ids.push(
await ctx.db.insert("users", { name: user.name, createdAt: Date.now() })
);
}
return ids; // All succeed or all fail together
},
});// ❌ BAD: Unbounded collect
const allMessages = await ctx.db
  .query("messages")
  .withIndex("by_channel", (q) => q.eq("channelId", channelId))
  .collect();

// ✅ GOOD: Bounded with take()
const recentMessages = await ctx.db
  .query("messages")
  .withIndex("by_channel", (q) => q.eq("channelId", channelId))
  .order("desc")
  .take(50);

// ✅ GOOD: independent reads fetched in parallel
export const getDashboard = query({
args: { userId: v.id("users") },
returns: v.object({
user: v.object({ _id: v.id("users"), name: v.string() }),
stats: v.object({ messageCount: v.number(), channelCount: v.number() }),
}),
handler: async (ctx, args) => {
// Fetch in parallel - both queries run simultaneously
const [user, stats] = await Promise.all([
ctx.db.get(args.userId),
ctx.db
.query("userStats")
.withIndex("by_user", (q) => q.eq("userId", args.userId))
.unique(),
]);
if (!user) throw new Error("User not found");
return {
user: { _id: user._id, name: user.name },
stats: stats ?? { messageCount: 0, channelCount: 0 },
};
},
});// ❌ BAD: Collecting then taking first
const users = await ctx.db
  .query("users")
  .withIndex("by_email", (q) => q.eq("email", email))
  .collect();
const user = users[0];

// ✅ GOOD: Use .first() or .unique()
const user = await ctx.db
  .query("users")
  .withIndex("by_email", (q) => q.eq("email", email))
  .first();

// For exactly-one semantics (throws if multiple)
const user = await ctx.db
  .query("users")
  .withIndex("by_email", (q) => q.eq("email", email))
  .unique();

// ❌ BAD: N+1 — one extra read per post author
const posts = await ctx.db.query("posts").take(10);
const postsWithAuthors = await Promise.all(
  posts.map(async (post) => ({
    ...post,
    author: await ctx.db.get(post.authorId), // N additional queries!
  }))
);

// Option 1: Denormalize author info into posts
// Schema: posts.author: v.object({ id: v.id("users"), name: v.string() })

// Option 2: Batch fetch with getAll (from convex-helpers)
import { getAll } from "convex-helpers/server/relationships";
const posts = await ctx.db.query("posts").take(10);
const authorIds = [...new Set(posts.map((p) => p.authorId))];
const authors = await getAll(ctx.db, authorIds);
// getAll yields null for missing documents — skip them when building the map,
// otherwise `a._id` throws on a deleted author.
const authorMap = new Map(
  authors.filter((a) => a !== null).map((a) => [a._id, a])
);
const postsWithAuthors = posts.map((post) => ({
  ...post,
  author: authorMap.get(post.authorId),
}));

// ❌ BAD: Full table scan!
const allItems = await ctx.db.query("items").collect();// With pagination or limits
const items = await ctx.db.query("items").take(100);
// Or with index if filtering
const items = await ctx.db
.query("items")
.withIndex("by_status", (q) => q.eq("status", "active"))
.take(100);// Global counter - constant OCC conflicts under load
const global = await ctx.db.query("globals").unique();
await ctx.db.patch(global!._id, { viewCount: global!.viewCount + 1 });// Sharded counter
const shardId = Math.floor(Math.random() * 10);
await ctx.db.insert("viewShards", { shardId, delta: 1, timestamp: Date.now() });
// Periodic aggregation job consolidates shards.filter().collect()take(n).collect().length| Pattern | Method | Use Case |
|---|---|---|
| Get by ID | `ctx.db.get(id)` | Single document lookup |
| Get multiple | `getAll(ctx.db, ids)` | Multiple documents (use `convex-helpers`) |
| Get first | `.first()` | First matching document |
| Get unique | `.unique()` | Exactly one document (throws if multiple) |
| Indexed query | `.withIndex()` | Efficient filtered query |
// Compound-index query patterns: conditions must follow the index's
// declared field order, using a leftmost prefix.
// Equality on all fields
.withIndex("by_a_b_c", (q) => q.eq("a", 1).eq("b", 2).eq("c", 3))
// Prefix match (uses first N fields)
.withIndex("by_a_b_c", (q) => q.eq("a", 1).eq("b", 2))
// Range on last field
.withIndex("by_a_b_c", (q) => q.eq("a", 1).eq("b", 2).gt("c", 0))
// Cannot skip fields in the middle!
// ❌ .withIndex("by_a_b_c", (q) => q.eq("a", 1).eq("c", 3))