# CodSpeed Benchmark Setup Skill
Set up performance benchmarks and CodSpeed harness for a project. Use this skill whenever the user wants to create benchmarks, add performance tests, set up CodSpeed, configure codspeed.yml, integrate a benchmarking framework (criterion, divan, pytest-benchmark, vitest bench, go test -bench, google benchmark), or when the user says 'add benchmarks', 'set up perf tests', 'create a benchmark', 'benchmark this', or wants to measure performance of their code for the first time. Also trigger when the optimize skill needs benchmarks that don't exist yet.
Install the skill: `npx skill4agent add codspeedhq/codspeed codspeed-setup-harness`

Detect the project type from its manifest (`Cargo.toml`, `package.json`, `pyproject.toml`, `go.mod`, `CMakeLists.txt`), check for an existing `codspeed.yml`, and authenticate with `codspeed auth login`.

| Language | Framework | How to set up |
|---|---|---|
| Rust | divan (recommended), criterion, bencher | Add the CodSpeed compat crate (`codspeed-divan-compat` / `codspeed-criterion-compat`) |
| Python | pytest-benchmark | Install `pytest-codspeed` |
| Node.js | vitest (recommended), tinybench v5, benchmark.js | Install `@codspeed/vitest-plugin` |
| Go | go test -bench | No packages needed — CodSpeed instruments |
| C/C++ | Google Benchmark | Build with CMake, CodSpeed instruments via valgrind-codspeed |
For standalone binaries with no harness, use `codspeed exec -m <mode> -- <command>` or declare benchmarks in `codspeed.yml`.

## Rust — divan

cargo add divan
cargo add codspeed-divan-compat --rename divan --dev

Create the benchmark file under `benches/`:

// benches/my_bench.rs
use divan;
fn main() {
divan::main();
}
#[divan::bench]
fn bench_my_function() {
// Call the function you want to benchmark
// Use divan::black_box() to prevent compiler optimization
divan::black_box(my_crate::my_function());
}

Register the bench target in `Cargo.toml`:

[[bench]]
name = "my_bench"
harness = false

Build and run:

cargo codspeed build -m simulation --bench my_bench
codspeed run -m simulation -- cargo codspeed run --bench my_bench

## Rust — criterion

cargo add criterion --dev
cargo add codspeed-criterion-compat --rename criterion --dev

In `benches/`:

use criterion::{criterion_group, criterion_main, Criterion};
fn bench_my_function(c: &mut Criterion) {
c.bench_function("my_function", |b| {
b.iter(|| my_crate::my_function())
});
}
criterion_group!(benches, bench_my_function);
criterion_main!(benches);

Remember to register the `[[bench]]` target with `harness = false` in `Cargo.toml`.

## Python — pytest-benchmark

pip install pytest-codspeed
# or
uv add --dev pytest-codspeed

# tests/test_benchmarks.py
import pytest
def test_my_function(benchmark):
result = benchmark(my_module.my_function, arg1, arg2)
# You can still assert on the result
assert result is not None
# Or using the pedantic API for setup/teardown:
def test_with_setup(benchmark):
data = prepare_data()
benchmark.pedantic(my_module.process, args=(data,), rounds=100)

Run: `codspeed run -m simulation -- pytest --codspeed`

## Node.js — vitest

npm install -D @codspeed/vitest-plugin
# or
pnpm add -D @codspeed/vitest-plugin

Register the plugin in `vitest.config.ts`:

import { defineConfig } from "vitest/config";
import codspeed from "@codspeed/vitest-plugin";
export default defineConfig({
plugins: [codspeed()],
});

Write benchmarks under `bench/`:

// bench/my.bench.ts
import { bench, describe } from "vitest";
describe("my module", () => {
bench("my function", () => {
myFunction();
});
});

Run: `codspeed run -m simulation -- npx vitest bench`

## Go — go test -bench

// my_test.go
func BenchmarkMyFunction(b *testing.B) {
for i := 0; i < b.N; i++ {
MyFunction()
}
}

Run: `codspeed run -m walltime -- go test -bench . ./...`

## C/C++ — Google Benchmark

#include <benchmark/benchmark.h>
static void BM_MyFunction(benchmark::State& state) {
for (auto _ : state) {
MyFunction();
}
}
BENCHMARK(BM_MyFunction);
BENCHMARK_MAIN();

Build and run:

cmake -B build && cmake --build build
codspeed run -m simulation -- ./build/my_benchmark

## Generic harness — codspeed.yml

$schema: https://raw.githubusercontent.com/CodSpeedHQ/codspeed/refs/heads/main/schemas/codspeed.schema.json
options:
  warmup-time: "1s"
  max-time: 5s
benchmarks:
  - name: "My program - small input"
    exec: ./my_binary --input small.txt
  - name: "My program - large input"
    exec: ./my_binary --input large.txt
    options:
      max-time: 30s

Run all declared benchmarks: `codspeed run -m walltime`
Or run a single command directly without a config file: `codspeed exec -m walltime -- ./my_binary --input data.txt`

Tips: wrap benchmarked calls in `black_box()` (Rust) to prevent the compiler from optimizing the work away, and use `benchmark.pedantic()` (pytest-benchmark) when you need setup/teardown control.

# For language-specific harnesses
cargo codspeed build -m simulation && codspeed run -m simulation -- cargo codspeed run
# or
codspeed run -m simulation -- pytest --codspeed
# or
codspeed run -m simulation -- npx vitest bench
# etc.
# For exec harness
codspeed run -m walltime

After the run completes, inspect results with the `query_flamegraph` tool, then hand off to the `optimize` skill to act on the findings.