Loading...
Loading...
Testing expert with comprehensive knowledge of test structure, mocking strategies, async testing, coverage analysis, and cross-framework debugging. Use PROACTIVELY for test reliability, flaky test debugging, framework migration, and testing architecture decisions. Covers Jest, Vitest, Playwright, and Testing Library.
npx skill4agent add cin12211/orca-q testing-expert

# Detect testing frameworks
node -e "const p=require('./package.json');console.log(Object.keys({...p.devDependencies,...p.dependencies}||{}).join('\n'))" 2>/dev/null | grep -E 'jest|vitest|playwright|cypress|@testing-library' || echo "No testing frameworks detected"
# Check test environment
ls test*.config.* jest.config.* vitest.config.* playwright.config.* 2>/dev/null || echo "No test config files found"
# Find test files
find . -name "*.test.*" -o -name "*.spec.*" | head -5 || echo "No test files found"

# Fast fail approach for different frameworks
npm test || npx jest --passWithNoTests || npx vitest run --reporter=basic --no-watch
# Coverage analysis if needed
npm run test:coverage || npm test -- --coverage
# E2E validation if Playwright detected
npx playwright test --reporter=list

// Bad: Repetitive setup
beforeEach(() => {
mockDatabase.clear();
mockAuth.login({ id: 1, role: 'user' });
});
// Good: Shared test utilities
// tests/utils/setup.js
// Builds a test-user fixture: sensible defaults, with any field
// overridable by the caller (the later spread wins on key collisions).
export const setupTestUser = (overrides = {}) => {
  const defaults = { id: 1, role: 'user' };
  return { ...defaults, ...overrides };
};
export const cleanDatabase = () => mockDatabase.clear();

// Bad: Implementation-focused names
test('getUserById returns user', () => {});
test('getUserById throws error', () => {});
// Good: Behavior-focused organization
describe('User retrieval', () => {
describe('when user exists', () => {
test('should return user data with correct fields', () => {});
});
describe('when user not found', () => {
test('should throw NotFoundError with helpful message', () => {});
});
});

# Clear test type boundaries
tests/
├── unit/ # Fast, isolated tests
├── integration/ # Component interaction tests
├── e2e/ # Full user journey tests
└── utils/       # Shared test utilities

| Test Double | When to Use | Example |
|---|---|---|
| Spy | Monitor existing function calls | `jest.spyOn(obj, 'method')` |
| Stub | Replace function with controlled output | `jest.fn().mockReturnValue(value)` |
| Mock | Verify interactions with dependencies | Module mocking |
// Jest
beforeEach(() => {
jest.clearAllMocks();
});
// Vitest
beforeEach(() => {
vi.clearAllMocks();
});
// Manual cleanup pattern
afterEach(() => {
// Reset any global state
// Clear test databases
// Reset environment variables
});

// Good: Mock only external boundaries
jest.mock('./api/userService', () => ({
fetchUser: jest.fn(),
updateUser: jest.fn(),
}));
// Avoid: Over-mocking internal logic
// Don't mock every function in the module under test

# Run tests serially to identify timing issues
npm test -- --runInBand
# Multiple runs to catch intermittent failures
for i in {1..10}; do npm test && echo "Run $i passed" || echo "Run $i failed"; done
# Memory leak detection
npm test -- --detectLeaks --logHeapUsage

// Bad: Missing await
test('user creation', () => {
const user = createUser(userData); // Returns promise
expect(user.id).toBeDefined(); // Will fail
});
// Good: Proper async handling
test('user creation', async () => {
const user = await createUser(userData);
expect(user.id).toBeDefined();
});
// Testing Library async patterns
test('loads user data', async () => {
render(<UserProfile userId="123" />);
// Wait for async loading to complete
const userName = await screen.findByText('John Doe');
expect(userName).toBeInTheDocument();
});

// Jest timer mocking
beforeEach(() => {
jest.useFakeTimers();
});
afterEach(() => {
jest.runOnlyPendingTimers();
jest.useRealTimers();
});
test('delayed action', async () => {
const callback = jest.fn();
setTimeout(callback, 1000);
jest.advanceTimersByTime(1000);
expect(callback).toHaveBeenCalled();
});

// jest.config.js
{
"collectCoverageFrom": [
"src/**/*.{js,ts}",
"!src/**/*.d.ts",
"!src/**/*.stories.*",
"!src/**/index.ts"
],
"coverageThreshold": {
"global": {
"branches": 80,
"functions": 80,
"lines": 80,
"statements": 80
}
}
}

# Generate detailed coverage reports
npm test -- --coverage --coverageReporters=text --coverageReporters=html
# Focus on uncovered branches
npm test -- --coverage | grep -A 10 "Uncovered"
# Identify critical paths without coverage
grep -r "throw\|catch" src/ | wc -l # Count error paths
npm test -- --coverage --collectCoverageFrom="src/critical/**"

// Bad: Testing implementation details for coverage
test('internal calculation', () => {
const calculator = new Calculator();
expect(calculator._privateMethod()).toBe(42); // Brittle
});
// Good: Testing behavior and edge cases
test('calculation handles edge cases', () => {
expect(() => calculate(null)).toThrow('Invalid input');
expect(() => calculate(Infinity)).toThrow('Cannot calculate infinity');
expect(calculate(0)).toBe(0);
});

// Database transaction pattern
beforeEach(async () => {
await db.beginTransaction();
});
afterEach(async () => {
await db.rollback();
});
// Docker test containers (if available)
beforeAll(async () => {
container = await testcontainers
.GenericContainer('postgres:13')
.withExposedPorts(5432)
.withEnv('POSTGRES_PASSWORD', 'test')
.start();
});

// Page Object Model pattern
// Page Object Model: encapsulates the login form's locators and the
// high-level login action so tests don't repeat raw selectors.
class LoginPage {
// page: assumed to be a Playwright Page (exposes .locator()) — TODO confirm against caller
constructor(page) {
this.page = page;
// Locators are resolved once here and reused by every action below.
this.emailInput = page.locator('[data-testid="email"]');
this.passwordInput = page.locator('[data-testid="password"]');
this.submitButton = page.locator('button[type="submit"]');
}
// Fills both credential fields, then clicks submit; each step is awaited in order.
async login(email, password) {
await this.emailInput.fill(email);
await this.passwordInput.fill(password);
await this.submitButton.click();
}
}# Environment variable consistency
CI_ENV=true npm test # Simulate CI environment
# Docker for environment consistency
docker-compose -f test-compose.yml up -d
npm test
docker-compose -f test-compose.yml down

// Jest parallelization
{
"maxWorkers": "50%",
"testTimeout": 10000,
"setupFilesAfterEnv": ["<rootDir>/tests/setup.js"]
}
// Vitest performance config
export default {
test: {
threads: true,
maxThreads: 4,
minThreads: 2,
isolate: false // For faster execution, trade isolation
}
}

# Test sharding for large suites
npm test -- --shard=1/4 # Run 1 of 4 shards
# Caching strategies
npm ci --cache .npm-cache
npm test -- --cache --cacheDirectory=.test-cache
# Retry configuration for flaky tests
npm test -- --retries=3

# Prefer semantic queries (getByRole) over getByTestId where possible

# Run tests multiple times to identify patterns
npm test -- --runInBand --verbose 2>&1 | tee test-output.log
grep -i "timeout\|error\|fail" test-output.log

# Find mock usage patterns
grep -r "jest.mock\|vi.mock\|jest.fn" tests/ | head -10

# Audit shared beforeEach hooks for hidden state coupling

# Check environment consistency
env NODE_ENV=test npm test
CI=true NODE_ENV=test npm test

# Package.json analysis for framework detection
node -e "
const pkg = require('./package.json');
const deps = {...pkg.dependencies, ...pkg.devDependencies};
const frameworks = {
jest: 'jest' in deps,
vitest: 'vitest' in deps,
playwright: '@playwright/test' in deps,
testingLibrary: Object.keys(deps).some(d => d.startsWith('@testing-library'))
};
console.log(JSON.stringify(frameworks, null, 2));
" 2>/dev/null || echo "Could not analyze package.json"

# Test configuration detection
find . -maxdepth 2 -name "*.config.*" | grep -E "(jest|vitest|playwright)" || echo "No test config files found"

# Debug failing tests
npm test -- --runInBand --verbose --no-cache
# Performance analysis
npm test -- --logHeapUsage --detectLeaks
# Coverage with thresholds
npm test -- --coverage --coverageThreshold='{"global":{"branches":80}}'

# Performance debugging
vitest --reporter=verbose --no-file-parallelism
# UI mode for debugging
vitest --ui --coverage.enabled
# Browser testing
vitest --browser.enabled --browser.name=chrome

# Debug with headed browser
npx playwright test --debug --headed
# Generate test report
npx playwright test --reporter=html
# Cross-browser testing
npx playwright test --project=chromium --project=firefox

New project, modern stack? → Vitest
Existing Jest setup? → Stay with Jest
E2E testing needed? → Add Playwright
React/component testing? → Testing Library + (Jest|Vitest)

Intermittent failures? → Run with --runInBand, check async patterns
CI-only failures? → Check environment differences, add retries
Timing issues? → Mock timers, use waitFor patterns
Memory issues? → Check cleanup, use --detectLeaks

Slow test suite? → Enable parallelization, check test isolation
Large codebase? → Use test sharding, optimize imports
CI performance? → Cache dependencies, use test splitting
Memory usage? → Review mock cleanup, check for leaks