# VTEX Marketplace Rate Limiting and Resilience
Apply when implementing retry logic, rate limit handling, or resilience patterns in VTEX API integrations. Covers VTEX rate limit headers (X-RateLimit-Remaining, X-RateLimit-Reset, Retry-After), 429 status handling, exponential backoff with jitter, circuit breaker patterns, and request queuing. Use for any VTEX marketplace integration that must gracefully handle API throttling and maintain high availability.
npx skill4agent add vtexdocs/ai-skills marketplace-rate-limiting

Related skills: marketplace-catalog-sync, marketplace-order-hook, marketplace-fulfillment.

Backoff formula: delay = min(maxDelay, baseDelay * 2^attempt) * (0.5 + random(0, 0.5)). Always honor `Retry-After` before falling back to computed backoff, and watch `X-RateLimit-Remaining` / `X-RateLimit-Reset` to slow down proactively.

| Header | Description |
|---|---|
| Retry-After | Seconds to wait before retrying (present on 429 responses) |
| X-RateLimit-Remaining | Number of requests remaining in the current window |
| X-RateLimit-Reset | Timestamp (seconds) when the rate limit window resets |
Your Integration VTEX API
│ │
│── Request ──────────────────────────▶│
│◀── 200 OK ─────────────────────────│ (success)
│ │
│── Request ──────────────────────────▶│
│◀── 429 + Retry-After: 30 ──────────│ (rate limited)
│ │
│ [Wait: max(Retry-After, backoff)] │
│ [backoff = base * 2^attempt * jitter]│
│ │
│── Retry ───────────────────────────▶│
│◀── 200 OK ─────────────────────────│ (success)

Always honor `Retry-After` when present. Never retry in a bare `while(true)` loop or a tight `setInterval`.

import axios, { AxiosInstance, AxiosError, AxiosRequestConfig, AxiosResponse } from "axios";
/** Tuning knobs for the retry loop in requestWithRetry. */
interface RetryConfig {
  maxRetries: number;  // retries after the first attempt fails
  baseDelayMs: number; // starting backoff delay; doubles each attempt
  maxDelayMs: number;  // upper bound on any single backoff delay
}

/** Defaults: up to 5 retries, 1s base delay, 60s cap per wait. */
const DEFAULT_RETRY_CONFIG: RetryConfig = {
  maxRetries: 5,
  baseDelayMs: 1000,
  maxDelayMs: 60000,
};
/**
 * Exponential backoff delay with jitter.
 *
 * delay = min(maxDelayMs, baseDelayMs * 2^attempt), scaled by a random
 * factor in [0.5, 1.0). The randomness spreads out retries from many
 * clients that were rate-limited at the same instant (thundering herd).
 *
 * @param attempt     zero-based retry attempt number
 * @param baseDelayMs delay for attempt 0 before jitter
 * @param maxDelayMs  hard cap on the pre-jitter delay
 * @returns delay in whole milliseconds
 */
function calculateBackoffWithJitter(
  attempt: number,
  baseDelayMs: number,
  maxDelayMs: number
): number {
  const capped = Math.min(maxDelayMs, baseDelayMs * 2 ** attempt);
  // Random scale between 50% and 100% of the capped delay.
  const scale = 0.5 + Math.random() / 2;
  return Math.floor(capped * scale);
}
/**
 * Executes an API request with automatic retry on 429/503 responses.
 *
 * Waits the greater of the server-provided Retry-After value and an
 * exponential backoff with jitter, then retries, up to maxRetries times.
 *
 * @throws the last AxiosError when retries are exhausted; rethrows
 *         immediately for non-retryable statuses or non-Axios errors.
 */
async function requestWithRetry<T>(
  client: AxiosInstance,
  config: AxiosRequestConfig,
  retryConfig: RetryConfig = DEFAULT_RETRY_CONFIG
): Promise<AxiosResponse<T>> {
  let lastError: AxiosError | undefined;
  for (let attempt = 0; attempt <= retryConfig.maxRetries; attempt++) {
    try {
      return await client.request<T>(config);
    } catch (error: unknown) {
      if (!axios.isAxiosError(error)) {
        throw error; // not an HTTP failure — never retryable here
      }
      lastError = error;
      const status = error.response?.status;
      // Only retry on 429 (rate limited) and 503 (circuit breaker)
      if (status !== 429 && status !== 503) {
        throw error;
      }
      if (attempt === retryConfig.maxRetries) {
        break; // Exhausted retries
      }
      // Respect Retry-After if present. Per RFC 9110 the value is either
      // delta-seconds or an HTTP-date; bare parseInt turns a date into
      // NaN, which previously poisoned Math.max and made setTimeout fire
      // immediately. Handle both forms and ignore malformed values.
      const retryAfterHeader = error.response?.headers?.["retry-after"];
      let retryAfterMs = 0;
      if (retryAfterHeader) {
        const raw = String(retryAfterHeader).trim();
        if (/^\d+$/.test(raw)) {
          retryAfterMs = parseInt(raw, 10) * 1000;
        } else {
          const retryDate = new Date(raw).getTime();
          retryAfterMs = Number.isNaN(retryDate)
            ? 0
            : Math.max(0, retryDate - Date.now());
        }
      }
      // Use the greater of Retry-After or calculated backoff
      const backoffMs = calculateBackoffWithJitter(
        attempt,
        retryConfig.baseDelayMs,
        retryConfig.maxDelayMs
      );
      const delayMs = Math.max(retryAfterMs, backoffMs);
      console.warn(
        `Rate limited (${status}). Retry ${attempt + 1}/${retryConfig.maxRetries} ` +
          `in ${delayMs}ms (Retry-After: ${retryAfterHeader ?? "none"}, ` +
          `backoff: ${backoffMs}ms)`
      );
      await new Promise((resolve) => setTimeout(resolve, delayMs));
    }
  }
  throw lastError ?? new Error("Request failed after all retries");
}
// WRONG: Immediate retry without backoff or Retry-After respect
// Anti-pattern (kept for illustration only): retries with zero delay and
// swallows every error type indiscriminately.
async function retryImmediately<T>(
  client: AxiosInstance,
  config: AxiosRequestConfig,
  maxRetries: number = 3
): Promise<T> {
  for (let i = 0; i < maxRetries; i++) {
    try {
      const response = await client.request<T>(config);
      return response.data;
    } catch (error: unknown) {
      // Retries immediately — will hit 429 again and drain burst credits
      // Does not read Retry-After header — ignores server guidance
      console.log(`Retry ${i + 1}...`);
      // No delay at all — thundering herd when multiple instances retry
    }
  }
  throw new Error("Failed after retries");
}
// Always honor the server's Retry-After guidance rather than guessing.
/**
 * Computes how long to wait before the next retry attempt.
 *
 * Honors the Retry-After header in both forms allowed by RFC 9110
 * (delta-seconds or an HTTP-date) and never waits less than the jittered
 * exponential backoff for the given attempt. A malformed header value is
 * treated as absent instead of producing a NaN delay (the previous code
 * returned NaN when the header was neither a number nor a parseable date).
 */
function getRetryDelayMs(error: AxiosError, attempt: number): number {
  const retryAfterHeader = error.response?.headers?.["retry-after"];
  let retryAfterMs = 0;
  if (retryAfterHeader) {
    const raw = String(retryAfterHeader).trim();
    if (/^\d+$/.test(raw)) {
      // Delta-seconds form
      retryAfterMs = raw.length ? parseInt(raw, 10) * 1000 : 0;
    } else {
      // HTTP-date form; an unparseable date yields NaN — ignore it
      const retryDate = new Date(raw).getTime();
      if (!Number.isNaN(retryDate)) {
        retryAfterMs = Math.max(0, retryDate - Date.now());
      }
    }
  }
  // Calculate backoff with jitter
  const backoffMs = calculateBackoffWithJitter(attempt, 1000, 60000);
  // Use the larger value — respect server guidance
  return Math.max(retryAfterMs, backoffMs);
}
// WRONG: Fixed 1-second retry ignoring Retry-After header
// Anti-pattern (kept for illustration only): one fixed 1-second retry.
async function fixedRetry<T>(
  client: AxiosInstance,
  config: AxiosRequestConfig
): Promise<T> {
  try {
    const response = await client.request<T>(config);
    return response.data;
  } catch {
    // Always waits 1 second regardless of Retry-After header
    // If Retry-After says 60 seconds, this will fail again and again
    await new Promise((resolve) => setTimeout(resolve, 1000));
    // Second (and last) attempt — any error here propagates to the caller
    const response = await client.request<T>(config);
    return response.data;
  }
}

Never drive feed polling with `while(true)`, `for(;;)`, or a tight `setInterval`; use `setTimeout` to schedule the next cycle only after the current one completes.

// Correct: Controlled polling with adequate intervals
/**
 * Polls the VTEX order feed with backpressure.
 *
 * Each cycle fetches feed events, processes and commits them, then
 * schedules the next cycle with setTimeout only after the current one
 * finishes — cycles can never overlap or tight-loop.
 *
 * On 429 it waits for the server's Retry-After (default 60s) and then
 * reschedules; previously the 429 branch returned without rescheduling,
 * which silently stopped polling forever after the first rate limit.
 */
async function pollWithBackpressure(
  client: AxiosInstance,
  intervalMs: number = 30000 // 30 seconds minimum
): Promise<void> {
  const poll = async (): Promise<void> => {
    try {
      const response = await client.get("/api/orders/feed");
      const events = response.data;
      if (events.length > 0) {
        await processEvents(events);
        await commitEvents(
          client,
          events.map((e: { handle: string }) => e.handle)
        );
      }
    } catch (error: unknown) {
      if (axios.isAxiosError(error) && error.response?.status === 429) {
        const parsed = parseInt(
          error.response.headers["retry-after"] || "60",
          10
        );
        // Malformed header → fall back to 60s instead of NaN
        const retryAfter = Number.isNaN(parsed) ? 60 : parsed;
        console.warn(`Rate limited, waiting ${retryAfter}s`);
        await new Promise((resolve) => setTimeout(resolve, retryAfter * 1000));
        // FIX: reschedule instead of returning — returning here used to
        // terminate the polling loop permanently.
        setTimeout(poll, intervalMs);
        return;
      }
      console.error("Polling error:", error);
    }
    // Schedule next poll
    setTimeout(poll, intervalMs);
  };
  // Start polling
  await poll();
}
/** Placeholder event processor — replace with real business logic. */
async function processEvents(events: unknown[]): Promise<void> {
  console.log(`Processing ${events.length} events`);
}
/** Acknowledges (commits) processed feed events back to VTEX. */
async function commitEvents(client: AxiosInstance, handles: string[]): Promise<void> {
  await client.post("/api/orders/feed", { handles });
}
// WRONG: Tight loop with no backpressure
// Anti-pattern (kept for illustration only): unbounded polling loop.
async function tightLoop(client: AxiosInstance): Promise<void> {
  while (true) {
    try {
      const response = await client.get("/api/orders/feed");
      await processEvents(response.data);
    } catch {
      // Immediate retry — no delay, burns through rate limits
      continue;
    }
  }
}
// WRONG: setInterval with 1-second polling
// Also fires regardless of whether the previous request finished, so slow
// responses cause overlapping in-flight requests on top of the rate limit.
setInterval(async () => {
  // 1 request/second = 3600/hour — will trigger rate limits quickly
  const client = createClient();
  await client.get("/api/catalog_system/pvt/sku/stockkeepingunitids");
}, 1000);
/** Bare axios client used by the anti-pattern example above. */
function createClient(): AxiosInstance {
  return axios.create({ baseURL: "https://account.vtexcommercestable.com.br" });
}
import axios, { AxiosInstance, AxiosRequestConfig, AxiosResponse } from "axios";
/** Configuration for a VTEX client with built-in retry handling. */
interface RateLimitedClientConfig {
  accountName: string;  // VTEX account, used to build the base URL
  appKey: string;       // X-VTEX-API-AppKey credential
  appToken: string;     // X-VTEX-API-AppToken credential
  maxRetries?: number;  // default 5
  baseDelayMs?: number; // default 1000
  maxDelayMs?: number;  // default 60000
}
/**
 * Builds an authenticated VTEX axios client plus a request() wrapper that
 * routes every call through requestWithRetry with the configured policy.
 */
function createRateLimitedClient(config: RateLimitedClientConfig): {
  client: AxiosInstance;
  request: <T>(requestConfig: AxiosRequestConfig) => Promise<AxiosResponse<T>>;
} {
  const client = axios.create({
    baseURL: `https://${config.accountName}.vtexcommercestable.com.br`,
    headers: {
      "Content-Type": "application/json",
      "X-VTEX-API-AppKey": config.appKey,
      "X-VTEX-API-AppToken": config.appToken,
    },
    timeout: 30000,
  });
  // Fill in policy defaults for any omitted fields
  const retryConfig: RetryConfig = {
    maxRetries: config.maxRetries ?? 5,
    baseDelayMs: config.baseDelayMs ?? 1000,
    maxDelayMs: config.maxDelayMs ?? 60000,
  };
  return {
    client,
    request: <T>(requestConfig: AxiosRequestConfig) =>
      requestWithRetry<T>(client, requestConfig, retryConfig),
  };
}
/** Circuit breaker states (classic three-state model). */
enum CircuitState {
  CLOSED = "CLOSED", // Normal operation — requests flow through
  OPEN = "OPEN", // Service failing — requests blocked
  HALF_OPEN = "HALF_OPEN", // Testing recovery — one request allowed
}
/**
 * Minimal circuit breaker.
 *
 * CLOSED: calls pass through and consecutive failures are counted.
 * OPEN: calls are rejected until recoveryTimeMs has elapsed.
 * HALF_OPEN: probe calls are allowed; enough consecutive successes close
 * the circuit again, while failures push it back toward OPEN.
 */
class CircuitBreaker {
  private state: CircuitState = CircuitState.CLOSED;
  private failureCount: number = 0;
  private lastFailureTime: number = 0;
  private successCount: number = 0;

  constructor(
    private readonly failureThreshold: number = 5,
    private readonly recoveryTimeMs: number = 30000,
    private readonly halfOpenSuccessThreshold: number = 3
  ) {}

  /**
   * Runs operation through the breaker.
   * @throws Error when the circuit is OPEN and still cooling down.
   */
  async execute<T>(operation: () => Promise<T>): Promise<T> {
    if (this.state === CircuitState.OPEN) {
      const stillCoolingDown =
        Date.now() - this.lastFailureTime < this.recoveryTimeMs;
      if (stillCoolingDown) {
        throw new Error(
          `Circuit breaker is OPEN. Retry after ${this.recoveryTimeMs}ms.`
        );
      }
      // Cool-down elapsed — allow a probe request through
      this.state = CircuitState.HALF_OPEN;
      this.successCount = 0;
    }
    try {
      const result = await operation();
      this.onSuccess();
      return result;
    } catch (error) {
      this.onFailure();
      throw error;
    }
  }

  /** Records a success; may close a HALF_OPEN circuit. */
  private onSuccess(): void {
    if (this.state !== CircuitState.HALF_OPEN) {
      // Normal operation: any success clears the failure streak
      this.failureCount = 0;
      return;
    }
    this.successCount += 1;
    if (this.successCount >= this.halfOpenSuccessThreshold) {
      this.state = CircuitState.CLOSED;
      this.failureCount = 0;
      console.log("Circuit breaker: CLOSED (recovered)");
    }
  }

  /** Records a failure; trips the breaker once the threshold is reached. */
  private onFailure(): void {
    this.failureCount += 1;
    this.lastFailureTime = Date.now();
    if (this.failureCount >= this.failureThreshold) {
      this.state = CircuitState.OPEN;
      console.warn(
        `Circuit breaker: OPEN after ${this.failureCount} failures`
      );
    }
  }

  /** Current breaker state (useful for monitoring and tests). */
  getState(): CircuitState {
    return this.state;
  }
}
interface QueuedRequest<T> {
  config: AxiosRequestConfig;                 // request to execute
  resolve: (value: AxiosResponse<T>) => void; // fulfills the caller's promise
  reject: (error: Error) => void;             // rejects the caller's promise
}
/**
 * Serializes requests through a single drain loop at a bounded rate
 * (requestsPerSecond), routing every request through a circuit breaker.
 * Callers enqueue() and await their individual results.
 *
 * NOTE: `processing` is a re-entrancy guard, not a lock — JavaScript is
 * single-threaded, so the boolean flag is sufficient here.
 */
class RequestQueue {
  private queue: Array<QueuedRequest<unknown>> = [];
  private processing: boolean = false;
  private readonly requestsPerSecond: number;
  private readonly circuitBreaker: CircuitBreaker;

  constructor(
    private readonly client: {
      request: <T>(config: AxiosRequestConfig) => Promise<AxiosResponse<T>>;
    },
    requestsPerSecond: number = 10,
    circuitBreaker?: CircuitBreaker
  ) {
    this.requestsPerSecond = requestsPerSecond;
    this.circuitBreaker = circuitBreaker ?? new CircuitBreaker();
  }

  /** Queues a request; the returned promise settles with its outcome. */
  async enqueue<T>(config: AxiosRequestConfig): Promise<AxiosResponse<T>> {
    return new Promise<AxiosResponse<T>>((resolve, reject) => {
      this.queue.push({
        config,
        // Cast erases T because the queue stores heterogeneous entries
        resolve: resolve as (value: AxiosResponse<unknown>) => void,
        reject,
      });
      this.processQueue();
    });
  }

  /** Drains the queue sequentially, throttling between requests. */
  private async processQueue(): Promise<void> {
    if (this.processing || this.queue.length === 0) {
      return;
    }
    this.processing = true;
    const delayBetweenRequests = 1000 / this.requestsPerSecond;
    while (this.queue.length > 0) {
      const request = this.queue.shift()!;
      try {
        const result = await this.circuitBreaker.execute(() =>
          this.client.request(request.config)
        );
        request.resolve(result);
      } catch (error) {
        // Reject only this caller; keep draining the rest of the queue
        request.reject(error instanceof Error ? error : new Error(String(error)));
      }
      // Throttle between requests
      if (this.queue.length > 0) {
        await new Promise((resolve) =>
          setTimeout(resolve, delayBetweenRequests)
        );
      }
    }
    this.processing = false;
  }

  /** Number of requests still waiting (for monitoring). */
  getQueueLength(): number {
    return this.queue.length;
  }
}
import { AxiosResponse } from "axios";
/** Parsed VTEX rate-limit headers; a field is null when its header is absent. */
interface RateLimitInfo {
  remaining: number | null;  // X-RateLimit-Remaining
  resetAt: number | null;    // X-RateLimit-Reset, converted to epoch ms
  retryAfter: number | null; // Retry-After, converted to ms
}
/**
 * Extracts VTEX rate-limit metadata from response headers.
 * Seconds-based headers are converted to milliseconds; missing headers
 * map to null.
 */
function parseRateLimitHeaders(response: AxiosResponse): RateLimitInfo {
  // Reads one header as an integer, or null when absent/empty.
  const intHeader = (name: string): number | null => {
    const raw = response.headers[name];
    return raw ? parseInt(raw, 10) : null;
  };
  const resetSeconds = intHeader("x-ratelimit-reset");
  const retryAfterSeconds = intHeader("retry-after");
  return {
    remaining: intHeader("x-ratelimit-remaining"),
    resetAt: resetSeconds === null ? null : resetSeconds * 1000,
    retryAfter: retryAfterSeconds === null ? null : retryAfterSeconds * 1000,
  };
}
/**
 * Sends a request through the queue, then inspects rate-limit headers
 * and sleeps 2s when fewer than 10 requests remain in the window.
 *
 * NOTE(review): the `client` parameter is currently unused — the queue's
 * wrapped client performs the request. Kept for interface stability.
 */
async function adaptiveRequest<T>(
  client: AxiosInstance,
  config: AxiosRequestConfig,
  queue: RequestQueue
): Promise<AxiosResponse<T>> {
  const response = await queue.enqueue<T>(config);
  const rateInfo = parseRateLimitHeaders(response);
  // Proactively slow down when remaining requests are low
  if (rateInfo.remaining !== null && rateInfo.remaining < 10) {
    console.warn(
      `Rate limit approaching: ${rateInfo.remaining} requests remaining. ` +
        `Slowing down.`
    );
    // Add extra delay to reduce pressure
    await new Promise((resolve) => setTimeout(resolve, 2000));
  }
  return response;
}
import axios from "axios";
/**
 * End-to-end example wiring the rate-limited client, circuit breaker,
 * and request queue into a batch price-notification job.
 *
 * Reads credentials from VTEX_ACCOUNT_NAME / VTEX_APP_KEY / VTEX_APP_TOKEN;
 * the `!` assertions assume these environment variables are set.
 * NOTE(review): the destructured `client` is unused here — requests go
 * through `request` via the queue.
 */
async function buildResilientIntegration(): Promise<void> {
  const { client, request } = createRateLimitedClient({
    accountName: process.env.VTEX_ACCOUNT_NAME!,
    appKey: process.env.VTEX_APP_KEY!,
    appToken: process.env.VTEX_APP_TOKEN!,
    maxRetries: 5,
    baseDelayMs: 1000,
    maxDelayMs: 60000,
  });
  const circuitBreaker = new CircuitBreaker(
    5, // Open after 5 failures
    30000, // Wait 30s before testing recovery
    3 // Close after 3 successful half-open requests
  );
  const queue = new RequestQueue({ request }, 10, circuitBreaker);
  // Example: Batch update prices with rate limiting
  const skuIds = ["sku-1", "sku-2", "sku-3", "sku-4", "sku-5"];
  for (const skuId of skuIds) {
    try {
      const response = await queue.enqueue({
        method: "POST",
        url: `/notificator/seller01/changenotification/${skuId}/price`,
      });
      const rateInfo = parseRateLimitHeaders(response);
      if (rateInfo.remaining !== null && rateInfo.remaining < 5) {
        console.warn("Approaching rate limit, adding delay");
        await new Promise((resolve) => setTimeout(resolve, 5000));
      }
    } catch (error) {
      // String match is brittle but mirrors the breaker's thrown message
      if (error instanceof Error && error.message.includes("Circuit breaker is OPEN")) {
        console.error("Circuit breaker open — pausing all requests");
        await new Promise((resolve) => setTimeout(resolve, 30000));
      } else {
        console.error(`Failed to update price for ${skuId}:`, error);
      }
    }
  }
}
// Correct: Exponential backoff with jitter
/**
 * Jittered exponential backoff: min(60s, 1s * 2^attempt) scaled by a
 * random factor in [0.5, 1.0).
 */
function getRetryDelay(attempt: number): number {
  const BASE_MS = 1000;
  const CAP_MS = 60000;
  const uncapped = BASE_MS * 2 ** attempt;
  const capped = uncapped > CAP_MS ? CAP_MS : uncapped;
  // Jitter spreads simultaneous retries from multiple clients apart.
  const scale = 0.5 + Math.random() * 0.5;
  return Math.floor(capped * scale);
}
// attempt 0: ~500-1000ms
// attempt 1: ~1000-2000ms
// attempt 2: ~2000-4000ms
// attempt 3: ~4000-8000ms
// attempt 4: ~8000-16000ms

Watch `X-RateLimit-Remaining` and slow down proactively before ever hitting a 429.

// Correct: Proactive rate management
/**
 * Sends a batch of requests, adapting the inter-request delay to the
 * X-RateLimit-Remaining header so the integration slows down before it
 * is ever throttled.
 */
async function proactiveRateManagement(
  client: AxiosInstance,
  requests: AxiosRequestConfig[]
): Promise<void> {
  let delayBetweenRequests = 100; // Start at 100ms between requests
  for (const config of requests) {
    const response = await requestWithRetry(client, config);
    const rateInfo = parseRateLimitHeaders(response);
    // Proactively adjust speed based on remaining quota
    if (rateInfo.remaining !== null) {
      if (rateInfo.remaining < 5) {
        delayBetweenRequests = 5000; // Slow down significantly
      } else if (rateInfo.remaining < 20) {
        delayBetweenRequests = 1000; // Moderate slowdown
      } else {
        delayBetweenRequests = 100; // Normal speed
      }
    }
    await new Promise((resolve) => setTimeout(resolve, delayBetweenRequests));
  }
}

Key takeaways: honor `Retry-After`, never poll with `while(true)` or a tight `setInterval`, and track `X-RateLimit-Remaining` to throttle proactively.