Loading...
Loading...
S3-compatible object storage for files, images, and large data. Load when handling file uploads, storing images/videos/documents, generating presigned URLs, using multipart uploads for large files, migrating from S3, or serving static assets from buckets.
npx skill4agent add null-shot/cloudflare-skills r2-storage
# Create bucket
wrangler r2 bucket create my-bucket
# Create with location hint
wrangler r2 bucket create my-bucket --location wnam
# List buckets
wrangler r2 bucket list
| Operation | API |
|---|---|
| Upload object | `bucket.put(key, value)` |
| Download object | `bucket.get(key)` |
| Delete object | `bucket.delete(key)` |
| List objects | `bucket.list(options)` |
| Get metadata | `bucket.head(key)` |
| Multipart upload | `bucket.createMultipartUpload(key)` |
| Generate signed URL | Use presigned URL patterns with R2's S3 compatibility |
// wrangler.jsonc
{
"name": "my-worker",
"main": "src/index.ts",
"compatibility_date": "2026-01-01",
"r2_buckets": [
{
"binding": "BUCKET",
"bucket_name": "my-bucket"
}
]
}
wrangler types
export interface Env {
  BUCKET: R2Bucket;
}
import { R2Bucket } from "@cloudflare/workers-types";
export interface Env {
  BUCKET: R2Bucket;
}
export default {
  /**
   * Minimal CRUD worker over an R2 bucket.
   * The URL path (minus the leading "/") is used as the object key.
   * PUT uploads, GET downloads, DELETE removes; anything else is 405.
   */
  async fetch(request: Request, env: Env): Promise<Response> {
    const url = new URL(request.url);
    const key = url.pathname.slice(1); // Remove leading /
    // Upload: stream the request body straight into R2, preserving the
    // caller-supplied content type when present.
    if (request.method === "PUT") {
      await env.BUCKET.put(key, request.body, {
        httpMetadata: {
          contentType: request.headers.get("content-type") || "application/octet-stream",
        },
      });
      return new Response("Uploaded", { status: 201 });
    }
    // Download: echo the stored content type, ETag, and cache policy.
    if (request.method === "GET") {
      const object = await env.BUCKET.get(key);
      if (!object) {
        return new Response("Not found", { status: 404 });
      }
      return new Response(object.body, {
        headers: {
          "Content-Type": object.httpMetadata?.contentType || "application/octet-stream",
          "ETag": object.httpEtag,
          "Cache-Control": object.httpMetadata?.cacheControl || "public, max-age=3600",
        },
      });
    }
    // Delete
    if (request.method === "DELETE") {
      await env.BUCKET.delete(key);
      // BUG FIX: 204 is a null-body status — constructing a Response with
      // a body and status 204 throws a TypeError in the Workers runtime.
      return new Response(null, { status: 204 });
    }
    return new Response("Method not allowed", { status: 405 });
  },
};
export default {
async fetch(request: Request, env: Env): Promise<Response> {
  // Accept only form POSTs; every other method is rejected up front.
  if (request.method !== "POST") {
    return new Response("Method not allowed", { status: 405 });
  }
  const form = await request.formData();
  const upload = form.get("file") as File;
  if (!upload) {
    return new Response("No file provided", { status: 400 });
  }
  // Prefix with a random UUID so repeated uploads of the same filename
  // never collide.
  const objectKey = `uploads/${crypto.randomUUID()}-${upload.name}`;
  // Stream the file into R2, keeping its MIME type and recording the
  // original name plus upload time as custom metadata.
  await env.BUCKET.put(objectKey, upload.stream(), {
    httpMetadata: {
      contentType: upload.type,
    },
    customMetadata: {
      originalName: upload.name,
      uploadedAt: new Date().toISOString(),
    },
  });
  return Response.json({
    success: true,
    key: objectKey,
    url: `/files/${objectKey}`,
  });
},
};
/**
 * Lists every object in `bucket` whose key starts with `prefix`,
 * transparently following pagination cursors until the listing is
 * exhausted.
 *
 * @param bucket - R2 bucket binding to enumerate.
 * @param prefix - Key prefix filter; "" lists the whole bucket.
 * @param pageSize - Objects requested per list() call (generalized from
 *   the previously hard-coded 1000, which remains the default/maximum).
 * @returns All matching objects, in listing order.
 */
async function listAllObjects(
  bucket: R2Bucket,
  prefix: string = "",
  pageSize: number = 1000
): Promise<R2Object[]> {
  const objects: R2Object[] = [];
  let cursor: string | undefined;
  do {
    const listed = await bucket.list({
      prefix,
      cursor,
      limit: pageSize,
    });
    objects.push(...listed.objects);
    // list() reports truncated=true and supplies a cursor while more
    // pages remain; clear the cursor to terminate the loop otherwise.
    cursor = listed.truncated ? listed.cursor : undefined;
  } while (cursor);
  return objects;
}
// Usage
export default {
async fetch(request: Request, env: Env): Promise<Response> {
const url = new URL(request.url);
const prefix = url.searchParams.get("prefix") || "";
const objects = await listAllObjects(env.BUCKET, prefix);
return Response.json({
count: objects.length,
objects: objects.map((obj) => ({
key: obj.key,
size: obj.size,
uploaded: obj.uploaded,
})),
});
},
};// Conditional write (only if not modified)
// head() fetches metadata only (null if the key is absent) — no body download.
const existingObject = await env.BUCKET.head("config.json");
if (existingObject) {
// Update only if ETag matches
// Optimistic-concurrency write: the onlyIf precondition makes the put a
// no-op if another writer changed the object since we read its ETag.
await env.BUCKET.put("config.json", newData, {
httpMetadata: {
contentType: "application/json",
},
onlyIf: {
etagMatches: existingObject.httpEtag,
},
});
}
// Conditional read (If-None-Match)
// NOTE(review): per the R2 docs, a failed get() precondition may return an
// R2Object WITHOUT a body rather than null — verify the `=== null` check
// below against the current runtime behavior.
const object = await env.BUCKET.get("image.jpg", {
onlyIf: {
etagDoesNotMatch: cachedEtag,
},
});
if (object === null) {
// Object not modified - return 304
return new Response(null, {
status: 304,
headers: { "ETag": cachedEtag },
});
}// Store with custom metadata
await env.BUCKET.put("document.pdf", pdfData, {
httpMetadata: {
contentType: "application/pdf",
},
customMetadata: {
userId: "user-123",
documentType: "invoice",
version: "2",
tags: "finance,2024",
},
});
// Read metadata without downloading body
const object = await env.BUCKET.head("document.pdf");
console.log(object.customMetadata?.userId); // "user-123"export default {
/**
 * Serves objects with HTTP Range support (single `bytes=start-end` or
 * open-ended `bytes=start-` ranges; suffix ranges `bytes=-N` are not
 * handled and fall through to a full download).
 */
async fetch(request: Request, env: Env): Promise<Response> {
  const key = new URL(request.url).pathname.slice(1);
  const rangeHeader = request.headers.get("range");
  if (rangeHeader) {
    // Parse range: "bytes=0-1023"
    const match = rangeHeader.match(/bytes=(\d+)-(\d*)/);
    if (match) {
      const start = parseInt(match[1], 10);
      const end = match[2] ? parseInt(match[2], 10) : undefined;
      // BUG FIX: the original used `end ? …` / `end || …`, which treats a
      // legitimate `bytes=0-0` request (end === 0) as an open-ended range.
      const object = await env.BUCKET.get(key, {
        range: { offset: start, length: end !== undefined ? end - start + 1 : undefined },
      });
      if (!object) {
        return new Response("Not found", { status: 404 });
      }
      // Clamp the advertised end to the last byte actually available.
      const rangeEnd = end !== undefined ? Math.min(end, object.size - 1) : object.size - 1;
      return new Response(object.body, {
        status: 206,
        headers: {
          "Content-Type": object.httpMetadata?.contentType || "application/octet-stream",
          "Content-Range": `bytes ${start}-${rangeEnd}/${object.size}`,
          // BUG FIX: Content-Length of a 206 response is the length of the
          // returned range, not the full object size.
          "Content-Length": (rangeEnd - start + 1).toString(),
        },
      });
    }
  }
  // Regular full download
  const object = await env.BUCKET.get(key);
  if (!object) {
    return new Response("Not found", { status: 404 });
  }
  return new Response(object.body, {
    headers: {
      "Content-Type": object.httpMetadata?.contentType || "application/octet-stream",
    },
  });
},
};
npm install @aws-sdk/client-s3 @aws-sdk/s3-request-presigner
{
"compatibility_flags": ["nodejs_compat_v2"],
"r2_buckets": [
{ "binding": "BUCKET", "bucket_name": "my-bucket" }
]
}
import { S3Client, PutObjectCommand, GetObjectCommand } from "@aws-sdk/client-s3";
import { getSignedUrl } from "@aws-sdk/s3-request-presigner";
export interface Env {
  BUCKET: R2Bucket;
  R2_ACCESS_KEY_ID: string;
  R2_SECRET_ACCESS_KEY: string;
  R2_ACCOUNT_ID: string;
}
export default {
  async fetch(request: Request, env: Env): Promise<Response> {
    // R2 speaks the S3 wire protocol: point the SDK at the account's R2
    // endpoint and use the literal region "auto".
    const endpoint = `https://${env.R2_ACCOUNT_ID}.r2.cloudflarestorage.com`;
    const s3 = new S3Client({
      region: "auto",
      endpoint,
      credentials: {
        accessKeyId: env.R2_ACCESS_KEY_ID,
        secretAccessKey: env.R2_SECRET_ACCESS_KEY,
      },
    });
    // Upload using S3 API
    const putCommand = new PutObjectCommand({
      Bucket: "my-bucket",
      Key: "file.txt",
      Body: "Hello R2",
      ContentType: "text/plain",
    });
    await s3.send(putCommand);
    // Generate presigned URL (valid for 1 hour)
    const getCommand = new GetObjectCommand({
      Bucket: "my-bucket",
      Key: "file.txt",
    });
    const signedUrl = await getSignedUrl(s3, getCommand, { expiresIn: 3600 });
    return Response.json({ signedUrl });
  },
};
import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3";
import { getSignedUrl } from "@aws-sdk/s3-request-presigner";
export interface Env {
  R2_ACCESS_KEY_ID: string;
  R2_SECRET_ACCESS_KEY: string;
  R2_ACCOUNT_ID: string;
}
export default {
  /**
   * Issues short-lived presigned PUT URLs so browsers can upload
   * directly to R2 without routing file bytes through the Worker.
   */
  async fetch(request: Request, env: Env): Promise<Response> {
    const url = new URL(request.url);
    // Generate presigned upload URL
    if (url.pathname === "/upload-url") {
      const filename = url.searchParams.get("filename");
      if (!filename) {
        return new Response("Missing filename", { status: 400 });
      }
      const s3 = new S3Client({
        region: "auto",
        endpoint: `https://${env.R2_ACCOUNT_ID}.r2.cloudflarestorage.com`,
        credentials: {
          accessKeyId: env.R2_ACCESS_KEY_ID,
          secretAccessKey: env.R2_SECRET_ACCESS_KEY,
        },
      });
      // BUG FIX: the key template was corrupted ("$(unknown)") — it should
      // interpolate the requested filename after the random UUID prefix,
      // matching the key shape the rest of this document uses.
      const key = `uploads/${crypto.randomUUID()}-${filename}`;
      const command = new PutObjectCommand({
        Bucket: "my-bucket",
        Key: key,
      });
      const signedUrl = await getSignedUrl(s3, command, { expiresIn: 300 }); // 5 minutes
      return Response.json({
        uploadUrl: signedUrl,
        key,
      });
    }
    return new Response("Not found", { status: 404 });
  },
};
// 1. Get presigned URL from your Worker
const response = await fetch("/upload-url?filename=photo.jpg");
const { uploadUrl, key } = await response.json();
// 2. Upload file directly to R2
// NOTE(review): this snippet runs in the browser — it assumes the page has
// a file input with a file already selected; verify against the host page.
const file = document.querySelector('input[type="file"]').files[0];
await fetch(uploadUrl, {
method: "PUT",
body: file,
headers: {
"Content-Type": file.type,
},
});
// 3. File is now available at key in R2
// Initiate multipart upload
const multipartUpload = await env.BUCKET.createMultipartUpload(key, {
httpMetadata: { contentType: "application/zip" }
});
const uploadedParts: R2UploadedPart[] = [];
const chunkSize = 10 * 1024 * 1024; // 10MB chunks
// Upload parts
for (let offset = 0; offset < data.byteLength; offset += chunkSize) {
const chunk = data.slice(offset, Math.min(offset + chunkSize, data.byteLength));
const part = await multipartUpload.uploadPart(partNumber++, chunk);
uploadedParts.push(part);
}
// Complete upload
const object = await multipartUpload.complete(uploadedParts);httpMetadata.contentTypecacheControlbucket.get()null