# Build models with Cog
Package and build custom AI models with Cog for deployment on Replicate. Use when creating a cog.yaml or predict.py, defining model inputs and outputs, loading model weights at setup time, building Docker images for ML models, serving locally with cog serve or cog predict, or porting a HuggingFace, GitHub, or ComfyUI model to run on Replicate. Trigger on phrases like "build a model", "package a model", "create a Cog model", "wrap a model", "containerize an AI model", "predict.py", "cog.yaml", "BasePredictor", or "Cog container", and when referencing cog.run, github.com/replicate/cog, or github.com/replicate/cog-examples. Covers GPU and CUDA setup, pget for fast weight downloads, async predictors with continuous batching, streaming outputs, and cold-boot optimization for image, video, audio, and LLM models. For pushing built models to Replicate, see publish-models. For running existing models, see run-models.
Install this skill:

    npx skill4agent add replicate/skills build-models

A Cog model is defined by `cog.yaml` (build configuration) and `predict.py` (prediction code); `train.py` is optional. For pushing built models to Replicate see the `publish-models` skill; for running existing models see `run-models`.

Install the Cog CLI:

    brew install replicate/tap/cog
    # or
    sh <(curl -fsSL https://cog.run/install.sh)

Run `cog init` to scaffold `cog.yaml` and `predict.py`. A typical project layout:

cog.yaml
predict.py
weights.py # optional download helpers
requirements.txt
cog-safe-push-configs/
default.yaml # see publish-models skill
.github/workflows/
ci.yaml
script/ # github.com/github/scripts-to-rule-them-all
lint
test
push

A minimal `cog.yaml`:

build:
gpu: true
cuda: "12.8"
python_version: "3.12"
python_requirements: requirements.txt
system_packages:
- libgl1
- libglib2.0-0
predict: predict.py:Predictor

Pin dependencies in `requirements.txt` referenced via `python_requirements` (preferred over inline `python_packages`). Match CUDA wheel builds to the declared `cuda` version — e.g. with `cuda: "12.8"` use `torch==2.7.1+cu128`. Optional keys: `train: train.py:train` for a training entry point, and `image: r8.im/owner/name` so `cog push` knows its destination. For async predictors, enable continuous batching with:

concurrency:
  max: 32

A basic `predict.py`:

from cog import BasePredictor, Input, Path
class Predictor(BasePredictor):
def setup(self) -> None:
"""One-time loads. Heavy work goes here, not in predict()."""
self.model = load_model("weights/")
def predict(
self,
prompt: str = Input(description="Text prompt for generation"),
seed: int = Input(description="Random seed; leave blank for random", default=None),
num_steps: int = Input(description="Number of denoising steps", ge=1, le=50, default=20),
output_format: str = Input(description="Output image format", choices=["webp", "jpg", "png"], default="webp"),
) -> Path:
"""Run a single prediction."""
if not prompt.strip():
raise ValueError("prompt cannot be empty")
out = self.model.generate(prompt, seed=seed, steps=num_steps)
return Path(out)

Every input takes a `description`; use `ge`/`le` for numeric bounds, `choices=[...]` for enumerated options, and `regex=` for string patterns. Return files as `cog.Path` and pass credentials as `cog.Secret` rather than plain `str`. Prefer `choices` over free-form strings, and raise `ValueError` from `predict()` for invalid input. To stream output tokens incrementally:

from cog import BasePredictor, Input, ConcatenateIterator
class Predictor(BasePredictor):
def predict(self, prompt: str = Input(description="Prompt")) -> ConcatenateIterator[str]:
for token in self.model.stream(prompt):
yield token

With `concurrency.max` set in `cog.yaml`, an async predictor serves requests concurrently (continuous batching):

from cog import BasePredictor, Input, AsyncConcatenateIterator
class Predictor(BasePredictor):
async def setup(self) -> None:
self.engine = await load_async_engine()
async def predict(
self,
prompt: str = Input(description="Prompt"),
) -> AsyncConcatenateIterator[str]:
async for token in self.engine.generate(prompt):
yield token

`choices` can be built dynamically at import time, e.g. from files in a `voices/` directory:

from pathlib import Path as _P
AVAILABLE_VOICES = sorted(p.stem for p in _P("voices").glob("*.wav"))
class Predictor(BasePredictor):
def predict(
self,
speaker: str = Input(description="Voice", choices=AVAILABLE_VOICES, default=AVAILABLE_VOICES[0]),
) -> Path: ...

Keep model caches inside the project directory so they are baked into the image; set cache environment variables before importing the library:

import os
os.environ["TORCH_HOME"] = "." # set before importing torch
import torch
from torchvision import models

For Hugging Face models, point the hub cache at the project and enable high-performance transfer:

import os
os.environ["HF_HUB_CACHE"] = "./.cache"
os.environ["HF_XET_HIGH_PERFORMANCE"] = "1"

Large weights can instead be downloaded at setup time rather than baked in at `cog build`. Host them on fast storage (e.g. `weights.replicate.delivery`) and fetch with `pget`, installed via a `run:` step in `cog.yaml`:

build:
run:
- curl -o /usr/local/bin/pget -L "https://github.com/replicate/pget/releases/download/v0.8.2/pget_linux_x86_64"
    - chmod +x /usr/local/bin/pget

Then download in `setup()`:

import subprocess
from pathlib import Path
WEIGHTS_URL = "https://weights.replicate.delivery/default/my-model/weights.tar"
WEIGHTS_DIR = Path("weights")
class Predictor(BasePredictor):
def setup(self) -> None:
if not WEIGHTS_DIR.exists():
# -x extracts tar in-memory; default concurrency is 4 * NumCPU
subprocess.check_call(["pget", "-x", WEIGHTS_URL, str(WEIGHTS_DIR)])
self.model = load_from(WEIGHTS_DIR)

For several files at once, use `pget multifile` with a "url destination" manifest on stdin:

manifest = "\n".join([
f"{base}/unet.safetensors weights/unet.safetensors",
f"{base}/vae.safetensors weights/vae.safetensors",
f"{base}/text_encoder.safetensors weights/text_encoder.safetensors",
])
subprocess.run(["pget", "multifile", "-"], input=manifest, text=True, check=True)

Alternatively set `HF_HUB_ENABLE_HF_TRANSFER=1` and use `huggingface_hub.snapshot_download` or `from_pretrained` (pass tokens as `cog.Secret`). For per-request weights such as LoRAs, keep an LRU disk cache:

import hashlib, shutil, subprocess
from pathlib import Path
class WeightsDownloadCache:
def __init__(self, cache_dir: str = "/tmp/weights-cache", min_disk_free_gb: int = 10):
self.cache_dir = Path(cache_dir)
self.cache_dir.mkdir(parents=True, exist_ok=True)
self.min_disk_free = min_disk_free_gb * 1024**3
def ensure(self, url: str) -> Path:
key = hashlib.sha256(url.encode()).hexdigest()
target = self.cache_dir / key
if target.exists():
target.touch() # bump LRU mtime
return target
self._evict_until_room()
subprocess.check_call(["pget", url, str(target)])
return target
def _evict_until_room(self) -> None:
while shutil.disk_usage(self.cache_dir).free < self.min_disk_free:
entries = sorted(self.cache_dir.iterdir(), key=lambda p: p.stat().st_mtime)
if not entries:
return
entries[0].unlink()

See `replicate/cog-flux/weights.py` for a production implementation. Example: hot-swapping `.safetensors` LoRA weights per request:

class Predictor(BasePredictor):
def setup(self) -> None:
self.pipe = load_base_pipeline()
self.loaded = {"main": None, "extra": None}
def _ensure_lora(self, slot: str, url: str | None) -> None:
if url == self.loaded[slot]:
return
if self.loaded[slot] is not None:
self.pipe.unload_lora_weights(adapter_name=slot)
if url:
path = self.cache.ensure(url)
self.pipe.load_lora_weights(str(path), adapter_name=slot)
self.loaded[slot] = url
def predict(
self,
prompt: str = Input(description="Prompt"),
lora_url: str = Input(description="Primary LoRA URL", default=None),
lora_scale: float = Input(description="Primary LoRA scale", ge=0.0, le=2.0, default=1.0),
extra_lora_url: str = Input(description="Optional second LoRA URL", default=None),
extra_lora_scale: float = Input(description="Second LoRA scale", ge=0.0, le=2.0, default=1.0),
) -> Path:
self._ensure_lora("main", lora_url)
self._ensure_lora("extra", extra_lora_url)
adapters = [s for s, u in self.loaded.items() if u]
scales = [lora_scale if s == "main" else extra_lora_scale for s in adapters]
if adapters:
self.pipe.set_adapters(adapters, adapter_weights=scales)
return Path(self.pipe(prompt).images[0].save("/tmp/out.png"))

See `replicate/cog-flux` and `replicate/cog-flux-kontext` for complete examples. To cut cold-boot and inference latency, tune torch in `setup()`:

import torch
torch.set_float32_matmul_precision("high")
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True

Optionally compile the model and run a warmup prediction so the compilation cost is paid once at setup:

self.model = torch.compile(self.model, dynamic=True)
_ = self.predict(prompt="warmup", num_steps=1)  # absorbs compile cost in setup

Load large checkpoints without a redundant CPU copy by building the skeleton on the meta device and loading with `assign=True`:

with torch.device("meta"):
model = build_model_skeleton()
state = torch.load("weights.pt", map_location="cpu")
model.load_state_dict(state, assign=True)

Local development commands:

cog init                                    # scaffold cog.yaml + predict.py
cog predict -i prompt="hello" # build + run a single prediction
cog predict -i image=@input.jpg -o out.png # file inputs and outputs
cog serve -p 8393 # HTTP server matching production
cog exec python                             # interactive shell inside the build env

Building images:

cog build -t my-model
cog build --separate-weights -t my-model # weights in their own image layer
cog build --secret id=hf,src=$HOME/.hf_token -t my-model

`--separate-weights` puts weights in their own image layer for faster rebuilds and pushes. In `run:` steps, use `--mount=type=cache,target=/root/.cache/pip` to cache pip downloads; pass build-time credentials with `--secret` or `ARG`; `--use-cog-base-image=true` reuses the prebuilt Cog base image.

Training: declare `train: train.py:train` in `cog.yaml`. `train()` returns `TrainingOutput(weights=Path("model.tar"))`; the predictor receives the trained weights via `setup(self, weights)` (populated from `COG_WEIGHTS`). See `replicate/flux-fine-tuner` for a full example.

Checklist: do heavy loads in `setup()`, not `predict()`; pin dependencies (e.g. `numpy<2` where required); use `cog.Path` for file outputs and `cog.Secret` for credentials; download weights with `pget` (`v0.8.2`); set `HF_HUB_ENABLE_HF_TRANSFER=1` and `TRANSFORMERS_OFFLINE=1` where applicable; test locally with `cog predict`; prefer `choices` for enumerated inputs.