# Companion CLIs for Runpod
Companion CLIs for Runpod workflows — HuggingFace, GitHub, Docker, and AWS.
npx skill4agent add runpod/skills companion-clis --install

## hf — HuggingFace CLI
# (example gated model used below: meta-llama/Llama-3.1-8B)

# macOS / Linux (standalone installer — recommended)
curl -LsSf https://hf.co/cli/install.sh | bash
# macOS (Homebrew)
brew install hf
# Windows (WSL2): use the Linux standalone installer above
# Note: pip install huggingface_hub installs the older Python CLI
# (huggingface-cli), which uses different command syntax. The commands below
# are for the standalone hf CLI.
# Option 1: interactive login (saves token to ~/.cache/huggingface/token, optionally to git credential store)
hf auth login
# Option 2: non-interactive (pass token directly, useful in scripts and pod start commands)
hf auth login --token $HF_TOKEN --add-to-git-credential
# Option 3: environment variable (takes precedence over saved token; to revert, unset the variable)
export HF_TOKEN=hf_...

hf auth whoami # confirm auth and org memberships
hf auth logout # delete all locally stored tokens

# Download a model to a local directory (use --local-dir to control where it lands)
hf download meta-llama/Llama-3.1-8B --local-dir ./models/llama-3.1-8b
hf download TinyLlama/TinyLlama-1.1B-Chat-v1.0 --local-dir ./models/tinyllama
# Download a single file from a model repo
hf download meta-llama/Llama-3.1-8B config.json --local-dir ./models/llama-3.1-8b
# Download with glob filters (e.g. only safetensors weights, skip fp16 variants)
hf download stabilityai/stable-diffusion-xl-base-1.0 \
--include "*.safetensors" --exclude "*.fp16.*" \
--local-dir ./models/sdxl
# Download a specific revision (commit hash, branch, or tag — append --revision REF)
hf download meta-llama/Llama-3.1-8B --revision v1.0 --local-dir ./models/llama-3.1-8b

# Increase download timeout on slow connections (default: 10s)
export HF_HUB_DOWNLOAD_TIMEOUT=30

## gh — GitHub CLI

# macOS
brew install gh
# Linux (Debian/Ubuntu)
(type -p wget >/dev/null || (sudo apt update && sudo apt install wget -y)) \
&& sudo mkdir -p -m 755 /etc/apt/keyrings \
&& out=$(mktemp) && wget -nv -O$out https://cli.github.com/packages/githubcli-archive-keyring.gpg \
&& cat $out | sudo tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \
&& sudo chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" \
| sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
&& sudo apt update && sudo apt install gh -y
# Linux (Alpine)
apk add github-cli
# Windows (WSL2): use the Linux (Debian/Ubuntu) installer above

# Generate an SSH key for use with gh
ssh-keygen -t ed25519 -C "your_email@example.com"
# Saves to ~/.ssh/id_ed25519 (private) and ~/.ssh/id_ed25519.pub (public)
# Press Enter to accept the default path; set a passphrase or leave blank

# macOS
eval "$(ssh-agent -s)"
ssh-add --apple-use-keychain ~/.ssh/id_ed25519
# macOS — also add to ~/.ssh/config so the key loads automatically on login.
# Create the file if it doesn't exist, and add these lines:
#
# Host *
# AddKeysToAgent yes
# UseKeychain yes
# IdentityFile ~/.ssh/id_ed25519
# Linux
eval "$(ssh-agent -s)"
ssh-add ~/.ssh/id_ed25519
# Windows (WSL2): use the Linux instructions above

# Interactive login — when prompted, select SSH as the git protocol
gh auth login
# Verify auth
gh auth status

# GitHub — upload via gh CLI (requires auth above to be completed first)
gh ssh-key add ~/.ssh/id_ed25519.pub --title "my-machine"
# HuggingFace — paste contents of public key manually in browser
cat ~/.ssh/id_ed25519.pub # copy this output
# Then add at https://huggingface.co/settings/keys

# Repositories
gh repo create my-worker --public # create a new public repo (required for Hub)
gh repo clone owner/repo # clone a repository over SSH
gh repo clone owner/repo -- --depth 1 # shallow clone
gh repo view owner/repo # view repo details and URL
# Releases — the Runpod Hub indexes releases, not commits
# Every update to a Hub listing requires a new GitHub release
gh release create v1.0.0 --title "v1.0.0" --notes "Initial release" # create a release
gh release create v1.0.1 --title "v1.0.1" --notes "Update model tag" # update Hub listing
gh release list # list all releases
gh release view v1.0.0 # view release details

# Hub repository layout:
handler.py # serverless worker implementation
Dockerfile # container definition
README.md # documentation shown on Hub listing
.runpod/
hub.json # Hub metadata: title, description, category, GPU config, env vars
tests.json # test cases run after each release

## docker

# Linux convenience script (Ubuntu/Debian)
curl -fsSL https://get.docker.com | sh
sudo usermod -aG docker $USER # allow non-root usage (re-login after)

docker login -u DOCKERHUB_USERNAME
# When prompted for a password, paste your personal access token
# (credentials are saved in ~/.docker/config.json)

# Always use explicit semantic version tags (e.g. v1.0.0, v1.0.1).
# Never rely on latest: the latest tag is mutable, so the image it points to
# can change silently between pulls.

# Correct: explicit semantic version tag
docker build --platform=linux/amd64 -t myorg/myimage:v1.0.0 .
docker push myorg/myimage:v1.0.0
# Wrong: latest tag is ambiguous and unreliable
docker build -t myorg/myimage:latest .

# Push images as username/image:tag after docker login.
# Runpod currently only supports docker login-type credentials for container
# registry authentication.
# Build for Runpod (always --platform=linux/amd64 — pods run on x86 Linux)
docker build --platform=linux/amd64 -t myorg/myimage:v1.0.0 .
docker build --platform=linux/amd64 -t myorg/myimage:v1.0.0 -f Dockerfile.prod . # specify Dockerfile
# Tag an existing image before pushing (does not duplicate image data)
docker tag myorg/myimage:v1.0.0 myorg/myimage:v1.0.1
# Push to Docker Hub (image becomes available to Runpod as myorg/myimage:v1.0.0)
docker push myorg/myimage:v1.0.0
# Run locally for validation
docker run --rm -it myorg/myimage:v1.0.0 bash
docker run --rm --gpus all myorg/myimage:v1.0.0 bash # with GPU (requires nvidia-container-toolkit)
docker run --rm -p 8080:80 -e API_KEY=secret myorg/myimage:v1.0.0 # port mapping + env vars
# Debug a running container
docker exec -it CONTAINER_ID /bin/bash
# Inspect
docker images # list local images
docker ps -a # list all containers (including stopped)
docker logs CONTAINER_ID # view container output
docker logs -f CONTAINER_ID # follow logs in real time
# Cleanup
docker rmi myorg/myimage:v1.0.0 # remove an image
docker rm CONTAINER_ID # remove a stopped container

## aws — AWS CLI (for Runpod S3-compatible network volumes)

# macOS
brew install awscli
# Linux
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o awscliv2.zip
unzip awscliv2.zip && sudo ./aws/install

# Credentials: use your Runpod user ID (user_...) as AWS_ACCESS_KEY_ID and
# your S3 API key (rps_...) as AWS_SECRET_ACCESS_KEY.

# Option 1: interactive configure (writes ~/.aws/credentials and ~/.aws/config)
# When prompted: enter user ID as access key, S3 API key as secret.
# Press Enter to skip region and output format — region is always passed per-command, not stored in config.
aws configure
aws configure list # verify stored credentials
# Option 2: environment variables (override config files)
export AWS_ACCESS_KEY_ID=user_...
export AWS_SECRET_ACCESS_KEY=rps_...
# To stop using env vars and fall back to config file:
unset AWS_ACCESS_KEY_ID
unset AWS_SECRET_ACCESS_KEY

# Every command must pass --region and --endpoint-url explicitly:
#   --region DATACENTER --endpoint-url https://s3api-DATACENTER.runpod.io/
# e.g.: aws s3 ls --region DATACENTER --endpoint-url https://s3api-DATACENTER.runpod.io/

| Region | Datacenter IDs |
|---|---|
| EU | CZ-1, RO-1, IS-1, NO-1 |
| US | CA-2, GA-2, IL-1, KS-2, MD-1, MO-1, MO-2, NC-1, NC-2, NE-1, WA-1 |
# Replace DATACENTER with a datacenter ID (e.g. CA-2) and NETWORK_VOLUME_ID
# with your network volume ID.

# List files in a volume
aws s3 ls \
--region DATACENTER \
--endpoint-url https://s3api-DATACENTER.runpod.io/ \
s3://NETWORK_VOLUME_ID/
# List a subdirectory
aws s3 ls \
--region DATACENTER \
--endpoint-url https://s3api-DATACENTER.runpod.io/ \
s3://NETWORK_VOLUME_ID/my-folder/
# Upload a file
aws s3 cp local-file.txt \
--region DATACENTER \
--endpoint-url https://s3api-DATACENTER.runpod.io/ \
s3://NETWORK_VOLUME_ID/
# Download a file
aws s3 cp \
--region DATACENTER \
--endpoint-url https://s3api-DATACENTER.runpod.io/ \
s3://NETWORK_VOLUME_ID/remote-file.txt ./
# Delete a file
aws s3 rm \
--region DATACENTER \
--endpoint-url https://s3api-DATACENTER.runpod.io/ \
s3://NETWORK_VOLUME_ID/remote-file.txt
# Sync a local directory to a volume
aws s3 sync local-dir/ \
--region DATACENTER \
--endpoint-url https://s3api-DATACENTER.runpod.io/ \
s3://NETWORK_VOLUME_ID/remote-dir/

# Path mapping: /workspace/my-folder/file.txt on the volume corresponds to
# s3://NETWORK_VOLUME_ID/my-folder/file.txt

# Retry on timeout (large transfers)
export AWS_RETRY_MODE=standard
export AWS_MAX_ATTEMPTS=10
# Extend read timeout for large files (seconds)
aws s3 cp large-file.zip \
--region DATACENTER \
--endpoint-url https://s3api-DATACENTER.runpod.io/ \
--cli-read-timeout 7200 \
s3://NETWORK_VOLUME_ID/