Educational GPT implementation in ~300 lines. Reproduces GPT-2 (124M) on OpenWebText. Clean, hackable code for learning transformers. By Andrej Karpathy. Perfect for understanding GPT architecture from scratch. Train on Shakespeare (CPU) or OpenWebText (multi-GPU).
## Install

```bash
# Add the skill
npx skill4agent add orchestra-research/ai-research-skills nanogpt

# Install dependencies
pip install torch numpy transformers datasets tiktoken wandb tqdm
```

## Quick start: character-level Shakespeare

```bash
# Step 1: Prepare data (creates train.bin and val.bin)
python data/shakespeare_char/prepare.py

# Step 2: Train a small model (~5 minutes on CPU)
python train.py config/train_shakespeare_char.py

# Step 3: Generate text
python sample.py --out_dir=out-shakespeare-char
```

Sample output:

```
ROMEO:
What say'st thou? Shall I speak, and be a man?

JULIET:
I am afeard, and yet I'll speak; for thou art
One that hath been a man, and yet I know not
What thou art.
```

`config/train_shakespeare_char.py`:

```python
# Model config
n_layer = 6 # 6 transformer layers
n_head = 6 # 6 attention heads
n_embd = 384 # 384-dim embeddings
block_size = 256 # 256 char context
# Training config
batch_size = 64
learning_rate = 1e-3
max_iters = 5000
eval_interval = 500
# Hardware
device = 'cpu' # Or 'cuda'
compile = False  # set True with PyTorch 2.0
```
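Any of these values can be overridden from the command line: train.py loads the config file, then applies `--key=value` flags on top (nanoGPT's configurator). For example, a quick CPU smoke test might look like:

```bash
python train.py config/train_shakespeare_char.py \
    --device=cpu --compile=False --max_iters=500 --eval_iters=20
```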
## Reproduce GPT-2 (124M) on OpenWebText

```bash
# Step 1: Prepare OpenWebText (takes ~1 hour)
python data/openwebtext/prepare.py
# Step 2: Train GPT-2 124M with DDP (8 GPUs)
torchrun --standalone --nproc_per_node=8 \
train.py config/train_gpt2.py
# Step 3: Sample from trained model
python sample.py --out_dir=out
```

`config/train_gpt2.py`:

```python
# GPT-2 (124M) architecture
n_layer = 12
n_head = 12
n_embd = 768
block_size = 1024
dropout = 0.0
# Training
batch_size = 12
gradient_accumulation_steps = 5 * 8 # Total batch ~0.5M tokens
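# note: effective batch = batch_size * block_size * gradient_accumulation_steps
#       = 12 * 1024 * 40 = 491,520 tokens/step, i.e. ~0.5M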
learning_rate = 6e-4
max_iters = 600000
lr_decay_iters = 600000
# System
compile = True  # PyTorch 2.0
```
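The single-node command above scales to multiple machines via torchrun's rendezvous flags. A sketch, assuming two 8-GPU nodes and an example master IP:

```bash
# on the master node (rank 0)
torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0 \
    --master_addr=123.456.123.456 --master_port=1234 \
    train.py config/train_gpt2.py

# on the worker node (rank 1)
torchrun --nproc_per_node=8 --nnodes=2 --node_rank=1 \
    --master_addr=123.456.123.456 --master_port=1234 \
    train.py config/train_gpt2.py
```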
## Fine-tune from pretrained GPT-2

```python
# In train.py or a config file
init_from = 'gpt2'  # options: gpt2, gpt2-medium, gpt2-large, gpt2-xl
# the model loads the OpenAI GPT-2 weights automatically
```
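The same weights can also be loaded programmatically. A minimal sketch, assuming nanoGPT's model.py is importable; `GPT.from_pretrained` and `generate` come from the repo's model.py, but treat the exact signatures as assumptions:

```python
import torch
import tiktoken
from model import GPT  # nanoGPT's model.py

model = GPT.from_pretrained('gpt2')  # copies the OpenAI weights from HuggingFace
model.eval()

enc = tiktoken.get_encoding('gpt2')
idx = torch.tensor([enc.encode('What is the answer to life,')], dtype=torch.long)
with torch.no_grad():
    out = model.generate(idx, max_new_tokens=40, temperature=0.8, top_k=200)
print(enc.decode(out[0].tolist()))
```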
Fine-tune on the tiny Shakespeare dataset:

```bash
python train.py config/finetune_shakespeare.py
```

`config/finetune_shakespeare.py`:

```python
# Start from GPT-2
init_from = 'gpt2'
# Dataset
dataset = 'shakespeare'  # BPE-tokenized Shakespeare; char-level data wouldn't match GPT-2's vocab
batch_size = 1
block_size = 1024
# Fine-tuning
learning_rate = 3e-5 # Lower LR for fine-tuning
max_iters = 2000
warmup_iters = 100
# Regularization
weight_decay = 1e-1
```
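After fine-tuning, point sample.py at the new checkpoint and seed it with a prompt. A sketch, assuming this config writes its checkpoint to `out-shakespeare` (check `out_dir` in the config file):

```bash
python sample.py --out_dir=out-shakespeare --start="ROMEO:" --num_samples=3
```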
## Train on your own text

Create a `prepare.py` for your dataset (character-level, mirroring `data/shakespeare_char/prepare.py`):

```python
# data/custom/prepare.py
import os
import pickle
import numpy as np

# Load your data
with open('my_data.txt', 'r') as f:
    text = f.read()

# Create character mappings
chars = sorted(list(set(text)))
stoi = {ch: i for i, ch in enumerate(chars)}
itos = {i: ch for i, ch in enumerate(chars)}

# Tokenize (uint16 is plenty for a character-level vocab)
data = np.array([stoi[ch] for ch in text], dtype=np.uint16)

# Split train/val 90/10
n = len(data)
train_data = data[:int(n * 0.9)]
val_data = data[int(n * 0.9):]

# Save
os.makedirs('data/custom', exist_ok=True)
train_data.tofile('data/custom/train.bin')
val_data.tofile('data/custom/val.bin')

# Save the vocab so train.py picks up vocab_size and sample.py can decode
with open('data/custom/meta.pkl', 'wb') as f:
    pickle.dump({'vocab_size': len(chars), 'stoi': stoi, 'itos': itos}, f)
```

Then run:

```bash
python data/custom/prepare.py
python train.py --dataset=custom
```
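A quick sanity check before training, using only the files written above:

```python
# memmap the token stream and decode a short prefix with meta.pkl
import pickle
import numpy as np

with open('data/custom/meta.pkl', 'rb') as f:
    meta = pickle.load(f)

train = np.memmap('data/custom/train.bin', dtype=np.uint16, mode='r')
print(f"{len(train):,} training tokens, vocab size {meta['vocab_size']}")
print(''.join(meta['itos'][int(t)] for t in train[:200]))  # should read as your text
```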
## Key files

- `model.py`: the GPT model definition
- `train.py`: the training loop

## Troubleshooting

Out of memory:

```python
batch_size = 1                    # reduce from 12
block_size = 512                  # reduce from 1024
gradient_accumulation_steps = 40  # increase to maintain the effective batch size
```

Training too slow:

```python
compile = True      # PyTorch 2.0 compilation, ~2× speedup
dtype = 'bfloat16'  # or 'float16'
```

Samples look undertrained:

```python
max_iters = 10000  # increase from 5000
```

Samples are repetitive or incoherent:

```python
# In sample.py
temperature = 0.7  # lower from 1.0
top_k = 200        # add top-k sampling
```

`init_from = 'gpt2'` fails to load:

```bash
pip install transformers
```

Invalid `init_from` value:

```python
init_from = 'gpt2'  # valid: gpt2, gpt2-medium, gpt2-large, gpt2-xl
```
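For reference, here is roughly what `temperature` and `top_k` do inside the sampling loop; a sketch mirroring the logic in nanoGPT's `model.generate` (the function name and framing here are illustrative, not the repo's API):

```python
import torch
import torch.nn.functional as F

def sample_next(logits, temperature=1.0, top_k=None):
    """Pick the next token id from a (batch, vocab) tensor of logits."""
    logits = logits / temperature  # <1.0 sharpens the distribution, >1.0 flattens it
    if top_k is not None:
        v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
        logits[logits < v[:, [-1]]] = -float('inf')  # mask everything below the k-th logit
    probs = F.softmax(logits, dim=-1)
    return torch.multinomial(probs, num_samples=1)  # one sampled id per batch row
```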