High-performance RLHF framework with Ray+vLLM acceleration. Use for PPO, GRPO, RLOO, and DPO training of large models (7B-70B+). Built on Ray, vLLM, and DeepSpeed ZeRO-3; its distributed architecture and GPU resource sharing make it about 2× faster than DeepSpeedChat.
# Install the skill
npx skill4agent add davila7/claude-code-templates openrlhf-training

# Launch Docker container
docker run --runtime=nvidia -it --rm --shm-size="10g" --cap-add=SYS_ADMIN \
-v $PWD:/openrlhf nvcr.io/nvidia/pytorch:25.02-py3 bash
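
# Optional sanity check inside the container (a minimal sketch): confirm the GPUs
# are visible before installing anything (nvidia-smi ships with the NVIDIA image)
nvidia-smi -L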
# Uninstall conflicts
sudo pip uninstall xgboost transformer_engine flash_attn pynvml -y
# Install OpenRLHF with vLLM
pip install openrlhf[vllm]
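
# Quick import check (a sketch; module names assumed from the packages installed above)
python3 -c "import openrlhf, vllm, ray; print(ray.__version__)"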

# Start a Ray head node, then submit the PPO training job
ray start --head --node-ip-address 0.0.0.0 --num-gpus 8
ray job submit --address="http://127.0.0.1:8265" \
--runtime-env-json='{"working_dir": "/openrlhf"}' \
-- python3 -m openrlhf.cli.train_ppo_ray \
--ref_num_nodes 1 --ref_num_gpus_per_node 8 \
--reward_num_nodes 1 --reward_num_gpus_per_node 8 \
--critic_num_nodes 1 --critic_num_gpus_per_node 8 \
--actor_num_nodes 1 --actor_num_gpus_per_node 8 \
--vllm_num_engines 4 --vllm_tensor_parallel_size 2 \
--colocate_all_models \
--vllm_gpu_memory_utilization 0.5 \
--pretrain OpenRLHF/Llama-3-8b-sft-mixture \
--reward_pretrain OpenRLHF/Llama-3-8b-rm-700k \
--save_path ./output/llama3-8b-rlhf \
--micro_train_batch_size 8 --train_batch_size 128 \
--micro_rollout_batch_size 16 --rollout_batch_size 1024 \
--max_epochs 1 --prompt_max_len 1024 --generate_max_len 1024 \
--zero_stage 3 --bf16 \
--actor_learning_rate 5e-7 --critic_learning_rate 9e-6 \
--init_kl_coef 0.01 --normalize_reward \
--gradient_checkpointing --packing_samples \
--vllm_enable_sleep --deepspeed_enable_sleep
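
# Follow the training logs (a sketch; JOB_ID is a placeholder printed by `ray job submit`)
ray job logs --address="http://127.0.0.1:8265" --follow JOB_ID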

# GRPO: same command as PPO, but add:
--advantage_estimator group_norm

# Train a reward model
deepspeed --module openrlhf.cli.train_rm \
--save_path ./output/llama3-8b-rm \
--save_steps -1 --logging_steps 1 \
--eval_steps -1 --train_batch_size 256 \
--micro_train_batch_size 1 --pretrain meta-llama/Meta-Llama-3-8B \
--bf16 --max_epochs 1 --max_len 8192 \
--zero_stage 3 --learning_rate 9e-6 \
--dataset OpenRLHF/preference_dataset_mixture2_and_safe_pku \
--apply_chat_template --chosen_key chosen \
--rejected_key rejected --flash_attn --gradient_checkpointing
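
# Sketch of a preference record that the --chosen_key/--rejected_key flags select.
# Hypothetical example; with --apply_chat_template each key typically holds a chat
# message list rather than a raw string (exact schema depends on the dataset):
cat <<'EOF' > prefs.jsonl
{"chosen": [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}], "rejected": [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Go away."}]}
EOF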

# Full pipeline: PPO with the reward model trained above
ray start --head --node-ip-address 0.0.0.0 --num-gpus 8
ray job submit --address="http://127.0.0.1:8265" \
-- python3 -m openrlhf.cli.train_ppo_ray \
--ref_num_nodes 1 --ref_num_gpus_per_node 8 \
--reward_num_nodes 1 --reward_num_gpus_per_node 8 \
--critic_num_nodes 1 --critic_num_gpus_per_node 8 \
--actor_num_nodes 1 --actor_num_gpus_per_node 8 \
--vllm_num_engines 4 --vllm_tensor_parallel_size 2 \
--colocate_all_models \
--pretrain OpenRLHF/Llama-3-8b-sft-mixture \
--reward_pretrain ./output/llama3-8b-rm \
--save_path ./output/llama3-8b-ppo \
--micro_train_batch_size 8 --train_batch_size 128 \
--micro_rollout_batch_size 16 --rollout_batch_size 1024 \
--max_epochs 1 --prompt_max_len 1024 --generate_max_len 1024 \
--zero_stage 3 --bf16 \
--actor_learning_rate 5e-7 --critic_learning_rate 9e-6 \
--init_kl_coef 0.01 --normalize_reward \
--vllm_enable_sleep --deepspeed_enable_sleep
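
# Batch-size bookkeeping (a sketch, assuming standard DeepSpeed semantics where
# --train_batch_size is global and --micro_train_batch_size is per GPU):
#   grad_accum_steps = train_batch_size / (micro_train_batch_size * n_gpus)
#                    = 128 / (8 * 8) = 2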

# GRPO training
ray job submit --address="http://127.0.0.1:8265" \
-- python3 -m openrlhf.cli.train_ppo_ray \
--advantage_estimator group_norm \
--ref_num_nodes 1 --ref_num_gpus_per_node 8 \
--reward_num_nodes 1 --reward_num_gpus_per_node 8 \
--actor_num_nodes 1 --actor_num_gpus_per_node 8 \
--vllm_num_engines 4 --vllm_tensor_parallel_size 2 \
--colocate_all_models \
--pretrain OpenRLHF/Llama-3-8b-sft-mixture \
--reward_pretrain OpenRLHF/Llama-3-8b-rm-700k \
--save_path ./output/llama3-8b-grpo \
--micro_train_batch_size 8 --train_batch_size 128 \
--micro_rollout_batch_size 16 --rollout_batch_size 1024 \
--max_epochs 1 --bf16 \
--actor_learning_rate 5e-7 \
--init_kl_coef 0.01 --use_kl_loss --kl_estimator k3 \
--normalize_reward --no_advantage_std_norm

# Key GRPO flags:
--advantage_estimator group_norm
--use_kl_loss
--kl_estimator k3
--no_advantage_std_norm
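
# What group_norm computes (a sketch, assuming the usual GRPO formulation: per prompt,
# rewards are normalized within the group of sampled responses):
#   advantage_i = (reward_i - mean(rewards)) / std(rewards)
# --no_advantage_std_norm drops the division by std(rewards).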

# DPO training
deepspeed --module openrlhf.cli.train_dpo \
--save_path ./output/llama3-8b-dpo \
--save_steps -1 --logging_steps 1 \
--eval_steps -1 --train_batch_size 256 \
--micro_train_batch_size 2 --pretrain meta-llama/Meta-Llama-3-8B \
--bf16 --max_epochs 1 --max_len 8192 \
--zero_stage 3 --learning_rate 5e-7 --beta 0.1 \
--dataset OpenRLHF/preference_dataset_mixture2_and_safe_pku \
--apply_chat_template --chosen_key chosen \
--rejected_key rejected --flash_attn --gradient_checkpointing
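
# Smoke-test the saved checkpoint (a sketch; assumes --save_path wrote a standard
# Hugging Face layout that vLLM can load directly):
vllm serve ./output/llama3-8b-dpo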

# If you run out of GPU memory, remove the --colocate_all_models flag
# and allocate separate GPUs for each model:
--actor_num_gpus_per_node 8 \
--critic_num_gpus_per_node 8 \
--reward_num_gpus_per_node 8 \
--ref_num_gpus_per_node 8
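
# GPU budget without colocation (worked from the flags above, assuming the vLLM
# engines also get their own GPUs): actor, critic, reward, and reference take
# 8 GPUs each, plus 4 engines * tensor parallel 2 = 8 GPUs, so 4 * 8 + 8 = 40 GPUs.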

# Keep all GPUs visible to Ray workers when sharing devices:
export RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES=1

# To save memory while colocating, enable sleep modes:
--colocate_all_models \
--vllm_enable_sleep \
--deepspeed_enable_sleep

# Increase the KL coefficient to keep the policy closer to the reference:
--init_kl_coef 0.05  # increased from 0.01

# vLLM engine settings:
--vllm_num_engines 4 \
--vllm_tensor_parallel_size 2 \
--vllm_gpu_memory_utilization 0.5
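
# While tuning --vllm_gpu_memory_utilization, watch live GPU memory usage
# (standard NVIDIA tooling, outside OpenRLHF itself):
watch -n 1 nvidia-smi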