Deploy ML models with FastAPI, Docker, and Kubernetes. Use this skill for serving predictions, containerization, monitoring, and drift detection, or when encountering latency issues, health-check failures, or version conflicts.
```bash
npx skill4agent add secondsky/claude-skills model-deployment
```

| Method | Use Case | Latency |
|---|---|---|
| REST API | Web services | Medium |
| Batch | Large-scale processing | N/A |
| Streaming | Real-time | Low |
| Edge | On-device | Very low |
```python
# app.py
from fastapi import FastAPI
from pydantic import BaseModel
import joblib
import numpy as np

app = FastAPI()
model = joblib.load('model.pkl')

class PredictionRequest(BaseModel):
    features: list[float]

class PredictionResponse(BaseModel):
    prediction: float
    probability: float

@app.get('/health')
def health():
    return {'status': 'healthy'}

@app.post('/predict', response_model=PredictionResponse)
def predict(request: PredictionRequest):
    features = np.array(request.features).reshape(1, -1)
    prediction = model.predict(features)[0]
    probability = model.predict_proba(features)[0].max()
    return PredictionResponse(prediction=prediction, probability=probability)
```
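With the server running locally (`uvicorn app:app --port 8000`), the endpoint can be exercised with a short client script. A minimal sketch with `requests`; the feature vector is a placeholder and must match the width the model was trained on:

```python
import requests

# Hypothetical smoke test against a locally running server
resp = requests.post(
    "http://localhost:8000/predict",
    json={"features": [5.1, 3.5, 1.4, 0.2]},  # placeholder values
    timeout=5,
)
resp.raise_for_status()
print(resp.json())  # {'prediction': ..., 'probability': ...}
```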
```dockerfile
FROM python:3.11-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY model.pkl .
COPY app.py .
EXPOSE 8000
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
```
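The Dockerfile copies a `requirements.txt` that the section doesn't show. A plausible minimal version, assuming a scikit-learn model behind the FastAPI app (the exact package set is an assumption; pin versions in practice):

```text
fastapi
uvicorn[standard]
pydantic
scikit-learn
joblib
numpy
```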
```python
from datetime import datetime

class ModelMonitor:
    def __init__(self):
        self.predictions = []
        self.latencies = []

    def log_prediction(self, input_data, prediction, latency):
        self.predictions.append({
            'input': input_data,
            'prediction': prediction,
            'latency': latency,
            'timestamp': datetime.now()
        })

    def detect_drift(self, reference_distribution):
        # Compare current predictions to reference
        pass
```
```python
# 1. Save the trained model
import joblib
joblib.dump(model, 'model.pkl')
```

```bash
# 2. Create the FastAPI app (see references/fastapi-production-server.md)
#    app.py with /predict and /health endpoints

# 3. Create the Dockerfile
cat > Dockerfile << 'EOF'
FROM python:3.11-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY app.py model.pkl ./
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
EOF

# 4. Build and test locally
docker build -t model-api:v1.0.0 .
docker run -p 8000:8000 model-api:v1.0.0

# 5. Push to the registry
docker tag model-api:v1.0.0 registry.example.com/model-api:v1.0.0
docker push registry.example.com/model-api:v1.0.0

# 6. Deploy to Kubernetes
kubectl apply -f deployment.yaml
kubectl rollout status deployment/model-api
```
```python
# app.py
from fastapi import HTTPException

@app.get("/health")  # Liveness: is the service alive?
async def health():
    return {"status": "healthy"}

@app.get("/ready")  # Readiness: can it handle traffic?
async def ready():
    try:
        _ = model_store.model  # Verify the model is loaded
        return {"status": "ready"}
    except Exception:
        raise HTTPException(503, "Not ready")
```
```yaml
# deployment.yaml
livenessProbe:
  httpGet:
    path: /health
    port: 8000
  initialDelaySeconds: 30
readinessProbe:
  httpGet:
    path: /ready
    port: 8000
  initialDelaySeconds: 5
```
`FileNotFoundError: model.pkl`

```dockerfile
# ❌ Wrong: model copied to a different directory
COPY model.pkl /app/models/    # but the code expects /app/model.pkl

# ✅ Correct: consistent paths via an env var
COPY model.pkl /models/model.pkl
ENV MODEL_PATH=/models/model.pkl
```

```python
# In Python, resolve the path from the environment:
import os
model_path = os.getenv("MODEL_PATH", "/models/model.pkl")
```
```python
from pydantic import BaseModel, Field, validator
import numpy as np

class PredictionRequest(BaseModel):
    features: list[float] = Field(..., min_items=1, max_items=100)

    @validator('features')
    def validate_finite(cls, v):
        if not all(np.isfinite(val) for val in v):
            raise ValueError("All features must be finite")
        return v

# FastAPI auto-validates and returns 422 for invalid requests
@app.post("/predict")
async def predict(request: PredictionRequest):
    # The request is guaranteed valid here
    pass
```
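To confirm the validation behaves as described, a quick check sketched with FastAPI's `TestClient` (the empty list deliberately violates `min_items=1`):

```python
from fastapi.testclient import TestClient

client = TestClient(app)

# An empty feature list violates min_items=1, so FastAPI rejects it
resp = client.post("/predict", json={"features": []})
print(resp.status_code)  # 422
```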
```python
import time

# training_data and alert_manager are app-specific and defined elsewhere
monitor = ModelMonitor(reference_data=training_data, drift_threshold=0.1)

@app.post("/predict")
async def predict(request: PredictionRequest):
    start = time.perf_counter()
    features = np.array(request.features).reshape(1, -1)
    prediction = float(model.predict(features)[0])
    latency = time.perf_counter() - start
    monitor.log_prediction(features, prediction, latency)
    # Alert if drift is detected
    if monitor.should_retrain():
        alert_manager.send_alert("Model drift detected - retrain recommended")
    return {"prediction": prediction}
```
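The handler above assumes a fuller `ModelMonitor` than the earlier skeleton: a `reference_data` constructor argument and a `should_retrain()` method. A minimal sketch of one way to provide them, using a two-sample Kolmogorov-Smirnov test (the windowing and threshold semantics here are assumptions, not the skill's actual implementation):

```python
import numpy as np
from scipy import stats

class ModelMonitor:
    def __init__(self, reference_data, drift_threshold=0.1, window=500):
        # reference_data: predictions (or scores) collected at training time
        self.reference = np.asarray(reference_data, dtype=float)
        self.drift_threshold = drift_threshold
        self.window = window
        self.predictions = []

    def log_prediction(self, input_data, prediction, latency):
        self.predictions.append(float(prediction))

    def should_retrain(self):
        if len(self.predictions) < self.window:
            return False  # not enough recent data for a stable comparison
        recent = np.asarray(self.predictions[-self.window:])
        # KS statistic = max distance between the two empirical CDFs
        statistic, _ = stats.ks_2samp(self.reference, recent)
        return statistic > self.drift_threshold
```

Comparing the KS statistic rather than the p-value to a fixed threshold keeps the check independent of sample size; see references/model-monitoring-drift.md for the full treatment.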
```yaml
resources:
  requests:
    memory: "512Mi"   # Guaranteed
    cpu: "500m"
  limits:
    memory: "1Gi"     # Maximum allowed
    cpu: "1000m"
```

```bash
# Monitor actual usage:
kubectl top pods
```
```bash
# Deploy with a version tag
kubectl set image deployment/model-api model-api=registry/model-api:v1.2.0

# If there are issues, roll back to the previous revision
kubectl rollout undo deployment/model-api

# Or pin a specific version
kubectl set image deployment/model-api model-api=registry/model-api:v1.1.0
```
```python
# Request model inferred from the handler below
class BatchPredictionRequest(BaseModel):
    instances: list[list[float]]

@app.post("/predict/batch")
async def predict_batch(request: BatchPredictionRequest):
    # Process all rows at once (vectorized)
    features = np.array(request.instances)
    predictions = model.predict(features)  # Much faster than per-row calls
    return {"predictions": predictions.tolist()}
```
```yaml
# .github/workflows/deploy.yml
- name: Validate model performance
  run: |
    python scripts/validate_model.py \
      --model model.pkl \
      --test-data test.csv \
      --min-accuracy 0.85  # Fail the job if accuracy is below the threshold
```

Further detail lives in the bundled references:

- references/fastapi-production-server.md
- references/model-monitoring-drift.md
- references/containerization-deployment.md
- references/cicd-ml-models.md
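The CI step above shells out to `scripts/validate_model.py`, which the skill doesn't include inline. A minimal sketch, assuming a scikit-learn classifier and a CSV whose last column is the label (both assumptions, not part of the original):

```python
# scripts/validate_model.py (sketch; the real script may differ)
import argparse
import sys

import joblib
import pandas as pd
from sklearn.metrics import accuracy_score

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", required=True)
    parser.add_argument("--test-data", required=True)
    parser.add_argument("--min-accuracy", type=float, default=0.85)
    args = parser.parse_args()

    model = joblib.load(args.model)
    data = pd.read_csv(args.test_data)
    X, y = data.iloc[:, :-1], data.iloc[:, -1]  # assumes label in last column

    accuracy = accuracy_score(y, model.predict(X))
    print(f"accuracy={accuracy:.4f} (threshold={args.min_accuracy})")
    # A non-zero exit code fails the CI job
    sys.exit(0 if accuracy >= args.min_accuracy else 1)

if __name__ == "__main__":
    main()
```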