# Cilium Expert

Expert in Cilium eBPF-based networking and security for Kubernetes. Use for CNI setup, network policies (L3/L4/L7), service mesh, Hubble observability, zero-trust security, and cluster-wide network troubleshooting. Specializes in high-performance, secure cluster networking.
Install:

```bash
npx skill4agent add martinholovsky/claude-skills-generator cilium-expert
```

Before debugging policies, confirm flow visibility is working (`hubble status`, `hubble observe`).

## Network Policies

```yaml
# Default deny all ingress/egress in namespace
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: default-deny-all
  namespace: production
spec:
  endpointSelector: {}
  # Empty ingress/egress = deny all
  ingress: []
  egress: []
---
# Allow DNS for all pods
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: allow-dns
  namespace: production
spec:
  endpointSelector: {}
  egress:
  - toEndpoints:
    - matchLabels:
        io.kubernetes.pod.namespace: kube-system
        k8s-app: kube-dns
    toPorts:
    - ports:
      - port: "53"
        protocol: UDP
      rules:
        dns:
        - matchPattern: "*" # Allow all DNS queries
---
# Allow specific app communication
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: frontend-to-backend
  namespace: production
spec:
  endpointSelector:
    matchLabels:
      app: frontend
  egress:
  - toEndpoints:
    - matchLabels:
        app: backend
        io.kubernetes.pod.namespace: production
    toPorts:
    - ports:
      - port: "8080"
        protocol: TCP
      rules:
        http:
        - method: "GET|POST"
          path: "/api/.*"
```

Stage new policies with audit mode (`policyAuditMode: true`) so violations are logged instead of dropped while you tune the rules.
## L7 HTTP Policies

```yaml
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: api-gateway-policy
  namespace: production
spec:
  endpointSelector:
    matchLabels:
      app: api-gateway
  ingress:
  - fromEndpoints:
    - matchLabels:
        app: frontend
    toPorts:
    - ports:
      - port: "8080"
        protocol: TCP
      rules:
        http:
        # Only allow specific API endpoints
        - method: "GET"
          path: "/api/v1/(users|products)/.*"
          headers:
          - "X-API-Key: .*" # Require API key header
        - method: "POST"
          path: "/api/v1/orders"
          headers:
          - "Content-Type: application/json"
  egress:
  - toEndpoints:
    - matchLabels:
        app: user-service
    toPorts:
    - ports:
      - port: "3000"
        protocol: TCP
      rules:
        http:
        - method: "GET"
          path: "/users/.*"
  - toFQDNs:
    - matchPattern: "*.stripe.com" # Allow Stripe API
    toPorts:
    - ports:
      - port: "443"
        protocol: TCP
```

HTTP `path` values are regular expressions, so a rule like `/api/v1/.*` covers the whole prefix.
## DNS and FQDN-Based Egress

```yaml
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: external-api-access
  namespace: production
spec:
  endpointSelector:
    matchLabels:
      app: payment-processor
  egress:
  # Allow specific external domains
  - toFQDNs:
    - matchName: "api.stripe.com"
    - matchName: "api.paypal.com"
    - matchPattern: "*.amazonaws.com" # AWS services
    toPorts:
    - ports:
      - port: "443"
        protocol: TCP
  # Allow Kubernetes DNS
  - toEndpoints:
    - matchLabels:
        io.kubernetes.pod.namespace: kube-system
        k8s-app: kube-dns
    toPorts:
    - ports:
      - port: "53"
        protocol: UDP
      rules:
        dns:
        # Only allow DNS queries for approved domains
        - matchPattern: "*.stripe.com"
        - matchPattern: "*.paypal.com"
        - matchPattern: "*.amazonaws.com"
  # Allow API server access; all other egress is implicitly denied
  - toEntities:
    - kube-apiserver
```

In `toFQDNs` rules, use `matchName` for exact domains and `matchPattern` for wildcards.
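The agent's DNS proxy keeps a cache of FQDN-to-IP mappings it has learned, and inspecting it is a quick way to confirm `toFQDNs` rules are taking effect. A sketch (on newer Cilium releases the in-pod binary is named `cilium-dbg` rather than `cilium`):

```bash
# List learned FQDN -> IP mappings on one agent
kubectl -n kube-system exec ds/cilium -- cilium fqdn cache list
```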
## ClusterMesh (Multi-Cluster)

```bash
# Install Cilium with ClusterMesh enabled
# Cluster 1 (us-east)
helm install cilium cilium/cilium \
  --namespace kube-system \
  --set cluster.name=us-east \
  --set cluster.id=1 \
  --set clustermesh.useAPIServer=true \
  --set clustermesh.apiserver.service.type=LoadBalancer

# Cluster 2 (us-west)
helm install cilium cilium/cilium \
  --namespace kube-system \
  --set cluster.name=us-west \
  --set cluster.id=2 \
  --set clustermesh.useAPIServer=true \
  --set clustermesh.apiserver.service.type=LoadBalancer

# Connect clusters
cilium clustermesh connect --context us-east --destination-context us-west
```

Every connected cluster needs a unique `cluster.id` and `cluster.name`.
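Before creating global services, confirm the mesh is actually established; the Cilium CLI reports per-cluster connectivity:

```bash
# Wait until each side reports the remote cluster as reachable
cilium clustermesh status --context us-east --wait
cilium clustermesh status --context us-west --wait
```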
```yaml
# Global Service (accessible from all clusters)
apiVersion: v1
kind: Service
metadata:
  name: global-backend
  namespace: production
  annotations:
    service.cilium.io/global: "true"
    service.cilium.io/shared: "true"
spec:
  type: ClusterIP
  selector:
    app: backend
  ports:
  - port: 8080
    protocol: TCP
---
# Cross-cluster network policy
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: allow-cross-cluster
  namespace: production
spec:
  endpointSelector:
    matchLabels:
      app: frontend
  egress:
  - toEndpoints:
    # Matches pods in ANY connected cluster
    - matchLabels:
        app: backend
        io.kubernetes.pod.namespace: production
    toPorts:
    - ports:
      - port: "8080"
        protocol: TCP
```
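One way to confirm the global-service annotations took effect is the agent's service table, which should list backend endpoints from remote clusters alongside local ones; a sketch:

```bash
# Remote-cluster backends appear in the load-balancing table
kubectl -n kube-system exec ds/cilium -- cilium service list
```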
## Transparent Encryption (WireGuard)

```yaml
# Enable WireGuard encryption via ConfigMap
apiVersion: v1
kind: ConfigMap
metadata:
  name: cilium-config
  namespace: kube-system
data:
  enable-wireguard: "true"
  enable-wireguard-userspace-fallback: "false"
```

```bash
# Or via Helm
helm upgrade cilium cilium/cilium \
  --namespace kube-system \
  --reuse-values \
  --set encryption.enabled=true \
  --set encryption.type=wireguard

# Verify encryption status
kubectl -n kube-system exec -ti ds/cilium -- cilium encrypt status
```
```yaml
# Selective encryption per namespace
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: encrypted-namespace
  namespace: production
  annotations:
    cilium.io/encrypt: "true" # Force encryption for this namespace
spec:
  endpointSelector: {}
  ingress:
  - fromEndpoints:
    - matchLabels:
        io.kubernetes.pod.namespace: production
  egress:
  - toEndpoints:
    - matchLabels:
        io.kubernetes.pod.namespace: production
```
## Hubble Observability

```bash
# Install Hubble
helm upgrade cilium cilium/cilium \
  --namespace kube-system \
  --reuse-values \
  --set hubble.relay.enabled=true \
  --set hubble.ui.enabled=true

# Port-forward to the Hubble UI
cilium hubble ui

# CLI: watch flows in real time
hubble observe --namespace production

# Filter by pod
hubble observe --pod production/frontend-7d4c8b6f9-x2m5k

# Show only dropped flows
hubble observe --verdict DROPPED

# Filter by L7 (HTTP)
hubble observe --protocol http --namespace production

# Show flows to a specific service
hubble observe --to-service production/backend

# Show flows with DNS queries
hubble observe --protocol dns --verdict FORWARDED

# Export to JSON for analysis
hubble observe --output json > flows.json

# Check policy verdicts
hubble observe --verdict DENIED --namespace production

# Troubleshoot a specific connection
hubble observe \
  --from-pod production/frontend-7d4c8b6f9-x2m5k \
  --to-pod production/backend-5f8d9c4b2-p7k3n \
  --verdict DROPPED
```

See references/observability.md for more.
## Host Firewall

```yaml
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
  name: host-firewall
spec:
  nodeSelector: {} # Apply to all nodes
  ingress:
  # Allow SSH from bastion hosts only
  - fromCIDR:
    - 10.0.1.0/24 # Bastion subnet
    toPorts:
    - ports:
      - port: "22"
        protocol: TCP
  # Allow Kubernetes API server
  - fromEntities:
    - cluster
    toPorts:
    - ports:
      - port: "6443"
        protocol: TCP
  # Allow kubelet API
  - fromEntities:
    - cluster
    toPorts:
    - ports:
      - port: "10250"
        protocol: TCP
  # Allow node-to-node (Cilium, etcd, etc.)
  - fromCIDR:
    - 10.0.0.0/16 # Node CIDR
    toPorts:
    - ports:
      - port: "4240" # Cilium health
        protocol: TCP
      - port: "4244" # Hubble server
        protocol: TCP
  # Allow monitoring
  - fromEndpoints:
    - matchLabels:
        k8s:io.kubernetes.pod.namespace: monitoring
    toPorts:
    - ports:
      - port: "9090" # Node exporter
        protocol: TCP
  egress:
  # Allow all egress from nodes (can be restricted)
  - toEntities:
    - all
```

Host policies are expressed as `CiliumClusterwideNetworkPolicy` with a `nodeSelector`; inspect node traffic in Hubble by filtering on the `reserved:host` identity.
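Host policies are only enforced when the host firewall feature is turned on; a minimal sketch of enabling it via Helm (using the chart's `hostFirewall.enabled` value):

```bash
helm upgrade cilium cilium/cilium \
  --namespace kube-system \
  --reuse-values \
  --set hostFirewall.enabled=true
```

Roll this out carefully: a mistake in a host policy can lock you out of node SSH.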
## Zero-Trust Patterns

```yaml
# 1. Default deny all traffic in namespace
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: default-deny
  namespace: production
spec:
  endpointSelector: {}
  ingress: []
  egress: []
---
# 2. Identity-based allow (not CIDR-based)
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: allow-by-identity
  namespace: production
spec:
  endpointSelector:
    matchLabels:
      app: web
  ingress:
  - fromEndpoints:
    - matchLabels:
        app: frontend
        env: production # Require specific identity
---
# 3. Audit mode for testing
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: audit-mode-policy
  namespace: production
  annotations:
    cilium.io/policy-audit-mode: "true"
spec:
  # Policy logged but not enforced
```
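While a policy is in audit mode, traffic that would have been denied is forwarded but flagged, so you can watch for it before enforcing; a sketch using Hubble's AUDIT verdict filter:

```bash
# Flows that would be dropped once the policy is enforced
hubble observe --verdict AUDIT --namespace production
```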
## Multi-Tenant Isolation

```yaml
# Isolate tenants by namespace
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: tenant-isolation
  namespace: tenant-a
spec:
  endpointSelector: {}
  ingress:
  - fromEndpoints:
    - matchLabels:
        io.kubernetes.pod.namespace: tenant-a # Same namespace only
  egress:
  - toEndpoints:
    - matchLabels:
        io.kubernetes.pod.namespace: tenant-a
  - toEntities:
    - kube-apiserver
  # kube-dns is not an entity; allow DNS by endpoint labels
  - toEndpoints:
    - matchLabels:
        io.kubernetes.pod.namespace: kube-system
        k8s-app: kube-dns
```
```yaml
# Prevent dev from accessing prod
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
  name: env-isolation
spec:
  endpointSelector:
    matchLabels:
      env: production
  ingress:
  - fromEndpoints:
    - matchLabels:
        env: production # Only prod can talk to prod
  ingressDeny:
  - fromEndpoints:
    - matchLabels:
        env: development # Explicit deny from dev
```
## Mutual Authentication (mTLS with SPIRE)

```bash
helm upgrade cilium cilium/cilium \
  --namespace kube-system \
  --reuse-values \
  --set authentication.mutual.spire.enabled=true \
  --set authentication.mutual.spire.install.enabled=true
```
```yaml
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: mtls-required
  namespace: production
spec:
  endpointSelector:
    matchLabels:
      app: payment-service
  ingress:
  - fromEndpoints:
    - matchLabels:
        app: api-gateway
    authentication:
      mode: "required" # Require mTLS authentication
```

See references/network-policies.md and references/observability.md.
## Policy Testing Workflow

```bash
# 1. Create a connectivity test client before implementing the policy
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: connectivity-test-client
  namespace: test-ns
  labels:
    app: test-client
spec:
  containers:
  - name: curl
    image: curlimages/curl:latest
    command: ["sleep", "infinity"]
EOF

# 2. Baseline test (no policy yet) - should succeed
kubectl exec -n test-ns connectivity-test-client -- \
  curl -s --connect-timeout 5 http://backend-svc:8080/health

# 3. After applying the policy below, the same request should fail
kubectl exec -n test-ns connectivity-test-client -- \
  curl -s --connect-timeout 5 http://backend-svc:8080/health
# Expected: connection refused/timeout
```
```yaml
# Apply the network policy
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: backend-policy
  namespace: test-ns
spec:
  endpointSelector:
    matchLabels:
      app: backend
  ingress:
  - fromEndpoints:
    - matchLabels:
        app: frontend # Only frontend allowed, not test-client
    toPorts:
    - ports:
      - port: "8080"
        protocol: TCP
```
```bash
# Run comprehensive connectivity test
cilium connectivity test --test-namespace=cilium-test

# Verify specific policy enforcement
hubble observe --namespace test-ns --verdict DROPPED \
  --from-label app=test-client --to-label app=backend

# Check policy status
cilium policy get -n test-ns
```
## Health and Installation Validation

```bash
# Validate Cilium agent health
kubectl -n kube-system exec ds/cilium -- cilium status

# Verify all endpoints have an identity
cilium endpoint list

# Check the BPF policy map
kubectl -n kube-system exec ds/cilium -- cilium bpf policy get --all

# Validate there are no unexpected drops
hubble observe --verdict DROPPED --last 100 | grep -v "expected"

# Helm test for installation validation
helm test cilium -n kube-system
```
```bash
# Test Cilium installation integrity
helm test cilium --namespace kube-system --logs

# Validate values before an upgrade
helm template cilium cilium/cilium \
  --namespace kube-system \
  --values values.yaml \
  --validate

# Dry-run the upgrade
helm upgrade cilium cilium/cilium \
  --namespace kube-system \
  --values values.yaml \
  --dry-run
```
## Performance Tuning

```yaml
# BAD: Multiple label matches with regex-like behavior
spec:
  endpointSelector:
    matchExpressions:
    - key: app
      operator: In
      values: [frontend-v1, frontend-v2, frontend-v3, frontend-v4]
    - key: version
      operator: NotIn
      values: [deprecated, legacy]
```

```yaml
# GOOD: Single label with aggregated selector
spec:
  endpointSelector:
    matchLabels:
      app: frontend
      tier: web # Use aggregated label instead of version list
```

```yaml
# BAD: CIDR-based rules require per-packet evaluation
egress:
- toCIDR:
  - 10.0.0.0/8
  - 172.16.0.0/12
  - 192.168.0.0/16
```

```yaml
# GOOD: Identity-based selectors use efficient BPF map lookups
egress:
- toEndpoints:
  - matchLabels:
      app: backend
      io.kubernetes.pod.namespace: production
- toEntities:
  - cluster # Pre-cached entity
```

```bash
# BAD: Cross-node DNS queries add latency (default CoreDNS deployment)

# GOOD: Enable node-local DNS in Cilium
helm upgrade cilium cilium/cilium \
  --namespace kube-system \
  --reuse-values \
  --set nodeLocalDNS.enabled=true

# Or tune Cilium's DNS proxy caching
helm upgrade cilium cilium/cilium \
  --namespace kube-system \
  --reuse-values \
  --set dnsProxy.enableDnsCompression=true \
  --set dnsProxy.endpointMaxIpPerHostname=50
```
```yaml
# BAD: 100% flow capture causes high CPU/memory usage
hubble:
  metrics:
    enabled: true
  relay:
    enabled: true
  # Default: all flows captured
```

```yaml
# GOOD: Reduce volume and cardinality in production
hubble:
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
  relay:
    enabled: true
    prometheus:
      enabled: true
  # Reduce cardinality
  redact:
    enabled: true
    httpURLQuery: true
    httpHeaders:
      allow:
      - "Content-Type"
  # Use selective flow export
  export:
    static:
      enabled: true
      filePath: /var/run/cilium/hubble/events.log
      fieldMask:
      - time
      - verdict
      - drop_reason
      - source.namespace
      - destination.namespace
```
```yaml
# BAD: L7 parsing on all pods causes high overhead
spec:
  endpointSelector: {} # All pods
  ingress:
  - toPorts:
    - ports:
      - port: "8080"
      rules:
        http:
        - method: ".*"
```

```yaml
# GOOD: L7 only on services that need it
spec:
  endpointSelector:
    matchLabels:
      app: api-gateway # Only on gateway
      requires-l7: "true"
  ingress:
  - fromEndpoints:
    - matchLabels:
        app: frontend
    toPorts:
    - ports:
      - port: "8080"
      rules:
        http:
        - method: "GET|POST"
          path: "/api/v1/.*"
```
```bash
# BAD: default BPF map sizes may be too small for high-connection
# workloads and can cause connection failures

# GOOD: Adjust for cluster size
helm upgrade cilium cilium/cilium \
  --namespace kube-system \
  --reuse-values \
  --set bpf.ctTcpMax=524288 \
  --set bpf.ctAnyMax=262144 \
  --set bpf.natMax=524288 \
  --set bpf.policyMapMax=65536
```
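To see what the agent actually allocated and whether tables are filling up, the verbose agent status includes BPF map information; a quick check (the grep pattern is just illustrative):

```bash
kubectl -n kube-system exec ds/cilium -- cilium status --verbose | grep -i -A 2 map
```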
## Validation Scripts

```bash
#!/bin/bash
# test-network-policies.sh
set -e

NAMESPACE="policy-test"

# Setup test namespace
kubectl create namespace $NAMESPACE --dry-run=client -o yaml | kubectl apply -f -

# Deploy test pods
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: client
  namespace: $NAMESPACE
  labels:
    app: client
spec:
  containers:
  - name: curl
    image: curlimages/curl:latest
    command: ["sleep", "infinity"]
---
apiVersion: v1
kind: Pod
metadata:
  name: server
  namespace: $NAMESPACE
  labels:
    app: server
spec:
  containers:
  - name: nginx
    image: nginx:alpine
    ports:
    - containerPort: 80
EOF

# Wait for pods
kubectl wait --for=condition=Ready pod/client pod/server -n $NAMESPACE --timeout=60s

# Test 1: Baseline connectivity (should pass)
echo "Test 1: Baseline connectivity..."
SERVER_IP=$(kubectl get pod server -n $NAMESPACE -o jsonpath='{.status.podIP}')
kubectl exec -n $NAMESPACE client -- curl -s --connect-timeout 5 "http://$SERVER_IP" > /dev/null
echo "PASS: Baseline connectivity works"

# Apply deny policy
kubectl apply -f - <<EOF
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: deny-all
  namespace: $NAMESPACE
spec:
  endpointSelector:
    matchLabels:
      app: server
  ingress: []
EOF

# Wait for policy propagation
sleep 5

# Test 2: Deny policy blocks traffic (should fail)
echo "Test 2: Deny policy enforcement..."
if kubectl exec -n $NAMESPACE client -- curl -s --connect-timeout 5 "http://$SERVER_IP" 2>/dev/null; then
  echo "FAIL: Traffic should be blocked"
  exit 1
else
  echo "PASS: Deny policy blocks traffic"
fi

# Apply allow policy
kubectl apply -f - <<EOF
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: allow-client
  namespace: $NAMESPACE
spec:
  endpointSelector:
    matchLabels:
      app: server
  ingress:
  - fromEndpoints:
    - matchLabels:
        app: client
    toPorts:
    - ports:
      - port: "80"
        protocol: TCP
EOF

sleep 5

# Test 3: Allow policy permits traffic (should pass)
echo "Test 3: Allow policy enforcement..."
kubectl exec -n $NAMESPACE client -- curl -s --connect-timeout 5 "http://$SERVER_IP" > /dev/null
echo "PASS: Allow policy permits traffic"

# Cleanup
kubectl delete namespace $NAMESPACE
echo "All tests passed!"
```
```bash
#!/bin/bash
# test-hubble-flows.sh
# Verify Hubble is capturing flows
echo "Checking Hubble flow capture..."

# Test flow visibility
FLOW_COUNT=$(hubble observe --last 10 --output json | jq -s 'length')
if [ "$FLOW_COUNT" -lt 1 ]; then
  echo "FAIL: No flows captured by Hubble"
  exit 1
fi
echo "PASS: Hubble capturing flows ($FLOW_COUNT recent flows)"

# Test verdict filtering
echo "Checking policy verdicts..."
hubble observe --verdict FORWARDED --last 5 --output json | jq -e '.' > /dev/null
echo "PASS: FORWARDED verdicts visible"

# Test DNS visibility
echo "Checking DNS visibility..."
hubble observe --protocol dns --last 5 --output json | jq -e '.' > /dev/null || echo "INFO: No recent DNS flows"

# Test L7 visibility (if enabled)
echo "Checking L7 visibility..."
hubble observe --protocol http --last 5 --output json | jq -e '.' > /dev/null || echo "INFO: No recent HTTP flows"

echo "Hubble validation complete!"
```
```bash
#!/bin/bash
# test-cilium-health.sh
set -e

echo "=== Cilium Health Check ==="

# Check Cilium agent status
echo "Checking Cilium agent status..."
kubectl -n kube-system exec ds/cilium -- cilium status --brief
echo "PASS: Cilium agent healthy"

# Check all agents are running
echo "Checking all Cilium agents..."
DESIRED=$(kubectl get ds cilium -n kube-system -o jsonpath='{.status.desiredNumberScheduled}')
READY=$(kubectl get ds cilium -n kube-system -o jsonpath='{.status.numberReady}')
if [ "$DESIRED" != "$READY" ]; then
  echo "FAIL: Not all agents ready ($READY/$DESIRED)"
  exit 1
fi
echo "PASS: All agents running ($READY/$DESIRED)"

# Check endpoint health
echo "Checking endpoints..."
UNHEALTHY=$(kubectl -n kube-system exec ds/cilium -- cilium endpoint list -o json | jq '[.[] | select(.status.state != "ready")] | length')
if [ "$UNHEALTHY" -gt 0 ]; then
  echo "WARNING: $UNHEALTHY unhealthy endpoints"
fi
echo "PASS: Endpoints validated"

# Check cluster connectivity
echo "Running connectivity test..."
cilium connectivity test --test-namespace=cilium-test --single-node
echo "PASS: Connectivity test passed"

echo "=== All health checks passed ==="
```
## Common Anti-Patterns

```yaml
# BAD: No network policies = all traffic allowed!
# Attackers can move laterally freely

# GOOD: Start from default deny
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: default-deny
  namespace: production
spec:
  endpointSelector: {}
  ingress: []
  egress: []
```

```yaml
# BAD: Pods can't resolve DNS names!
egress: []

# GOOD: Allow DNS alongside a default-deny egress
egress:
- toEndpoints:
  - matchLabels:
      io.kubernetes.pod.namespace: kube-system
      k8s-app: kube-dns
  toPorts:
  - ports:
    - port: "53"
      protocol: UDP
```

```yaml
# BAD: Hardcoded pod IP - will break when the pod restarts
egress:
- toCIDR:
  - 10.0.1.42/32

# GOOD: Select by identity labels
egress:
- toEndpoints:
  - matchLabels:
      app: backend
      version: v2
```

```yaml
# BAD: No audit mode - might break production traffic
spec:
  endpointSelector: {...}
  ingress: [...]

# GOOD: Test in audit mode first
metadata:
  annotations:
    cilium.io/policy-audit-mode: "true"
spec:
  endpointSelector: {...}
  ingress: [...]
# Review Hubble logs for AUDIT verdicts
# Remove the annotation when ready to enforce
```

```yaml
# BAD: Allows ANY .com domain!
toFQDNs:
- matchPattern: "*.com"

# GOOD: Scope FQDN rules tightly
toFQDNs:
- matchName: "api.stripe.com"
- matchPattern: "*.stripe.com" # Only Stripe subdomains
```

```bash
# BAD: No Hubble - you can't see why traffic is being dropped!
# Blind troubleshooting with kubectl logs

# GOOD: Enable Hubble and troubleshoot with visibility
helm upgrade cilium cilium/cilium \
  --set hubble.relay.enabled=true \
  --set hubble.ui.enabled=true

hubble observe --verdict DROPPED

# Alert on policy denies
hubble observe --verdict DENIED --output json \
  | jq -r '.flow | "\(.time) \(.source.namespace)/\(.source.pod_name) -> \(.destination.namespace)/\(.destination.pod_name) DENIED"'
# Export metrics to Prometheus and alert on spikes in dropped flows
```

```yaml
# BAD: No resource limits on the agent - can cause OOM kills and crashes

# GOOD: Set limits sized for the cluster
resources:
  limits:
    memory: 4Gi # Adjust based on cluster size
    cpu: 2
  requests:
    memory: 2Gi
    cpu: 500m
```

## Quick Verification Checklist

- `cilium version`, `cilium status` - CLI and agent health
- `kubectl -n kube-system get pods -l k8s-app=cilium` - all agents running
- `cilium connectivity test` - end-to-end datapath validation
- `cilium.io/policy-audit-mode: "true"` - stage policies before enforcing
- `toEntities: [kube-apiserver]` - keep the API server reachable
- `kubectl get pods -l app=backend` - confirm selector labels match real pods
- `hubble observe --verdict DROPPED` - find what a policy is blocking
- `helm template --validate` - validate chart values before upgrading

See references/network-policies.md and references/observability.md.