# Tailscale VPN — Setup, Configuration, and Management
Comprehensive Tailscale VPN setup, configuration, and management for mesh networking, secure access, and zero-trust infrastructure. Covers installation, CLI commands, subnet routers, exit nodes, Tailscale SSH, ACL/grants configuration, MagicDNS, Tailscale Serve/Funnel, API automation, and production deployment best practices.
npx skill4agent add el-feo/ai-context tailscale

Trigger Keywords: tailscale, tailnet, wireguard vpn, mesh vpn, tailscale ssh, exit node, subnet router, tailscale acl, magicDNS, tailscale serve, tailscale funnel

# Install (Linux)
curl -fsSL https://tailscale.com/install.sh | sh
# Install (macOS)
brew install tailscale

# Start Tailscale and authenticate
sudo tailscale up
# Check status
tailscale status
# Get your Tailscale IP
tailscale ip -4
# Connect via MagicDNS hostname
ssh user@machine-name

# Connect to your tailnet
tailscale up
# Disconnect but keep daemon running
tailscale down
# Check connection status and peers
tailscale status
# View detailed network map
tailscale status --json | jq
# Ping another tailnet device (TSMP ping)
tailscale ping machine-name
# Test connectivity including ACLs (ICMP ping)
tailscale ping --icmp machine-name

# Enable IP forwarding (Linux)
echo 'net.ipv4.ip_forward = 1' | sudo tee -a /etc/sysctl.d/99-tailscale.conf
echo 'net.ipv6.conf.all.forwarding = 1' | sudo tee -a /etc/sysctl.d/99-tailscale.conf
sudo sysctl -p /etc/sysctl.d/99-tailscale.conf
# Advertise routes to your local network
sudo tailscale up --advertise-routes=192.168.1.0/24,10.0.0.0/24

# Linux needs explicit flag to accept routes
sudo tailscale up --accept-routes
# Other platforms accept routes automatically

# Enable IP forwarding (same as subnet router)
echo 'net.ipv4.ip_forward = 1' | sudo tee -a /etc/sysctl.d/99-tailscale.conf
echo 'net.ipv6.conf.all.forwarding = 1' | sudo tee -a /etc/sysctl.d/99-tailscale.conf
sudo sysctl -p /etc/sysctl.d/99-tailscale.conf
# Advertise as exit node
sudo tailscale up --advertise-exit-node

# Use specific exit node
tailscale set --exit-node=exit-node-name
# Use suggested exit node (auto-selects best)
tailscale set --exit-node=auto:any
# Allow LAN access while using exit node
tailscale set --exit-node=exit-node-name --exit-node-allow-lan-access
# Stop using exit node
tailscale set --exit-node=

# Enable Tailscale SSH server
sudo tailscale set --ssh

{
"grants": [
{
"src": ["user@example.com"],
"dst": ["tag:servers"],
"ip": ["22"]
}
],
"ssh": [
{
"action": "accept",
"src": ["user@example.com"],
"dst": ["tag:servers"],
"users": ["root", "ubuntu", "autogroup:nonroot"]
}
]
}

# No special setup needed on client!
ssh machine-name
# Or use specific user
ssh ubuntu@machine-name
# Works with SCP and SFTP too
scp file.txt machine-name:/tmp/

{
"ssh": [
{
"action": "check", // Requires recent SSO re-auth
"src": ["user@example.com"],
"dst": ["tag:servers"],
"users": ["root"]
}
]
}

# Serve local web server to tailnet
tailscale serve 3000
# Serve specific path
tailscale serve --https=443 --set-path=/app 8080
# Serve static files
tailscale serve --https=443 /var/www/html
# Serve with TLS-terminated TCP
tailscale serve --tls-terminated-tcp=5432 localhost:5432
# Check status
tailscale serve status
# Turn off
tailscale serve off

# Share to entire internet (must be on ports 443, 8443, or 10000)
tailscale funnel 3000
# Turn off
tailscale funnel off

{
"acls": [
{
"action": "accept",
"src": ["*"],
"dst": ["*:*"]
}
]
}

{
"groups": {
"group:engineering": ["user1@example.com", "user2@example.com"],
"group:ops": ["ops@example.com"]
},
"tagOwners": {
"tag:dev": ["group:engineering"],
"tag:prod": ["group:ops"]
},
"acls": [
{
"action": "accept",
"src": ["group:engineering"],
"dst": ["tag:dev:*"]
},
{
"action": "accept",
"src": ["group:ops"],
"dst": ["tag:prod:*"]
}
]
}

{
"grants": [
{
"src": ["group:engineering"],
"dst": ["tag:dev"],
"ip": ["*"]
},
{
"src": ["group:ops"],
"dst": ["tag:prod"],
"ip": ["22", "443", "80"]
}
]
}

# On home server
sudo tailscale up --advertise-routes=192.168.1.0/24
# From anywhere
ssh homeserver
# Access 192.168.1.* devices through homeserver

# Set home device as exit node before trip
tailscale set --exit-node=home-server
# All traffic now routes through home

# Site A router
sudo tailscale up --advertise-routes=10.0.0.0/24
# Site B router
sudo tailscale up --advertise-routes=10.1.0.0/24 --accept-routes
# Now Site B can reach Site A's 10.0.0.0/24 network

# Check if devices can establish connection (ignores ACLs)
tailscale ping --tsmp peer-name
# Check end-to-end including ACLs
tailscale ping --icmp peer-name
# View network map and connection details
tailscale netcheck
# Debug daemon logs
tailscale debug daemon-logs
# Check DERP relay status
tailscale netcheck

# Preview rules for specific user (in admin console)
# Access Controls → Preview rules → select user
# Test ACL in policy file
# Add to policy:
"tests": [
{
"src": "user@example.com",
"accept": ["tag:server:22"],
"deny": ["tag:prod:*"]
}
]

# Verify IP forwarding enabled
cat /proc/sys/net/ipv4/ip_forward # Should be 1
# Check firewall isn't blocking
sudo iptables -L -v -n
sudo iptables -t nat -L -v -n
# Verify routes advertised
tailscale status | grep "subnet router"
# On client, ensure routes accepted
tailscale status | grep "routes accepted"

# Check MagicDNS enabled
tailscale status | grep MagicDNS
# In admin console: DNS → Enable MagicDNS
# Flush DNS cache
# macOS
sudo dscacheutil -flushcache
# Linux (systemd-resolved)
sudo systemd-resolve --flush-caches
# Or on newer systemd: sudo resolvectl flush-caches

# Advertise tags when bringing a device up
sudo tailscale up --advertise-tags=tag:server

# Pre-authenticate devices with an auth key (--auth-key)

{
"grants": [{
"src": ["group:devs"],
"dst": ["tag:dev"],
"ip": ["22", "80", "443"] // Only SSH and HTTP(S)
}]
}

# Generate in admin console → Settings → Keys
sudo tailscale up --auth-key=tskey-auth-...

# Multiple subnet routers with same routes = automatic failover
# Router 1
sudo tailscale up --advertise-routes=10.0.0.0/24
# Router 2
sudo tailscale up --advertise-routes=10.0.0.0/24

# Improve UDP throughput on Linux subnet routers / exit nodes
NETDEV=$(ip -o route get 8.8.8.8 | cut -f 5 -d " ")
sudo ethtool -K "$NETDEV" rx-udp-gro-forwarding on rx-gro-list off

# Verify everything is up
tailscale status

References:
- references/cli-reference.md
- references/acl-examples.md
- references/api-usage.md
- references/troubleshooting.md
- references/production-setup.md
- scripts/setup_subnet_router.sh
- scripts/setup_exit_node.sh