# Configure Alerting Rules
Configure Prometheus Alertmanager with routing trees, receivers (Slack, PagerDuty, email), inhibition rules, silences, and notification templates for actionable incident alerting. Use when implementing proactive monitoring with automated incident detection, routing alerts to the appropriate team by severity, reducing alert fatigue through grouping and deduplication, integrating with on-call systems like PagerDuty, or migrating from legacy alerting to Prometheus-based alerting.
npx skill4agent add pjt222/development-guides configure-alerting-rules

See Extended Examples for complete configuration files and templates.
version: '3.8'
services:
alertmanager:
image: prom/alertmanager:v0.26.0
ports:
- "9093:9093"
volumes:
- ./alertmanager.yml:/etc/alertmanager/alertmanager.yml
# ... (see EXAMPLES.md for complete configuration)

# alertmanager.yml
global:
resolve_timeout: 5m
slack_api_url: 'https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK'
route:
receiver: 'default-receiver'
group_by: ['alertname', 'cluster', 'service']
group_wait: 30s
group_interval: 5m
repeat_interval: 4h
routes:
- match:
severity: critical
receiver: pagerduty-critical
# ... (see EXAMPLES.md for complete routing, inhibition rules, and receivers)

# prometheus.yml
alerting:
alertmanagers:
- static_configs:
- targets:
- alertmanager:9093
timeout: 10s
api_version: v2

# Verification:
# - Open the Alertmanager UI: http://localhost:9093
# - Check container logs: docker logs alertmanager
# - Check the status API: curl http://alertmanager:9093/api/v2/status
# - Test the Slack webhook: curl -X POST <SLACK_WEBHOOK_URL> -d '{"text":"test"}'
# - Validate the config: amtool check-config alertmanager.yml

# /etc/prometheus/rules/alerts.yml
groups:
- name: instance_alerts
interval: 30s
rules:
- alert: InstanceDown
expr: up == 0
for: 5m
labels:
severity: critical
team: infrastructure
annotations:
summary: "Instance {{ $labels.instance }} is down"
description: "{{ $labels.instance }} has been down for >5min."
runbook_url: "https://wiki.example.com/runbooks/instance-down"
- alert: HighCPUUsage
expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
for: 10m
labels:
severity: warning
annotations:
summary: "High CPU usage on {{ $labels.instance }}"
# ... (see EXAMPLES.md for complete alerts)
# Note: the `for` duration requires the condition to hold before the alert fires.

# prometheus.yml
rule_files:
- "rules/*.yml"promtool check rules /etc/prometheus/rules/alerts.yml
curl -X POST http://localhost:9090/-/reloadpromtool check rules/etc/alertmanager/templates/default.tmpl{{ define "slack.default.title" }}
[{{ .Status | toUpper }}] {{ .GroupLabels.alertname }}
{{ end }}
{{ define "slack.default.text" }}
{{ range .Alerts }}
*Alert:* {{ .Labels.alertname }}
*Severity:* {{ .Labels.severity }}
*Summary:* {{ .Annotations.summary }}
{{ if .Annotations.runbook_url }}*Runbook:* {{ .Annotations.runbook_url }}{{ end }}
{{ end }}
{{ end }}
# ... (see EXAMPLES.md for complete email and PagerDuty templates)

receivers:
- name: 'slack-custom'
slack_configs:
- channel: '#alerts'
title: '{{ template "slack.default.title" . }}'
text: '{{ template "slack.default.text" . }}'

# Test templates (use {{ . | json }} to inspect the available data):
amtool template test --config.file=alertmanager.yml

route:
receiver: 'default-receiver'
group_by: ['alertname', 'cluster', 'service']
group_wait: 30s
routes:
- match:
team: platform
receiver: 'team-platform'
routes:
- match:
severity: critical
receiver: 'pagerduty-platform'
group_wait: 10s
repeat_interval: 15m
continue: true # Also send to Slack
# ... (see EXAMPLES.md for complete routing with time intervals)

# Group by alertname: All HighCPU alerts bundled together
group_by: ['alertname']
# Group by alertname AND cluster: Separate notifications per cluster
group_by: ['alertname', 'cluster']

# Test and inspect routing:
amtool config routes test --config.file=alertmanager.yml --alertname=HighCPU --label=severity=critical
amtool config routes show --config.file=alertmanager.yml
# Use `continue: true` on a route to keep evaluating sibling routes.

inhibit_rules:
# Cluster down suppresses all node alerts in that cluster
- source_match:
alertname: 'ClusterDown'
severity: 'critical'
target_match_re:
alertname: '(InstanceDown|HighCPU|HighMemory)'
equal: ['cluster']
# Service down suppresses latency and error alerts
- source_match:
alertname: 'ServiceDown'
target_match_re:
alertname: '(HighLatency|HighErrorRate)'
equal: ['service', 'namespace']
# ... (see EXAMPLES.md for more inhibition patterns)

# Silence during maintenance
amtool silence add \
instance=app-server-1 \
--author="ops-team" \
--comment="Scheduled maintenance" \
--duration=2h
# List and manage silences
amtool silence query
amtool silence expire <SILENCE_ID>

receivers:
- name: 'pagerduty'
pagerduty_configs:
- routing_key: 'YOUR_INTEGRATION_KEY'
severity: '{{ .CommonLabels.severity }}'
description: '{{ range .Alerts.Firing }}{{ .Annotations.summary }}{{ end }}'
details:
firing: '{{ .Alerts.Firing | len }}'
alertname: '{{ .GroupLabels.alertname }}'
# ... (see EXAMPLES.md for complete integration examples)

receivers:
- name: 'webhook-custom'
webhook_configs:
- url: 'https://your-webhook-endpoint.com/alerts'
send_resolved: true

# Troubleshooting: run Alertmanager with --log.level=debug.
# Common rule pitfalls involve the `for` duration and `group_by` lists (['...']).

# Related skills:
# - setup-prometheus-monitoring
# - define-slo-sli-sla
# - write-incident-runbook
# - build-grafana-dashboards