X-Git-Url: https://git.openstreetmap.org./chef.git/blobdiff_plain/3a31afdddc4082bd26bc0840ef53c1f77ad75c31..fa2ffc4b6ec0b9572dac9470bcfb474894b8a722:/cookbooks/prometheus/templates/default/alert_rules.yml.erb

diff --git a/cookbooks/prometheus/templates/default/alert_rules.yml.erb b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
index 5a9a70f5d..92c38d5c5 100644
--- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb
+++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
@@ -1,6 +1,79 @@
 # DO NOT EDIT - This file is being maintained by Chef
 
 groups:
+  - name: alertmanager
+    rules:
+      - alert: prometheus target missing
+        expr: up == 0
+        for: 5m
+        labels:
+          alertgroup: "prometheus"
+  - name: apache
+    rules:
+      - alert: apache down
+        expr: apache_up == 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: apache workers busy
+        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          busy_workers: "{{ $value | humanizePercentage }}"
+  - name: cpu
+    rules:
+      - alert: cpu pressure
+        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.3
+        for: 15m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          pressure: "{{ $value | humanizePercentage }}"
+  - name: database
+    rules:
+      - alert: postgres replication delay
+        expr: pg_replication_lag_seconds > 5
+        for: 5m
+        labels:
+          alertgroup: database
+        annotations:
+          delay: "{{ $value | humanizeDuration }}"
+  - name: fastly
+    rules:
+      - alert: error rate
+        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
+        for: 15m
+        labels:
+          alertgroup: fastly
+        annotations:
+          error_rate: "{{ $value | humanizePercentage }}"
+  - name: filesystem
+    rules:
+      - alert: readonly filesystem
+        expr: node_filesystem_readonly == 1
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: filesystem low on space
+        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          percentage_free: "{{ $value | humanizePercentage }}"
+          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
+          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
+      - alert: filesystem low on inodes
+        expr: node_filesystem_files_free / node_filesystem_files < 0.1
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          percentage_free: "{{ $value | humanizePercentage }}"
+          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
+          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
   - name: hwmon
     rules:
       - alert: hwmon fan alarm
@@ -29,6 +102,15 @@ groups:
           in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
           in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
           in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
+  - name: io
+    rules:
+      - alert: io pressure
+        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
+        for: 60m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          pressure: "{{ $value | humanizePercentage }}"
   - name: ipmi
     rules:
       - alert: ipmi fan alarm
@@ -52,6 +134,27 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
+      - alert: ipmi power alarm
+        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+  - name: mail
+    rules:
+      - alert: exim queue length
+        expr: exim_queue > exim_queue_limit
+        for: 60m
+        labels:
+          alertgroup: mail
+        annotations:
+          queue_length: "{{ $value }}"
+      - alert: mailman queue length
+        expr: mailman_queue_length > 200
+        for: 60m
+        labels:
+          alertgroup: mail
+        annotations:
+          queue_length: "{{ $value }}"
   - name: mdadm
     rules:
       - alert: mdadm array inactive
@@ -64,6 +167,16 @@ groups:
           active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
           failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
           spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
+      - alert: mdadm array degraded
+        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
+          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
+          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
+          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
       - alert: mdadm disk failed
         expr: node_md_disks{state="failed"} > 0
         for: 0m
@@ -74,3 +187,226 @@ groups:
           active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
           failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
           spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
+  - name: memory
+    rules:
+      - alert: low memory
+        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
+        for: 15m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          memory_free: "{{ $value | humanizePercentage }}"
+      - alert: memory pressure
+        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
+        for: 60m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          pressure: "{{ $value | humanizePercentage }}"
+      - alert: oom kill detected
+        expr: increase(node_vmstat_oom_kill[1m]) > 0
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          new_oom_kills: "{{ $value }}"
+  - name: network
+    rules:
+      - alert: interface transmit rate
+        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          bandwidth_used: "{{ $value | humanizePercentage }}"
+      - alert: interface receive rate
+        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          bandwidth_used: "{{ $value | humanizePercentage }}"
+      - alert: interface transmit errors
+        expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          error_rate: "{{ $value | humanizePercentage }}"
+      - alert: interface receive errors
+        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          error_rate: "{{ $value | humanizePercentage }}"
+      - alert: conntrack entries
+        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          entries_used: "{{ $value | humanizePercentage }}"
+  - name: planet
+    rules:
+      - alert: planet dump overdue
+        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
+        for: 24h
+        labels:
+          alertgroup: planet
+        annotations:
+          overdue_by: "{{ $value | humanizeDuration }}"
+      - alert: notes dump overdue
+        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
+        for: 6h
+        labels:
+          alertgroup: planet
+        annotations:
+          overdue_by: "{{ $value | humanizeDuration }}"
+      - alert: daily replication feed delayed
+        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
+        for: 3h
+        labels:
+          alertgroup: planet
+        annotations:
+          delayed_by: "{{ $value | humanizeDuration }}"
+      - alert: hourly replication feed delayed
+        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
+        for: 30m
+        labels:
+          alertgroup: planet
+        annotations:
+          delayed_by: "{{ $value | humanizeDuration }}"
+      - alert: minutely replication feed delayed
+        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
+        for: 5m
+        labels:
+          alertgroup: planet
+        annotations:
+          delayed_by: "{{ $value | humanizeDuration }}"
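+      # The dump and feed alerts in this group share one pattern: fire when the
+      # newest file's mtime is older than the feed's publication interval, with
+      # the "and ignoring (job, name, path) chef_role{name='planetdump'} == 1"
+      # term restricting each alert to hosts that carry the planetdump role.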
+      - alert: changeset replication feed delayed
+        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
+        for: 5m
+        labels:
+          alertgroup: planet
+        annotations:
+          delayed_by: "{{ $value | humanizeDuration }}"
+  - name: postgresql
+    rules:
+      - alert: postgresql down
+        expr: pg_up == 0
+        for: 1m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: postgresql replication delay
+        expr: pg_replication_lag_seconds > 5
+        for: 1m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          delay: "{{ $value | humanizeDuration }}"
+      - alert: postgresql connection limit
+        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
+        for: 1m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          connections_used: "{{ $value | humanizePercentage }}"
+      - alert: postgresql deadlocks
+        expr: increase(pg_stat_database_deadlocks[1m]) > 5
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          new_deadlocks: "{{ $value }}"
+      - alert: postgresql slow queries
+        expr: pg_slow_queries > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          queries: "{{ $value }}"
+  - name: smart
+    rules:
+      - alert: smart failure
+        expr: smart_health_status == 0
+        for: 60m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: smart ssd wearout approaching
+        expr: smart_percentage_used / 100 >= 0.9
+        for: 60m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          percentage_used: "{{ $value | humanizePercentage }}"
+  - name: ssl
+    rules:
+      - alert: ssl certificate probe failed
+        expr: ssl_probe_success == 0
+        for: 60m
+        labels:
+          alertgroup: ssl
+      - alert: ssl certificate expiry
+        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
+        for: 0m
+        labels:
+          alertgroup: ssl
+        annotations:
+          expires_in: "{{ $value | humanizeDuration }}"
+      - alert: ssl certificate revoked
+        expr: ssl_ocsp_response_status == 1
+        for: 0m
+        labels:
+          alertgroup: ssl
+      - alert: ocsp status unknown
+        expr: ssl_ocsp_response_status == 2
+        for: 0m
+        labels:
+          alertgroup: ssl
+  - name: systemd
+    rules:
+      - alert: systemd failed service
+        expr: node_systemd_unit_state{state="failed"} == 1
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+  - name: tile
+    rules:
+      - alert: renderd replication delay
+        expr: renderd_replication_delay > 120
+        for: 5m
+        labels:
+          alertgroup: tile
+        annotations:
+          delay: "{{ $value | humanizeDuration }}"
+      - alert: missed tile rate
+        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
+        for: 5m
+        labels:
+          alertgroup: tile
+        annotations:
+          miss_rate: "{{ $value | humanizePercentage }}"
+  - name: time
+    rules:
+      - alert: clock not synchronising
+        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: clock skew detected
+        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
+  - name: web
+    rules:
+      - alert: web error rate
+        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
+        for: 5m
+        labels:
+          alertgroup: web
+        annotations:
+          error_rate: "{{ $value | humanizePercentage }}"
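+# Sketch of a pre-deploy check (assumes promtool, shipped with prometheus, is
+# available on the node): render this ERB template, then run
+#   promtool check rules /path/to/rendered/alert_rules.yml
+# to catch YAML and PromQL syntax errors before Prometheus reloads the file.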