X-Git-Url: https://git.openstreetmap.org./chef.git/blobdiff_plain/e09802c94505e0d11915429f0bf17d56bbcdcfb2..06f75772a25febcfd00bfb8607ce3931520adb22:/cookbooks/prometheus/templates/default/alert_rules.yml.erb diff --git a/cookbooks/prometheus/templates/default/alert_rules.yml.erb b/cookbooks/prometheus/templates/default/alert_rules.yml.erb index 0469226db..d717e4f0a 100644 --- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb +++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb @@ -1,13 +1,43 @@ # DO NOT EDIT - This file is being maintained by Chef groups: - - name: alertmanager + - name: amsterdam rules: - - alert: prometheus target missing - expr: up == 0 - for: 5m + - alert: uplink + expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1 + for: 6m labels: - alertgroup: "prometheus" + alertgroup: "amsterdam" + annotations: + status: "{{ $value }}" + - alert: pdu current draw + expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28 + for: 6m + labels: + alertgroup: "amsterdam" + annotations: + current: "{{ $value | humanize }}A" + - alert: site power + expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5 + for: 6m + labels: + alertgroup: "amsterdam" + annotations: + current: "{{ $value | humanize }}kVA" + - alert: site temperature + expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26 + for: 6m + labels: + alertgroup: "amsterdam" + annotations: + temperature: "{{ $value | humanize }}C" + - alert: site humidity + expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65 + for: 6m + labels: + alertgroup: "amsterdam" + annotations: + humidity: "{{ $value | humanizePercentage }}" - name: apache rules: - alert: apache down @@ -22,17 +52,141 @@ groups: alertgroup: "{{ $labels.instance }}" annotations: busy_workers: "{{ $value | humanizePercentage }}" + - name: chef + rules: + - alert: chef client not running + expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600 + for: 12h + labels: + alertgroup: "{{ $labels.instance }}" + annotations: + down_time: "{{ $value | humanizeDuration }}" + - name: cisco + rules: + - alert: cisco fan alarm + expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + annotations: + fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}" + - alert: cisco temperature alarm + expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + annotations: + temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . 
| first | value | humanize }}C{{end}}" + - alert: cisco main power alarm + expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + - alert: cisco redundant power alarm + expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + - name: cpu + rules: + - alert: cpu pressure + expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75 + for: 60m + labels: + alertgroup: "{{ $labels.instance }}" + annotations: + pressure: "{{ $value | humanizePercentage }}" - name: database rules: - alert: postgres replication delay - expr: pg_replication_lag_seconds > 5 - for: 5m + expr: pg_replication_lag_seconds > 30 + for: 15m labels: alertgroup: database annotations: delay: "{{ $value | humanizeDuration }}" + - name: discourse + rules: + - alert: discourse job failure rate + expr: rate(discourse_job_failures[5m]) > 0 + for: 5m + labels: + alertgroup: discourse + annotations: + failure_rate: "{{ $value }} jobs/s" + - name: dublin + rules: + - alert: uplink + expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1 + for: 6m + labels: + alertgroup: "dublin" + annotations: + status: "{{ $value }}" + - alert: pdu current draw + expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28 + for: 6m + labels: + alertgroup: "dublin" + annotations: + current: "{{ $value | humanize }}A" + - alert: site power + expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4 + for: 6m + labels: + alertgroup: "dublin" + annotations: + current: "{{ $value | humanize }}kVA" + - alert: site temperature + expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26 + for: 6m + labels: + alertgroup: "dublin" + annotations: + temperature: "{{ $value | humanize }}C" + - alert: site humidity + expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65 + for: 6m + labels: + alertgroup: "dublin" + annotations: + humidity: "{{ $value | humanizePercentage }}" + - name: fastly + rules: + - alert: fastly error rate + expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005 + for: 15m + labels: + alertgroup: fastly + annotations: + error_rate: "{{ $value | humanizePercentage }}" + - alert: fastly frontend healthcheck warning + expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2 + for: 15m + labels: + alertgroup: fastly + - alert: fastly frontend healthcheck critical + expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter) + for: 5m + labels: + alertgroup: fastly + - alert: fastly backend healthcheck warning + expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10 + for: 15m + labels: + alertgroup: fastly + - alert: fastly backend healthcheck critical + expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend) + for: 5m + labels: + alertgroup: fastly - name: filesystem rules: + - alert: readonly filesystem + expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d]) + for: 0m + labels: + 
alertgroup: "{{ $labels.instance }}" - alert: filesystem low on space expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05 for: 5m @@ -79,6 +233,15 @@ groups: in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}" in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}" in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}" + - name: io + rules: + - alert: io pressure + expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6 + for: 60m + labels: + alertgroup: "{{ $labels.instance }}" + annotations: + pressure: "{{ $value | humanizePercentage }}" - name: ipmi rules: - alert: ipmi fan alarm @@ -107,6 +270,60 @@ groups: for: 5m labels: alertgroup: "{{ $labels.instance }}" + - name: juniper + rules: + - alert: juniper red alarms + expr: juniper_alarms_red_count > 0 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + annotations: + alarm_count: "{{ $value }} alarms" + - alert: juniper yellow alarms + expr: juniper_alarms_yellow_count > 0 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + annotations: + alarm_count: "{{ $value }} alarms" + - alert: juniper cpu alarm + expr: junos_route_engine_load_average_five / 2 > 0.5 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + annotations: + load_average: "{{ $value | humanizePercentage }}" + - alert: juniper fan alarm + expr: junos_environment_fan_up != 1 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + - alert: juniper power alarm + expr: junos_environment_power_up != 1 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + - name: mail + rules: + - alert: exim down + expr: exim_up == 0 + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" + - alert: exim queue length + expr: exim_queue > ignoring(job) exim_queue_limit + for: 60m + labels: + alertgroup: mail + annotations: + queue_length: "{{ $value }}" + - alert: mailman queue length + expr: mailman_queue_length > 200 + for: 60m + labels: + alertgroup: mail + annotations: + queue_length: "{{ $value }}" - name: mdadm rules: - alert: mdadm array inactive @@ -143,18 +360,18 @@ groups: rules: - alert: low memory expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1 - for: 5m + for: 15m labels: alertgroup: "{{ $labels.instance }}" annotations: memory_free: "{{ $value | humanizePercentage }}" - alert: memory pressure - expr: rate(node_vmstat_pgmajfault[1m]) > 1000 - for: 5m + expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6 + for: 60m labels: alertgroup: "{{ $labels.instance }}" annotations: - major_page_faults: "{{ $value }} faults/s" + pressure: "{{ $value | humanizePercentage }}" - alert: oom kill detected expr: increase(node_vmstat_oom_kill[1m]) > 0 for: 0m @@ -162,29 +379,50 @@ groups: alertgroup: "{{ $labels.instance }}" annotations: new_oom_kills: "{{ $value }}" + - name: mysql + rules: + - alert: mysql down + expr: mysql_up == 0 + for: 1m + labels: + alertgroup: "{{ $labels.instance }}" + - alert: mysql connection limit + expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8 + for: 1m + labels: + alertgroup: "{{ $labels.instance }}" + annotations: + connections_used: "{{ 
$value | humanizePercentage }}" - name: network rules: - alert: interface transmit rate - expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98 + expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99 for: 5m labels: alertgroup: "{{ $labels.instance }}" annotations: bandwidth_used: "{{ $value | humanizePercentage }}" - alert: interface receive rate - expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98 + expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99 for: 5m labels: alertgroup: "{{ $labels.instance }}" annotations: bandwidth_used: "{{ $value | humanizePercentage }}" - alert: interface transmit errors - expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01 + expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01 for: 5m labels: alertgroup: "{{ $labels.instance }}" annotations: error_rate: "{{ $value | humanizePercentage }}" + - alert: wireguard interface transmit errors + expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05 + for: 1h + labels: + alertgroup: "{{ $labels.instance }}" + annotations: + error_rate: "{{ $value | humanizePercentage }}" - alert: interface receive errors expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01 for: 5m @@ -199,6 +437,92 @@ groups: alertgroup: "{{ $labels.instance }}" annotations: entries_used: "{{ $value | humanizePercentage }}" + - name: nominatim + rules: + - alert: nominatim replication delay + expr: nominatim_replication_delay > 10800 + for: 1h + labels: + alertgroup: nominatim + annotations: + delay: "{{ $value | humanizeDuration }}" + - name: overpass + rules: + - alert: overpass osm database age + expr: overpass_database_age_seconds{database="osm"} > 3600 + for: 1h + labels: + alertgroup: overpass + annotations: + age: "{{ $value | humanizeDuration }}" + - alert: overpass area database age + expr: overpass_database_age_seconds{database="area"} > 86400 + for: 1h + labels: + alertgroup: overpass + annotations: + age: "{{ $value | humanizeDuration }}" + - name: passenger + rules: + - alert: passenger down + expr: passenger_up == 0 + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" + - alert: passenger queuing + expr: passenger_top_level_request_queue > 0 + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" + - alert: passenger application queuing + expr: passenger_app_request_queue > 0 + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" + - name: planet + rules: + - alert: planet dump overdue + expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1 + for: 24h + labels: + alertgroup: planet + annotations: + overdue_by: "{{ $value | humanizeDuration }}" + - alert: notes dump overdue + expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1 + for: 6h + labels: + alertgroup: planet + annotations: + overdue_by: "{{ $value | humanizeDuration }}" + - alert: daily replication feed delayed + expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1 + for: 3h + 
labels: + alertgroup: planet + annotations: + delayed_by: "{{ $value | humanizeDuration }}" + - alert: hourly replication feed delayed + expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1 + for: 30m + labels: + alertgroup: planet + annotations: + delayed_by: "{{ $value | humanizeDuration }}" + - alert: minutely replication feed delayed + expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1 + for: 5m + labels: + alertgroup: planet + annotations: + delayed_by: "{{ $value | humanizeDuration }}" + - alert: changeset replication feed delayed + expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1 + for: 5m + labels: + alertgroup: planet + annotations: + delayed_by: "{{ $value | humanizeDuration }}" - name: postgresql rules: - alert: postgresql down @@ -207,8 +531,8 @@ groups: labels: alertgroup: "{{ $labels.instance }}" - alert: postgresql replication delay - expr: pg_replication_lag_seconds > 5 - for: 1m + expr: pg_replication_lag_seconds > 30 + for: 15m labels: alertgroup: "{{ $labels.instance }}" annotations: @@ -221,7 +545,7 @@ groups: annotations: connections_used: "{{ $value | humanizePercentage }}" - alert: postgresql deadlocks - expr: increase(pg_stat_database_deadlocks[1m]) > 5 + expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5 for: 0m labels: alertgroup: "{{ $labels.instance }}" @@ -234,6 +558,63 @@ groups: alertgroup: "{{ $labels.instance }}" annotations: queries: "{{ $value }}" + - alert: postgresql idle transactions + expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server) + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" + annotations: + queries: "{{ $value }}" + - name: prometheus + rules: + - alert: prometheus configuration error + expr: prometheus_config_last_reload_successful == 0 + for: 10m + labels: + alertgroup: "prometheus" + - alert: prometheus target missing + expr: up == 0 + for: 10m + labels: + alertgroup: "prometheus" + - name: raid + rules: + - alert: raid controller battery failed + expr: ohai_controller_info{battery_status="failed"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" + - alert: raid controller battery recharging + expr: ohai_controller_info{battery_status="recharging"} > 0 + for: 4h + labels: + alertgroup: "{{ $labels.instance }}" + - alert: raid array degraded + expr: ohai_array_info{status="degraded"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" + - alert: raid disk failed + expr: ohai_disk_info{status="failed"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" + - name: rasdaemon + rules: + - alert: memory controller errors + expr: increase(rasdaemon_mc_events_total[1m]) > 0 + for: 0m + labels: + alertgroup: "{{ $labels.instance }}" + annotations: + new_errors: "{{ $value }}" + - alert: pcie aer errors + expr: increase(rasdaemon_aer_events_total[1m]) > 0 + for: 0m + labels: + alertgroup: "{{ $labels.instance }}" + annotations: + new_errors: "{{ $value }}" - name: smart rules: - alert: smart failure @@ -242,12 +623,30 @@ groups: labels: alertgroup: "{{ $labels.instance }}" - alert: smart ssd wearout approaching - expr: 
smart_percentage_used >= 90 + expr: smart_percentage_used / 100 >= 0.8 for: 60m labels: alertgroup: "{{ $labels.instance }}" annotations: percentage_used: "{{ $value | humanizePercentage }}" + - name: smokeping + rules: + - alert: packet loss + expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02 + for: 10m + labels: + alertgroup: smokeping + annotations: + loss_rate: "{{ $value | humanizePercentage }}" + - name: snmp + rules: + - alert: snmp pdus missing + expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0 + for: 15m + labels: + alertgroup: snmp + annotations: + missing_pdus: "{{ $value }}" - name: ssl rules: - alert: ssl certificate probe failed @@ -272,18 +671,53 @@ groups: for: 0m labels: alertgroup: ssl + - name: statuscake + rules: + - alert: statuscake uptime check failing + expr: statuscake_paused == 0 and statuscake_up == 0 + for: 10m + labels: + alertgroup: statuscake - name: systemd rules: - alert: systemd failed service - expr: node_systemd_unit_state{state="failed"} == 1 + expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1 for: 5m labels: alertgroup: "{{ $labels.instance }}" + - alert: systemd failed chef client service + expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0 + for: 0m + labels: + alertgroup: "{{ $labels.instance }}" + - name: taginfo + rules: + - alert: taginfo planet age + expr: time() - taginfo_data_from_seconds > 129600 + for: 0m + labels: + alertgroup: taginfo + annotations: + age: "{{ $value | humanizeDuration }}" + - alert: taginfo database age + expr: time() - taginfo_database_update_finish_seconds > 129600 + for: 0m + labels: + alertgroup: taginfo + annotations: + age: "{{ $value | humanizeDuration }}" + - alert: taginfo database size + expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1 + for: 30m + labels: + alertgroup: taginfo + annotations: + size_change: "{{ $value | humanizePercentage }}" - name: tile rules: - alert: renderd replication delay expr: renderd_replication_delay > 120 - for: 5m + for: 15m labels: alertgroup: tile annotations: @@ -295,6 +729,13 @@ groups: alertgroup: tile annotations: miss_rate: "{{ $value | humanizePercentage }}" + - alert: tile render rate + expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0 + for: 15m + labels: + alertgroup: tile + annotations: + render_rate: "{{ $value }} tiles/s" - name: time rules: - alert: clock not synchronising @@ -312,9 +753,16 @@ groups: - name: web rules: - alert: web error rate - expr: sum(rate(api_call_count_total{status=~"5.*"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002 + expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002 for: 5m labels: alertgroup: web annotations: error_rate: "{{ $value | humanizePercentage }}" + - alert: job processing rate + expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1 + for: 1h + labels: + alertgroup: web + annotations: + job_processing_rate: "{{ $value | humanizePercentage }}"
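
A quick way to sanity-check alerting rules like the ones in this template before deploying them is Prometheus's rule unit-testing support in promtool. The snippet below is a minimal sketch only: it assumes the rendered output of this ERB template has been saved as alert_rules.yml next to the test file, and the job/instance labels and file names are hypothetical, chosen purely for illustration of the "prometheus target missing" rule (expr: up == 0, for: 10m).

# alert_rules_test.yml - hypothetical unit test for the "prometheus target missing" rule
rule_files:
  - alert_rules.yml            # rendered output of alert_rules.yml.erb (assumption)
evaluation_interval: 1m
tests:
  - interval: 1m
    input_series:
      # a scrape target that stays down for the whole test window (made-up labels)
      - series: 'up{job="node", instance="dummy.example.net"}'
        values: '0x20'
    alert_rule_test:
      - eval_time: 15m
        alertname: prometheus target missing
        exp_alerts:
          - exp_labels:
              alertgroup: prometheus
              job: node
              instance: dummy.example.net

Run with: promtool test rules alert_rules_test.yml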