X-Git-Url: https://git.openstreetmap.org./chef.git/blobdiff_plain/090c2f22122d4f99a07198f59c55285e0250b78b..3eb308cdf3235e6d34cba81d1c88e3ab6ceaac9f:/cookbooks/prometheus/templates/default/alert_rules.yml.erb

diff --git a/cookbooks/prometheus/templates/default/alert_rules.yml.erb b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
index 5809b570b..c34647330 100644
--- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb
+++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
@@ -1,32 +1,32 @@
-c# DO NOT EDIT - This file is being maintained by Chef
+# DO NOT EDIT - This file is being maintained by Chef
 
 groups:
   - name: amsterdam
     rules:
       - alert: pdu current draw
         expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
-        for: 5m
+        for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
           current: "{{ $value | humanize }}A"
       - alert: site current draw
         expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
-        for: 5m
+        for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
           current: "{{ $value | humanize }}A"
       - alert: site temperature
-        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 25
-        for: 5m
+        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
+        for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
           temperature: "{{ $value | humanize }}C"
       - alert: site humidity
         expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
-        for: 5m
+        for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
@@ -90,8 +90,8 @@ groups:
   - name: cpu
     rules:
       - alert: cpu pressure
-        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
-        for: 15m
+        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
+        for: 60m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
@@ -99,12 +99,51 @@ groups:
   - name: database
     rules:
       - alert: postgres replication delay
-        expr: pg_replication_lag_seconds > 5
-        for: 5m
+        expr: pg_replication_lag_seconds > 30
+        for: 15m
         labels:
           alertgroup: database
         annotations:
           delay: "{{ $value | humanizeDuration }}"
+  - name: discourse
+    rules:
+      - alert: discourse job failure rate
+        expr: rate(discourse_job_failures[5m]) > 0
+        for: 5m
+        labels:
+          alertgroup: discourse
+        annotations:
+          failure_rate: "{{ $value }} jobs/s"
+  - name: dublin
+    rules:
+      - alert: pdu current draw
+        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 13
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          current: "{{ $value | humanize }}A"
+      - alert: site current draw
+        expr: sum(rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10) > 17
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          current: "{{ $value | humanize }}A"
+      - alert: site temperature
+        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          temperature: "{{ $value | humanize }}C"
+      - alert: site humidity
+        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          humidity: "{{ $value | humanizePercentage }}"
   - name: fastly
     rules:
       - alert: fastly error rate
@@ -115,19 +154,19 @@ groups:
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
       - alert: fastly healthcheck failing
-        expr: count(fastly_healthcheck_status == 0) > 0
+        expr: count(fastly_healthcheck_status == 0) by (service) > 0
         for: 15m
         labels:
           alertgroup: fastly
-      - alert: fastly healthcheck failing
-        expr: count(fastly_healthcheck_status == 0) > 4
+      - alert: multiple fastly healthchecks failing
+        expr: count(fastly_healthcheck_status == 0) by (service) > 4
         for: 5m
         labels:
           alertgroup: fastly
   - name: filesystem
     rules:
       - alert: readonly filesystem
-        expr: node_filesystem_readonly == 1
+        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
         for: 0m
         labels:
           alertgroup: "{{ $labels.instance }}"
@@ -216,18 +255,28 @@ groups:
           alertgroup: "{{ $labels.instance }}"
   - name: juniper
     rules:
+      - alert: juniper cpu alarm
+        expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
       - alert: juniper fan alarm
-        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!="running"} > 0
+        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
       - alert: juniper power alarm
-        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!="running"} > 0
+        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
   - name: mail
     rules:
+      - alert: exim down
+        expr: exim_up == 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
       - alert: exim queue length
         expr: exim_queue > exim_queue_limit
         for: 60m
@@ -297,6 +346,20 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           new_oom_kills: "{{ $value }}"
+  - name: mysql
+    rules:
+      - alert: mysql down
+        expr: mysql_up == 0
+        for: 1m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: mysql connection limit
+        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
+        for: 1m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          connections_used: "{{ $value | humanizePercentage }}"
   - name: network
     rules:
       - alert: interface transmit rate
@@ -320,7 +383,7 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
-      - alert: interface transmit errors
+      - alert: wireguard interface transmit errors
         expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
         for: 1h
         labels:
@@ -341,6 +404,48 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           entries_used: "{{ $value | humanizePercentage }}"
+  - name: nominatim
+    rules:
+      - alert: nominatim replication delay
+        expr: nominatim_replication_delay > 10800
+        for: 1h
+        labels:
+          alertgroup: nominatim
+        annotations:
+          delay: "{{ $value | humanizeDuration }}"
+  - name: overpass
+    rules:
+      - alert: overpass osm database age
+        expr: overpass_database_age_seconds{database="osm"} > 3600
+        for: 1h
+        labels:
+          alertgroup: overpass
+        annotations:
+          age: "{{ $value | humanizeDuration }}"
+      - alert: overpass area database age
+        expr: overpass_database_age_seconds{database="area"} > 86400
+        for: 1h
+        labels:
+          alertgroup: overpass
+        annotations:
+          age: "{{ $value | humanizeDuration }}"
+  - name: passenger
+    rules:
+      - alert: passenger down
+        expr: passenger_up == 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: passenger queuing
+        expr: passenger_top_level_request_queue > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: passenger application queuing
+        expr: passenger_app_request_queue > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
   - name: planet
     rules:
       - alert: planet dump overdue
@@ -393,8 +498,8 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: postgresql replication delay
-        expr: pg_replication_lag_seconds > 5
-        for: 1m
+        expr: pg_replication_lag_seconds > 30
+        for: 15m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
@@ -407,7 +512,7 @@ groups:
         annotations:
           connections_used: "{{ $value | humanizePercentage }}"
       - alert: postgresql deadlocks
-        expr: increase(pg_stat_database_deadlocks[1m]) > 5
+        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
         for: 0m
         labels:
           alertgroup: "{{ $labels.instance }}"
@@ -434,6 +539,11 @@ groups:
           alertgroup: "prometheus"
   - name: raid
     rules:
+      - alert: raid controller battery failed
+        expr: ohai_controller_info{battery_status="failed"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
       - alert: raid array degraded
         expr: ohai_array_info{status="degraded"} > 0
         for: 5m
@@ -444,6 +554,22 @@ groups:
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
+  - name: rasdaemon
+    rules:
+      - alert: memory controller errors
+        expr: increase(rasdaemon_mc_events_total[1m]) > 0
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          new_errors: "{{ $value }}"
+      - alert: pcie aer errors
+        expr: increase(rasdaemon_aer_events_total[1m]) > 0
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          new_errors: "{{ $value }}"
   - name: smart
     rules:
       - alert: smart failure
@@ -452,12 +578,30 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: smart ssd wearout approaching
-        expr: smart_percentage_used >= 80
+        expr: smart_percentage_used / 100 >= 0.8
         for: 60m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           percentage_used: "{{ $value | humanizePercentage }}"
+  - name: smokeping
+    rules:
+      - alert: packet loss
+        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
+        for: 10m
+        labels:
+          alertgroup: smokeping
+        annotations:
+          loss_rate: "{{ $value | humanizePercentage }}"
+  - name: snmp
+    rules:
+      - alert: snmp pdus missing
+        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
+        for: 15m
+        labels:
+          alertgroup: snmp
+        annotations:
+          missing_pdus: "{{ $value }}"
   - name: ssl
     rules:
       - alert: ssl certificate probe failed
@@ -485,7 +629,7 @@ groups:
   - name: statuscake
     rules:
       - alert: statuscake uptime check failing
-        expr: statuscake_uptime{status="down",paused="false"} > 0
+        expr: statuscake_paused == 0 and statuscake_up == 0
         for: 10m
         labels:
           alertgroup: statuscake
@@ -496,9 +640,9 @@ groups:
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
-      - alert: systemd failed service
-        expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
-        for: 6h
+      - alert: systemd failed chef client service
+        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
+        for: 0m
         labels:
           alertgroup: "{{ $labels.instance }}"
   - name: tile
@@ -517,6 +661,13 @@ groups:
           alertgroup: tile
         annotations:
           miss_rate: "{{ $value | humanizePercentage }}"
+      - alert: tile render rate
+        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
+        for: 15m
+        labels:
+          alertgroup: tile
+        annotations:
+          render_rate: "{{ $value }} tiles/s"
   - name: time
     rules:
       - alert: clock not synchronising