diff --git a/cookbooks/prometheus/templates/default/alert_rules.yml.erb b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
index 7d7fa4abf..16496c12d 100644
--- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb
+++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
@@ -1,13 +1,36 @@
 # DO NOT EDIT - This file is being maintained by Chef
 groups:
-  - name: alertmanager
+  - name: amsterdam
     rules:
-      - alert: prometheus target missing
-        expr: up == 0
-        for: 5m
+      - alert: pdu current draw
+        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
+        for: 6m
         labels:
-          alertgroup: "prometheus"
+          alertgroup: "amsterdam"
+        annotations:
+          current: "{{ $value | humanize }}A"
+      - alert: site current draw
+        expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
+        for: 6m
+        labels:
+          alertgroup: "amsterdam"
+        annotations:
+          current: "{{ $value | humanize }}A"
+      - alert: site temperature
+        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 25.5
+        for: 6m
+        labels:
+          alertgroup: "amsterdam"
+        annotations:
+          temperature: "{{ $value | humanize }}C"
+      - alert: site humidity
+        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
+        for: 6m
+        labels:
+          alertgroup: "amsterdam"
+        annotations:
+          humidity: "{{ $value | humanizePercentage }}"
   - name: apache
     rules:
       - alert: apache down
@@ -22,15 +45,57 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           busy_workers: "{{ $value | humanizePercentage }}"
+      - alert: apache low request rate
+        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
+        for: 15m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          request_rate: "{{ $value | humanizePercentage }}"
+  - name: chef
+    rules:
+      - alert: chef client not running
+        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
+        for: 12h
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          down_time: "{{ $value | humanizeDuration }}"
+  - name: cisco
+    rules:
+      - alert: cisco fan alarm
+        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
+      - alert: cisco temperature alarm
+        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . 
| first | value | humanize }}C{{end}}" + - alert: cisco main power alarm + expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + - alert: cisco redundant power alarm + expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" - name: cpu rules: - alert: cpu pressure - expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.3 + expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6 for: 15m labels: alertgroup: "{{ $labels.instance }}" annotations: - major_page_faults: "{{ $value | humanizePercentage }}" + pressure: "{{ $value | humanizePercentage }}" - name: database rules: - alert: postgres replication delay @@ -40,10 +105,59 @@ groups: alertgroup: database annotations: delay: "{{ $value | humanizeDuration }}" + - name: dublin + rules: + - alert: pdu current draw + expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 13 + for: 6m + labels: + alertgroup: "dublin" + annotations: + current: "{{ $value | humanize }}A" + - alert: site current draw + expr: sum(rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10) > 17 + for: 6m + labels: + alertgroup: "dublin" + annotations: + current: "{{ $value | humanize }}A" + - alert: site temperature + expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 25.5 + for: 6m + labels: + alertgroup: "dublin" + annotations: + temperature: "{{ $value | humanize }}C" + - alert: site humidity + expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65 + for: 6m + labels: + alertgroup: "dublin" + annotations: + humidity: "{{ $value | humanizePercentage }}" + - name: fastly + rules: + - alert: fastly error rate + expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005 + for: 15m + labels: + alertgroup: fastly + annotations: + error_rate: "{{ $value | humanizePercentage }}" + - alert: fastly healthcheck failing + expr: count(fastly_healthcheck_status == 0) by (service) > 0 + for: 15m + labels: + alertgroup: fastly + - alert: multiple fastly healthchecks failing + expr: count(fastly_healthcheck_status == 0) by (service) > 4 + for: 5m + labels: + alertgroup: fastly - name: filesystem rules: - alert: readonly filesystem - expr: node_filesystem_readonly == 1 + expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d]) for: 0m labels: alertgroup: "{{ $labels.instance }}" @@ -95,13 +209,13 @@ groups: in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . 
| first | value | humanize }}V{{end}}" - name: io rules: - - alert: cpu pressure + - alert: io pressure expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6 for: 60m labels: alertgroup: "{{ $labels.instance }}" annotations: - major_page_faults: "{{ $value | humanizePercentage }}" + pressure: "{{ $value | humanizePercentage }}" - name: ipmi rules: - alert: ipmi fan alarm @@ -130,6 +244,44 @@ groups: for: 5m labels: alertgroup: "{{ $labels.instance }}" + - name: juniper + rules: + - alert: juniper cpu alarm + expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + - alert: juniper fan alarm + expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + - alert: juniper power alarm + expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.site }}" + - name: mail + rules: + - alert: exim down + expr: exim_up == 0 + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" + - alert: exim queue length + expr: exim_queue > exim_queue_limit + for: 60m + labels: + alertgroup: mail + annotations: + queue_length: "{{ $value }}" + - alert: mailman queue length + expr: mailman_queue_length > 200 + for: 60m + labels: + alertgroup: mail + annotations: + queue_length: "{{ $value }}" - name: mdadm rules: - alert: mdadm array inactive @@ -177,7 +329,7 @@ groups: labels: alertgroup: "{{ $labels.instance }}" annotations: - major_page_faults: "{{ $value | humanizePercentage }}" + pressure: "{{ $value | humanizePercentage }}" - alert: oom kill detected expr: increase(node_vmstat_oom_kill[1m]) > 0 for: 0m @@ -202,12 +354,19 @@ groups: annotations: bandwidth_used: "{{ $value | humanizePercentage }}" - alert: interface transmit errors - expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01 + expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01 for: 5m labels: alertgroup: "{{ $labels.instance }}" annotations: error_rate: "{{ $value | humanizePercentage }}" + - alert: wireguard interface transmit errors + expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05 + for: 1h + labels: + alertgroup: "{{ $labels.instance }}" + annotations: + error_rate: "{{ $value | humanizePercentage }}" - alert: interface receive errors expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01 for: 5m @@ -222,6 +381,48 @@ groups: alertgroup: "{{ $labels.instance }}" annotations: entries_used: "{{ $value | humanizePercentage }}" + - name: nominatim + rules: + - alert: nominatim replication delay + expr: nominatim_replication_delay > 10800 + for: 1h + labels: + alertgroup: nominatim + annotations: + delay: "{{ $value | humanizeDuration }}" + - name: overpass + rules: + - alert: overpass osm database age + expr: overpass_database_age_seconds{database="osm"} > 3600 + for: 1h + labels: + alertgroup: overpass + annotations: + age: "{{ $value | humanizeDuration }}" + - alert: overpass area database age + expr: overpass_database_age_seconds{database="area"} > 86400 + for: 1h + labels: + alertgroup: overpass + annotations: + age: "{{ $value | humanizeDuration }}" + - name: passenger + rules: + - alert: passenger down + expr: passenger_up == 0 + for: 5m + labels: + 
alertgroup: "{{ $labels.instance }}" + - alert: passenger queuing + expr: passenger_top_level_request_queue > 0 + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" + - alert: passenger application queuing + expr: passenger_app_request_queue > 0 + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" - name: planet rules: - alert: planet dump overdue @@ -288,7 +489,7 @@ groups: annotations: connections_used: "{{ $value | humanizePercentage }}" - alert: postgresql deadlocks - expr: increase(pg_stat_database_deadlocks[1m]) > 5 + expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5 for: 0m labels: alertgroup: "{{ $labels.instance }}" @@ -301,6 +502,46 @@ groups: alertgroup: "{{ $labels.instance }}" annotations: queries: "{{ $value }}" + - name: prometheus + rules: + - alert: prometheus configuration error + expr: prometheus_config_last_reload_successful == 0 + for: 10m + labels: + alertgroup: "prometheus" + - alert: prometheus target missing + expr: up == 0 + for: 10m + labels: + alertgroup: "prometheus" + - name: raid + rules: + - alert: raid array degraded + expr: ohai_array_info{status="degraded"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" + - alert: raid disk failed + expr: ohai_disk_info{status="failed"} > 0 + for: 5m + labels: + alertgroup: "{{ $labels.instance }}" + - name: rasdaemon + rules: + - alert: memory controller errors + expr: increase(rasdaemon_mc_events_total[1m]) > 0 + for: 0m + labels: + alertgroup: "{{ $labels.instance }}" + annotations: + new_errors: "{{ $value }}" + - alert: pcie aer errors + expr: increase(rasdaemon_aer_events_total[1m]) > 0 + for: 0m + labels: + alertgroup: "{{ $labels.instance }}" + annotations: + new_ercrors: "{{ $value }}" - name: smart rules: - alert: smart failure @@ -309,7 +550,7 @@ groups: labels: alertgroup: "{{ $labels.instance }}" - alert: smart ssd wearout approaching - expr: smart_percentage_used >= 90 + expr: smart_percentage_used >= 80 for: 60m labels: alertgroup: "{{ $labels.instance }}" @@ -339,18 +580,30 @@ groups: for: 0m labels: alertgroup: ssl + - name: statuscake + rules: + - alert: statuscake uptime check failing + expr: statuscake_uptime{status="down",paused="false"} > 0 + for: 10m + labels: + alertgroup: statuscake - name: systemd rules: - alert: systemd failed service - expr: node_systemd_unit_state{state="failed"} == 1 + expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1 for: 5m labels: alertgroup: "{{ $labels.instance }}" + - alert: systemd failed chef client service + expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1 + for: 6h + labels: + alertgroup: "{{ $labels.instance }}" - name: tile rules: - alert: renderd replication delay expr: renderd_replication_delay > 120 - for: 5m + for: 15m labels: alertgroup: tile annotations: @@ -385,3 +638,10 @@ groups: alertgroup: web annotations: error_rate: "{{ $value | humanizePercentage }}" + - alert: job processing rate + expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1 + for: 15m + labels: + alertgroup: web + annotations: + job_processing_rate: "{{ $value | humanizePercentage }}"