X-Git-Url: https://git.openstreetmap.org./chef.git/blobdiff_plain/16ae8dd1960a18f6c82973aab884a142a8901f54..6ca4c5b1adbeff9556a2f63e7716d21d02495901:/cookbooks/prometheus/templates/default/alert_rules.yml.erb?ds=sidebyside

diff --git a/cookbooks/prometheus/templates/default/alert_rules.yml.erb b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
index 521d2c92b..305afbd90 100644
--- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb
+++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
@@ -3,6 +3,20 @@ groups:
   - name: amsterdam
     rules:
+      - alert: he uplink
+        expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
+        for: 6m
+        labels:
+          alertgroup: "amsterdam"
+        annotations:
+          status: "{{ $value }}"
+      - alert: equinix uplink
+        expr: junos_interface_up{site="amsterdam",name=~"xe-[01]/2/0"} != 1
+        for: 6m
+        labels:
+          alertgroup: "amsterdam"
+        annotations:
+          status: "{{ $value }}"
       - alert: pdu current draw
         expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
         for: 6m
@@ -11,21 +25,21 @@ groups:
         annotations:
           current: "{{ $value | humanize }}A"
       - alert: site power
-        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3
+        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
         for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
           current: "{{ $value | humanize }}kVA"
       - alert: site temperature
-        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
+        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 15 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 32
         for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
           temperature: "{{ $value | humanize }}C"
       - alert: site humidity
-        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
+        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.08 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.8
         for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
@@ -45,13 +59,13 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           busy_workers: "{{ $value | humanizePercentage }}"
-      - alert: apache low request rate
-        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
-        for: 15m
+      - alert: apache connection limit
+        expr: (apache_connections{state="total"} - on (instance) apache_connections{state="closing"}) / on (instance) (apache_server_limit * on (instance) (apache_threads_per_child + on (instance) (apache_async_request_worker_factor * on (instance) apache_workers{state="idle"} / on(instance) apache_processes{state="all"}))) > 0.8
+        for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
-          request_rate: "{{ $value | humanizePercentage }}"
+          connections: "{{ $value | humanizePercentage }}"
   - name: chef
     rules:
       - alert: chef client not running
@@ -98,13 +112,20 @@ groups:
           pressure: "{{ $value | humanizePercentage }}"
   - name: database
     rules:
-      - alert: postgres replication delay
-        expr: pg_replication_lag_seconds > 30
-        for: 15m
+      - alert: active rails queries
+        expr: sum(pg_stat_activity_count{datname="openstreetmap",usename="rails",state="active"}) by (instance) > 50 and on (instance) chef_role{name="db-master"}
+        for: 5m
         labels:
           alertgroup: database
         annotations:
-          delay: "{{ $value | humanizeDuration }}"
+          queries: "{{ $value }}"
+      - alert: active cgimap queries
+        expr: sum(pg_stat_activity_count{datname="openstreetmap",usename="cgimap",state="active"}) by (instance) > 30 and on (instance) chef_role{name="db-master"}
+        for: 5m
+        labels:
+          alertgroup: database
+        annotations:
+          queries: "{{ $value }}"
   - name: discourse
     rules:
       - alert: discourse job failure rate
@@ -116,6 +137,20 @@ groups:
           failure_rate: "{{ $value }} jobs/s"
   - name: dublin
     rules:
+      - alert: he uplink
+        expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          status: "{{ $value }}"
+      - alert: equinix uplink
+        expr: junos_interface_up{site="dublin",name=~"xe-[01]/2/0"} != 1
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          status: "{{ $value }}"
       - alert: pdu current draw
         expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
         for: 6m
@@ -153,13 +188,23 @@ groups:
           alertgroup: fastly
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
-      - alert: fastly healthcheck failing
-        expr: count(fastly_healthcheck_status == 0) by (service) > 0
+      - alert: fastly frontend healthcheck warning
+        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2
+        for: 15m
+        labels:
+          alertgroup: fastly
+      - alert: fastly frontend healthcheck critical
+        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter)
+        for: 5m
+        labels:
+          alertgroup: fastly
+      - alert: fastly backend healthcheck warning
+        expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10
         for: 15m
         labels:
           alertgroup: fastly
-      - alert: multiple fastly healthchecks failing
-        expr: count(fastly_healthcheck_status == 0) by (service) > 4
+      - alert: fastly backend healthcheck critical
+        expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend)
         for: 5m
         labels:
           alertgroup: fastly
@@ -255,21 +300,60 @@ groups:
           alertgroup: "{{ $labels.instance }}"
   - name: juniper
     rules:
+      - alert: juniper red alarms
+        expr: juniper_alarms_red_count > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          alarm_count: "{{ $value }} alarms"
+      - alert: juniper yellow alarms
+        expr: juniper_alarms_yellow_count > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          alarm_count: "{{ $value }} alarms"
       - alert: juniper cpu alarm
-        expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
+        expr: junos_route_engine_load_average_five / 2 > 0.5
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
+        annotations:
+          load_average: "{{ $value | humanizePercentage }}"
       - alert: juniper fan alarm
-        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
+        expr: junos_environment_fan_up != 1
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
       - alert: juniper power alarm
-        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
+        expr: junos_environment_power_up != 1
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+      - alert: juniper laser receive power
+        expr: junos_interface_diagnostics_laser_rx_dbm < -12 and on (site, instance, name) junos_interface_admin_up == 1
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
+        annotations:
+          power: "{{ $value }} dBm"
+      - alert: juniper laser transmit power
+        expr: junos_interface_diagnostics_laser_output_dbm < -8 and on (site, instance, name) junos_interface_admin_up == 1
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          power: "{{ $value }} dBm"
+  - name: load
+    rules:
+      - alert: load average
+        expr: sum(node_load5) by (instance) / count(node_cpu_frequency_max_hertz) by (instance) > 2
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          load: "{{ $value | humanizePercentage }}"
   - name: mail
     rules:
       - alert: exim down
@@ -278,7 +362,7 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: exim queue length
-        expr: exim_queue > exim_queue_limit
+        expr: exim_queue > ignoring(job) exim_queue_limit
         for: 60m
         labels:
           alertgroup: mail
@@ -360,17 +444,31 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           connections_used: "{{ $value | humanizePercentage }}"
+      - alert: mysql connection errors
+        expr: increase(mysql_global_status_connection_errors_total[1m]) > 0
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          error_count: "{{ $value }}"
   - name: network
     rules:
+      - alert: interface redundancy lost
+        expr: node_bonding_active < 2 and on (instance, master) label_replace(chef_network_interface{bond_mode="802.3ad"}, "master", "$1", "name", "(.*)")
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          link_count: "{{ $value }}"
       - alert: interface transmit rate
-        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
+        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           bandwidth_used: "{{ $value | humanizePercentage }}"
       - alert: interface receive rate
-        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
+        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
@@ -413,6 +511,11 @@ groups:
           alertgroup: nominatim
         annotations:
           delay: "{{ $value | humanizeDuration }}"
+      - alert: nominatim connections
+        expr: sum(nginx_connections_writing and on (instance) chef_role{name="nominatim"}) > 2500
+        for: 15m
+        labels:
+          alertgroup: nominatim
   - name: overpass
     rules:
       - alert: overpass osm database age
@@ -518,13 +621,6 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           new_deadlocks: "{{ $value }}"
-      - alert: postgresql slow queries
-        expr: pg_slow_queries > 0
-        for: 5m
-        labels:
-          alertgroup: "{{ $labels.instance }}"
-        annotations:
-          queries: "{{ $value }}"
       - alert: postgresql idle transactions
         expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
         for: 5m
@@ -544,6 +640,11 @@ groups:
         for: 10m
         labels:
           alertgroup: "prometheus"
+      - alert: node exporter text file scrape error
+        expr: node_textfile_scrape_error > 0
+        for: 10m
+        labels:
+          alertgroup: "prometheus"
   - name: raid
     rules:
       - alert: raid controller battery failed
@@ -582,6 +683,13 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           new_errors: "{{ $value }}"
+  - name: resolved
+    rules:
+      - alert: dnssec validation failures
+        expr: rate(resolved_dnssec_verdicts_total{result="bogus"}[1m]) > 1
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
   - name: smart
     rules:
       - alert: smart failure
@@ -660,14 +768,14 @@ groups:
   - name: taginfo
     rules:
      - alert: taginfo planet age
-        expr: time() - taginfo_data_from_seconds > 129600
+        expr: time() - taginfo_data_from_seconds > 129600 and on (instance) chef_role{name="taginfo"}
         for: 0m
         labels:
           alertgroup: taginfo
         annotations:
           age: "{{ $value | humanizeDuration }}"
       - alert: taginfo database age
-        expr: time() - taginfo_database_update_finish_seconds > 129600
+        expr: time() - taginfo_database_update_finish_seconds > 129600 and on (instance) chef_role{name="taginfo"}
         for: 0m
         labels:
           alertgroup: taginfo
         annotations:
@@ -720,15 +828,15 @@ groups:
   - name: web
     rules:
       - alert: web error rate
-        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
+        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002 and sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) > 0.01
         for: 5m
         labels:
           alertgroup: web
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
       - alert: job processing rate
-        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
-        for: 15m
+        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
+        for: 1h
         labels:
           alertgroup: web
         annotations:
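The new expressions can be exercised offline before the change is deployed. Below is a minimal promtool rule unit-test sketch for the new "load average" alert, assuming the ERB template has been rendered to a plain alert_rules.yml; the file name tests.yml and the instance label "dummy" are illustrative only. A node_load5 of 12 against four node_cpu_frequency_max_hertz series gives a ratio of 3, which should leave the alert firing once its 5-minute for clause has elapsed:

# tests.yml -- run with: promtool test rules tests.yml
rule_files:
  - alert_rules.yml

evaluation_interval: 1m

tests:
  - interval: 1m
    input_series:
      # hypothetical 4-CPU host with a sustained 5-minute load average of 12 (12 / 4 = 3 > 2)
      - series: 'node_load5{instance="dummy"}'
        values: '12+0x10'
      - series: 'node_cpu_frequency_max_hertz{instance="dummy",cpu="0"}'
        values: '2000000000+0x10'
      - series: 'node_cpu_frequency_max_hertz{instance="dummy",cpu="1"}'
        values: '2000000000+0x10'
      - series: 'node_cpu_frequency_max_hertz{instance="dummy",cpu="2"}'
        values: '2000000000+0x10'
      - series: 'node_cpu_frequency_max_hertz{instance="dummy",cpu="3"}'
        values: '2000000000+0x10'
    alert_rule_test:
      - eval_time: 10m
        alertname: load average
        exp_alerts:
          - exp_labels:
              instance: dummy
              alertgroup: dummy
            exp_annotations:
              load: "300%"

The same rendered file can also be syntax-checked with "promtool check rules alert_rules.yml".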