workaround temporary statuscake IP list failure

diff --git a/cookbooks/prometheus/templates/default/alert_rules.yml.erb b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
index f7b35990aeaaf813036f9223703cc37a231f193d..dfd885bb8055252fd72a99df4807eeba1f6c956c 100644
--- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb
+++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
@@ -3,6 +3,13 @@
 groups:
   - name: amsterdam
     rules:
+      - alert: uplink
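+        # ge-[01]/2/2 is assumed to be the external uplink port on each
+        # chassis member; any of those links being down raises the alert.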
+        expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
+        for: 6m
+        labels:
+          alertgroup: "amsterdam"
+        annotations:
+          status: "{{ $value }}"
       - alert: pdu current draw
         expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
         for: 6m
@@ -18,14 +25,14 @@ groups:
         annotations:
           current: "{{ $value | humanize }}kVA"
       - alert: site temperature
-        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
+        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 15 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 32
         for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
           temperature: "{{ $value | humanize }}C"
       - alert: site humidity
-        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
+        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.08 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.8
         for: 6m
         labels:
           alertgroup: "amsterdam"
@@ -45,6 +52,13 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           busy_workers: "{{ $value | humanizePercentage }}"
           alertgroup: "{{ $labels.instance }}"
         annotations:
           busy_workers: "{{ $value | humanizePercentage }}"
+      - alert: apache connection limit
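+        # Open connections (total minus closing) compared against the
+        # mpm_event capacity formula, ServerLimit * (ThreadsPerChild +
+        # AsyncRequestWorkerFactor * idle workers / processes); fires
+        # above 80% of that limit.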
+        expr: (apache_connections{state="total"} - on (instance) apache_connections{state="closing"}) / on (instance) (apache_server_limit * on (instance) (apache_threads_per_child + on (instance) (apache_async_request_worker_factor * on (instance) apache_workers{state="idle"} / on(instance) apache_processes{state="all"}))) > 0.8
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          connections: "{{ $value | humanizePercentage }}"
   - name: chef
     rules:
       - alert: chef client not running
@@ -91,13 +105,20 @@ groups:
           pressure: "{{ $value | humanizePercentage }}"
   - name: database
     rules:
           pressure: "{{ $value | humanizePercentage }}"
   - name: database
     rules:
-      - alert: postgres replication delay
-        expr: pg_replication_lag_seconds > 30
-        for: 15m
+      - alert: active rails queries
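+        # The "and on (instance)" join limits this to the host that
+        # currently holds the db-master chef role; replicas are ignored.
+        # The cgimap alert below follows the same pattern.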
+        expr: sum(pg_stat_activity_count{datname="openstreetmap",usename="rails",state="active"}) by (instance) > 50 and on (instance) chef_role{name="db-master"}
+        for: 5m
         labels:
           alertgroup: database
         annotations:
-          delay: "{{ $value | humanizeDuration }}"
+          queries: "{{ $value }}"
+      - alert: active cgimap queries
+        expr: sum(pg_stat_activity_count{datname="openstreetmap",usename="cgimap",state="active"}) by (instance) > 30 and on (instance) chef_role{name="db-master"}
+        for: 5m
+        labels:
+          alertgroup: database
+        annotations:
+          queries: "{{ $value }}"
   - name: discourse
     rules:
       - alert: discourse job failure rate
@@ -109,6 +130,13 @@ groups:
           failure_rate: "{{ $value }} jobs/s"
   - name: dublin
     rules:
           failure_rate: "{{ $value }} jobs/s"
   - name: dublin
     rules:
+      - alert: uplink
+        expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          status: "{{ $value }}"
       - alert: pdu current draw
         expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
         for: 6m
@@ -146,13 +174,23 @@ groups:
           alertgroup: fastly
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
-      - alert: fastly healthcheck failing
-        expr: count(fastly_healthcheck_status == 0) by (service) > 0
+      - alert: fastly frontend healthcheck warning
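+        # Grouped per datacenter: more than two checks failing in a
+        # single POP is worth a warning.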
+        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2
         for: 15m
         labels:
           alertgroup: fastly
-      - alert: multiple fastly healthchecks failing
-        expr: count(fastly_healthcheck_status == 0) by (service) > 4
+      - alert: fastly frontend healthcheck critical
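+        # Critical when the failing count equals the total count, i.e.
+        # every healthcheck in the datacenter is down.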
+        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter)
+        for: 5m
+        labels:
+          alertgroup: fastly
+      - alert: fastly backend healthcheck warning
+        expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10
+        for: 15m
+        labels:
+          alertgroup: fastly
+      - alert: fastly backend healthcheck critical
+        expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend)
         for: 5m
         labels:
           alertgroup: fastly
@@ -248,21 +286,60 @@ groups:
           alertgroup: "{{ $labels.instance }}"
   - name: juniper
     rules:
           alertgroup: "{{ $labels.instance }}"
   - name: juniper
     rules:
+      - alert: juniper red alarms
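+        # Red chassis alarms signal a fault needing immediate attention;
+        # yellow alarms (below) are warnings.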
+        expr: juniper_alarms_red_count > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          alarm_count: "{{ $value }} alarms"
+      - alert: juniper yellow alarms
+        expr: juniper_alarms_yellow_count > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          alarm_count: "{{ $value }} alarms"
       - alert: juniper cpu alarm
-        expr: jnxOperating5MinLoadAvg{jnxOperatingContentsIndex="9"} > 50
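+        # Dividing the five-minute load average by 2 presumably
+        # normalises for a two-core routing engine, yielding a fraction
+        # that the annotation renders as a percentage.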
+        expr: junos_route_engine_load_average_five / 2 > 0.5
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
+        annotations:
+          load_average: "{{ $value | humanizePercentage }}"
       - alert: juniper fan alarm
-        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
+        expr: junos_environment_fan_up != 1
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
       - alert: juniper power alarm
-        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
+        expr: junos_environment_power_up != 1
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+      - alert: juniper laser receive power
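+        # -12 dBm receive (and -8 dBm transmit below) are assumed early
+        # warning margins for the optics in use; only interfaces that
+        # are administratively up are checked.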
+        expr: junos_interface_diagnostics_laser_rx_dbm < -12 and on (site, instance, name) junos_interface_admin_up == 1
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          power: "{{ $value }} dBm"
+      - alert: juniper laser transmit power
+        expr: junos_interface_diagnostics_laser_output_dbm < -8 and on (site, instance, name) junos_interface_admin_up == 1
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
+        annotations:
+          power: "{{ $value }} dBm"
+  - name: load
+    rules:
+      - alert: load average
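+        # node_cpu_frequency_max_hertz has one series per CPU, so the
+        # count() is the CPU count; fires when the five-minute load per
+        # CPU stays above 2.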
+        expr: sum(node_load5) by (instance) / count(node_cpu_frequency_max_hertz) by (instance) > 2
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          load: "{{ $value | humanizePercentage }}"
   - name: mail
     rules:
       - alert: exim down
@@ -353,8 +430,22 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           connections_used: "{{ $value | humanizePercentage }}"
           alertgroup: "{{ $labels.instance }}"
         annotations:
           connections_used: "{{ $value | humanizePercentage }}"
+      - alert: mysql connection errors
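+        # increase() over one minute catches any new connection errors;
+        # "for: 0m" makes the alert fire immediately.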
+        expr: increase(mysql_global_status_connection_errors_total[1m]) > 0
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          error_count: "{{ $value }}"
   - name: network
     rules:
+      - alert: interface redundancy lost
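+        # label_replace copies the chef interface name into a "master"
+        # label so bonds declared as 802.3ad in chef can be joined with
+        # node_bonding_active, which counts the live slaves of a bond.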
+        expr: node_bonding_active < 2 and on (instance, master) label_replace(chef_network_interface{bond_mode="802.3ad"}, "master", "$1", "name", "(.*)")
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          link_count: "{{ $value }}"
       - alert: interface transmit rate
         expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
         for: 5m
@@ -406,6 +497,11 @@ groups:
           alertgroup: nominatim
         annotations:
           delay: "{{ $value | humanizeDuration }}"
+      - alert: nominatim connections
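+        # Sums in-flight (writing) nginx connections across every host
+        # holding the nominatim chef role.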
+        expr: sum(nginx_connections_writing and on (instance) chef_role{name="nominatim"}) > 2500
+        for: 15m
+        labels:
+          alertgroup: nominatim
   - name: overpass
     rules:
       - alert: overpass osm database age
@@ -511,13 +607,6 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           new_deadlocks: "{{ $value }}"
           alertgroup: "{{ $labels.instance }}"
         annotations:
           new_deadlocks: "{{ $value }}"
-      - alert: postgresql slow queries
-        expr: pg_slow_queries > 0
-        for: 5m
-        labels:
-          alertgroup: "{{ $labels.instance }}"
-        annotations:
-          queries: "{{ $value }}"
       - alert: postgresql idle transactions
         expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
         for: 5m
@@ -537,6 +626,11 @@ groups:
         for: 10m
         labels:
           alertgroup: "prometheus"
+      - alert: node exporter text file scrape error
+        expr: node_textfile_scrape_error > 0
+        for: 10m
+        labels:
+          alertgroup: "prometheus"
   - name: raid
     rules:
       - alert: raid controller battery failed
@@ -653,14 +747,14 @@ groups:
   - name: taginfo
     rules:
       - alert: taginfo planet age
-        expr: time() - taginfo_data_from_seconds > 129600
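+        # The chef_role join presumably stops stale metrics from hosts
+        # that no longer run taginfo from keeping the alert alive.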
+        expr: time() - taginfo_data_from_seconds > 129600 and on (instance) chef_role{name="taginfo"}
         for: 0m
         labels:
           alertgroup: taginfo
         annotations:
           age: "{{ $value | humanizeDuration }}"
       - alert: taginfo database age
-        expr: time() - taginfo_database_update_finish_seconds > 129600
+        expr: time() - taginfo_database_update_finish_seconds > 129600 and on (instance) chef_role{name="taginfo"}
         for: 0m
         labels:
           alertgroup: taginfo
@@ -713,7 +807,7 @@ groups:
   - name: web
     rules:
       - alert: web error rate
-        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
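+        # The added clause is an absolute floor: the 0.2% error ratio
+        # only alerts if errors also exceed 0.01/s, so a handful of
+        # failures on a quiet instance cannot trip it alone.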
+        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002 and sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) > 0.01
         for: 5m
         labels:
           alertgroup: web