diff --git a/cookbooks/prometheus/templates/default/alert_rules.yml.erb b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
index b0094a83b24869ba8e5555751b45e5f1418135c3..d717e4f0aeedacf3f5c8fc628485c9b945eedc05 100644
--- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb
+++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
@@ -3,20 +3,27 @@
 groups:
   - name: amsterdam
     rules:
+      - alert: uplink
+        expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
+        for: 6m
+        labels:
+          alertgroup: "amsterdam"
+        annotations:
+          status: "{{ $value }}"
       - alert: pdu current draw
-        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
+        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
         for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
           current: "{{ $value | humanize }}A"
-      - alert: site current draw
-        expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
+      - alert: site power
+        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
         for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
         for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
-          current: "{{ $value | humanize }}A"
+          current: "{{ $value | humanize }}kVA"
       - alert: site temperature
         expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
         for: 6m
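# Scaling note: the APC PDU metrics are raw SNMP integers. rPDU2PhaseStatusCurrent
# is reported in tenths of an amp, and rPDU2PhaseStatusApparentPower appears (from
# the /100 divisor) to be in hundredths of a kVA, so for example:
#
#   rPDU2PhaseStatusCurrent == 285            -> 285 / 10  = 28.5A  > 28, fires
#   sum(rPDU2PhaseStatusApparentPower) == 370 -> 370 / 100 = 3.7kVA > 3.5, fires
#
# The site power rule also averages over an hour (avg_over_time(...[1h])), so a
# brief spike does not trip the alert.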
@@ -45,13 +52,6 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           busy_workers: "{{ $value | humanizePercentage }}"
-      - alert: apache low request rate
-        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
-        for: 15m
-        labels:
-          alertgroup: "{{ $labels.instance }}"
-        annotations:
-          request_rate: "{{ $value | humanizePercentage }}"
   - name: chef
     rules:
       - alert: chef client not running
@@ -90,8 +90,8 @@ groups:
   - name: cpu
     rules:
       - alert: cpu pressure
-        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
-        for: 15m
+        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
+        for: 60m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
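# node_pressure_cpu_waiting_seconds_total comes from the kernel's pressure stall
# information (/proc/pressure/cpu) and counts seconds that runnable tasks spent
# waiting for a CPU, so rate(...[5m]) gives the fraction of wall-clock time
# spent stalled, e.g.:
#
#   increase(node_pressure_cpu_waiting_seconds_total[5m]) == 240
#   240s stalled / 300s elapsed = 0.8 > 0.75 -> fires once sustained for 60m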
@@ -116,20 +116,27 @@ groups:
           failure_rate: "{{ $value }} jobs/s"
   - name: dublin
     rules:
+      - alert: uplink
+        expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          status: "{{ $value }}"
       - alert: pdu current draw
-        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 13
+        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
         for: 6m
         labels:
           alertgroup: "dublin"
         annotations:
           current: "{{ $value | humanize }}A"
-      - alert: site current draw
-        expr: sum(rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10) > 17
+      - alert: site power
+        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
         for: 6m
         labels:
           alertgroup: "dublin"
         annotations:
-          current: "{{ $value | humanize }}A"
+          current: "{{ $value | humanize }}kVA"
       - alert: site temperature
         expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
         for: 6m
@@ -153,13 +160,23 @@ groups:
           alertgroup: fastly
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
-      - alert: fastly healthcheck failing
-        expr: count(fastly_healthcheck_status == 0) by (service) > 0
+      - alert: fastly frontend healthcheck warning
+        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2
         for: 15m
         labels:
           alertgroup: fastly
-      - alert: multiple fastly healthchecks failing
-        expr: count(fastly_healthcheck_status == 0) by (service) > 4
+      - alert: fastly frontend healthcheck critical
+        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter)
+        for: 5m
+        labels:
+          alertgroup: fastly
+      - alert: fastly backend healthcheck warning
+        expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10
+        for: 15m
+        labels:
+          alertgroup: fastly
+      - alert: fastly backend healthcheck critical
+        expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend)
         for: 5m
         labels:
           alertgroup: fastly
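# Grouping note: count(fastly_healthcheck_status == 0) only counts failing
# checks, so the "critical" rules fire when the failing count equals the total
# count, i.e. every healthcheck in that (service, datacenter) or
# (service, backend) group is down. For example, with 12 checks in a group:
#
#   count(fastly_healthcheck_status == 0) by (service, datacenter) == 12
#   count(fastly_healthcheck_status)      by (service, datacenter) == 12 -> critical
#
# When nothing is failing the left-hand side has no samples and neither rule fires.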
@@ -255,18 +272,34 @@ groups:
           alertgroup: "{{ $labels.instance }}"
   - name: juniper
     rules:
+      - alert: juniper red alarms
+        expr: juniper_alarms_red_count > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          alarm_count: "{{ $value }} alarms"
+      - alert: juniper yellow alarms
+        expr: juniper_alarms_yellow_count > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          alarm_count: "{{ $value }} alarms"
       - alert: juniper cpu alarm
-        expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
+        expr: junos_route_engine_load_average_five / 2 > 0.5
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
+        annotations:
+          load_average: "{{ $value | humanizePercentage }}"
       - alert: juniper fan alarm
-        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
+        expr: junos_environment_fan_up != 1
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
       - alert: juniper power alarm
-        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
+        expr: junos_environment_power_up != 1
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
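# The /2 in the CPU rule presumably normalises the five-minute load average to
# a two-core routing engine, giving a 0-1 per-core utilisation that
# humanizePercentage can render, e.g. a load average of 1.3 -> 1.3 / 2 = 0.65,
# shown as "65%", which is over the 50% threshold.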
@@ -278,7 +311,7 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: exim queue length
-        expr: exim_queue > exim_queue_limit
+        expr: exim_queue > ignoring(job) exim_queue_limit
         for: 60m
         labels:
           alertgroup: mail
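# Matching note: PromQL one-to-one matching requires identical label sets, and
# exim_queue and exim_queue_limit are evidently exported by different scrape
# jobs, so a plain comparison would never find a pair. ignoring(job) drops that
# label on both sides before matching (hypothetical label values):
#
#   exim_queue{instance="mail1", job="exim"}        150
#   exim_queue_limit{instance="mail1", job="chef"}  100
#   exim_queue > ignoring(job) exim_queue_limit  -> matches on instance, fires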
@@ -363,14 +396,14 @@ groups:
   - name: network
     rules:
       - alert: interface transmit rate
-        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
+        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           bandwidth_used: "{{ $value | humanizePercentage }}"
       - alert: interface receive rate
-        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
+        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
@@ -525,6 +558,13 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           queries: "{{ $value }}"
+      - alert: postgresql idle transactions
+        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          queries: "{{ $value }}"
   - name: prometheus
     rules:
       - alert: prometheus configuration error
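# pg_process_idle_seconds looks like a histogram: _count is the number of
# "idle in transaction" backends observed and _bucket{le="300"} those idle for
# at most 300s, so count > bucket means at least one backend has sat in an open
# transaction for over five minutes. For example:
#
#   sum(pg_process_idle_seconds_count{state="idle in transaction"})           == 7
#   sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) == 6
#   7 > 6 -> one backend idle for more than 300s, fires after 5m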
@@ -544,6 +584,11 @@ groups:
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
+      - alert: raid controller battery recharging
+        expr: ohai_controller_info{battery_status="recharging"} > 0
+        for: 4h
+        labels:
+          alertgroup: "{{ $labels.instance }}"
       - alert: raid array degraded
         expr: ohai_array_info{status="degraded"} > 0
         for: 5m
@@ -578,7 +623,7 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: smart ssd wearout approaching
-        expr: smart_percentage_used >= 80
+        expr: smart_percentage_used / 100 >= 0.8
         for: 60m
         labels:
           alertgroup: "{{ $labels.instance }}"
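# Dividing smart_percentage_used by 100 keeps the same 80% threshold but turns
# the value into a 0-1 fraction, presumably so it renders cleanly with
# humanizePercentage, e.g. smart_percentage_used == 85 -> 0.85 >= 0.8, fires.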
@@ -587,8 +632,8 @@ groups:
   - name: smokeping
     rules:
       - alert: packet loss
-        expr: 100 - (rate(smokeping_response_duration_seconds_count[5m]) * 100 / rate(smokeping_requests_total[5m])) > 0
-        for: 5m
+        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
+        for: 10m
         labels:
           alertgroup: smokeping
         annotations:
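# The packet loss expression is the miss ratio between probes sent and
# responses seen, now with a 2% floor instead of alerting on any loss at all:
#
#   rate(smokeping_requests_total[5m])                  == 2.0 probes/s
#   rate(smokeping_response_duration_seconds_count[5m]) == 1.9 responses/s
#   1 - (1.9 / 2.0) = 0.05 > 0.02 -> fires after 10m of sustained loss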
@@ -645,6 +690,29 @@ groups:
         for: 0m
         labels:
           alertgroup: "{{ $labels.instance }}"
+  - name: taginfo
+    rules:
+      - alert: taginfo planet age
+        expr: time() - taginfo_data_from_seconds > 129600
+        for: 0m
+        labels:
+          alertgroup: taginfo
+        annotations:
+          age: "{{ $value | humanizeDuration }}"
+      - alert: taginfo database age
+        expr: time() - taginfo_database_update_finish_seconds > 129600
+        for: 0m
+        labels:
+          alertgroup: taginfo
+        annotations:
+          age: "{{ $value | humanizeDuration }}"
+      - alert: taginfo database size
+        expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
+        for: 30m
+        labels:
+          alertgroup: taginfo
+        annotations:
+          size_change: "{{ $value | humanizePercentage }}"
   - name: tile
     rules:
       - alert: renderd replication delay
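# 129600 seconds is 36 hours (36 * 3600), so the two age rules fire as soon as
# the planet data or the finished database build is more than a day and a half
# old. The size rule instead watches for churn: a swing of more than 10% in
# the database size within 30 minutes, e.g.
#
#   abs(delta(taginfo_database_size_bytes[30m])) == 12GB on a 100GB database
#   12 / 100 = 0.12 > 0.1 -> fires after 30m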
@@ -692,8 +760,8 @@ groups:
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
       - alert: job processing rate
-        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
-        for: 15m
+        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
+        for: 1h
         labels:
           alertgroup: web
         annotations:
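# Assuming the usual delayed_job behaviour of inserting a row per enqueued job
# and deleting it on completion, a delete/insert ratio below 0.9 on the
# database master means the queue is growing faster than it drains:
#
#   rate(...n_tup_del[1h]) == 8.5 rows/s, rate(...n_tup_ins[1h]) == 10 rows/s
#   8.5 / 10 = 0.85 < 0.9 -> fires after a further hour below the ratio
#
# The chef_role{name="db-master"} == 1 guard keeps the alert on the master only.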