Add alerts for site power usage in Amsterdam and Dublin
diff --git a/cookbooks/prometheus/templates/default/alert_rules.yml.erb b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
index 367e07650255de3383d13955ea1b73325bf3fc8d..8f2986e6ab85340d339be8e23f9adc219f0911ac 100644
--- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb
+++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
@@ -17,6 +17,13 @@ groups:
           alertgroup: "amsterdam"
         annotations:
           current: "{{ $value | humanize }}A"
           alertgroup: "amsterdam"
         annotations:
           current: "{{ $value | humanize }}A"
+      - alert: site power
+        expr: sum(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 100) > 3
+        for: 0m
+        labels:
+          alertgroup: "amsterdam"
+        annotations:
+          current: "{{ $value | humanize }}kVA"
       - alert: site temperature
         expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
         for: 6m
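
The new "site power" rule sums apparent power across every PDU at the site. rPDU2PhaseStatusApparentPower comes from the APC rPDU2 SNMP MIB and, judging from the /100 scaling and the kVA annotation, is reported in hundredths of kVA; with "for: 0m" the alert fires on the first evaluation over the threshold (3 kVA here, 4 kVA in the matching Dublin rule below). A worked evaluation, assuming two Amsterdam PDUs reporting raw phase-1 readings of 180 and 150:

    sum(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 100)
      = 1.80 + 1.50
      = 3.30   # over the 3 kVA threshold, so the alert fires immediately
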
@@ -90,8 +97,8 @@ groups:
   - name: cpu
     rules:
       - alert: cpu pressure
-        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
-        for: 15m
+        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
+        for: 60m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
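
node_pressure_cpu_waiting_seconds_total is the kernel's PSI counter of seconds that tasks spent stalled waiting for a CPU, so its 5-minute rate is a dimensionless stall fraction between 0 and 1. Raising the threshold from 0.6 to 0.75 and the hold time from 15m to 60m limits the page to sustained, heavy contention. For example, if the counter advances by 270 stall-seconds over the 300-second window:

    rate(node_pressure_cpu_waiting_seconds_total[5m]) = 270 / 300 = 0.9   # above 0.75
    # the expression must then stay above 0.75 for the full 60m before firing
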
@@ -99,12 +106,21 @@ groups:
   - name: database
     rules:
       - alert: postgres replication delay
-        expr: pg_replication_lag_seconds > 5
-        for: 5m
+        expr: pg_replication_lag_seconds > 30
+        for: 15m
         labels:
           alertgroup: database
         annotations:
           delay: "{{ $value | humanizeDuration }}"
+  - name: discourse
+    rules:
+      - alert: discourse job failure rate
+        expr: rate(discourse_job_failures[5m]) > 0
+        for: 5m
+        labels:
+          alertgroup: discourse
+        annotations:
+          failure_rate: "{{ $value }} jobs/s"
   - name: dublin
     rules:
       - alert: pdu current draw
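
Two changes land in this hunk: the replication-delay alert is loosened from 5s held for 5m to 30s held for 15m, and a new discourse group watches the job failure counter. Assuming discourse_job_failures is a counter (which rate() requires), any failed job inside the 5-minute window produces a positive rate; for example, 3 failures in 300 seconds:

    rate(discourse_job_failures[5m]) = 3 / 300 = 0.01   # jobs/s, > 0, alert after for: 5m
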
@@ -121,6 +137,13 @@ groups:
           alertgroup: "dublin"
         annotations:
           current: "{{ $value | humanize }}A"
           alertgroup: "dublin"
         annotations:
           current: "{{ $value | humanize }}A"
+      - alert: site power
+        expr: sum(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"} / 100) > 4
+        for: 0m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          current: "{{ $value | humanize }}kVA"
       - alert: site temperature
         expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
         for: 6m
@@ -489,8 +512,8 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: postgresql replication delay
-        expr: pg_replication_lag_seconds > 5
-        for: 1m
+        expr: pg_replication_lag_seconds > 30
+        for: 15m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
@@ -530,6 +553,11 @@ groups:
           alertgroup: "prometheus"
   - name: raid
     rules:
           alertgroup: "prometheus"
   - name: raid
     rules:
+      - alert: raid controller battery failed
+        expr: ohai_controller_info{battery_status="failed"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
       - alert: raid array degraded
         expr: ohai_array_info{status="degraded"} > 0
         for: 5m
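
ohai_controller_info is presumably an info-style gauge built from Chef's Ohai RAID data, with the controller's battery state carried as a label, so the new rule matches only controllers whose battery has failed. A hypothetical matching series:

    ohai_controller_info{instance="db1:9100",battery_status="failed"} 1
    # any such series present for 5m alerts the owning host's alertgroup
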
@@ -564,12 +592,21 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: smart ssd wearout approaching
-        expr: smart_percentage_used >= 80
+        expr: smart_percentage_used / 100 >= 0.8
         for: 60m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           percentage_used: "{{ $value | humanizePercentage }}"
+  - name: smokeping
+    rules:
+      - alert: packet loss
+        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
+        for: 10m
+        labels:
+          alertgroup: smokeping
+        annotations:
+          loss_rate: "{{ $value | humanizePercentage }}"
   - name: snmp
     rules:
       - alert: snmp pdus missing
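
The smart change leaves the trigger point alone but fixes the annotation: humanizePercentage expects a 0-1 ratio and multiplies by 100 when rendering, so the raw 0-100 smart_percentage_used value of, say, 85 would have displayed as 8500%. Dividing by 100 keeps the condition equivalent (>= 80 becomes >= 0.8) and renders correctly. The new smokeping rule derives packet loss from the ratio of replies received to probes sent:

    1 - rate(smokeping_response_duration_seconds_count[5m])
        / rate(smokeping_requests_total[5m])
    # e.g. 980 replies to 1000 probes in the window: 1 - 0.98 = 0.02, the 2% threshold
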
@@ -606,7 +643,7 @@ groups:
   - name: statuscake
     rules:
       - alert: statuscake uptime check failing
-        expr: statuscake_uptime{status="down",paused="false"} > 0
+        expr: statuscake_paused == 0 and statuscake_up == 0
         for: 10m
         labels:
           alertgroup: statuscake
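
The StatusCake exporter's metrics appear to have changed shape here: instead of a single statuscake_uptime series carrying status and paused as labels, up and paused are now separate gauges. PromQL's "and" operator keeps only series present on both sides with matching labels, so the rewritten rule selects checks that are both unpaused and down:

    statuscake_paused == 0 and statuscake_up == 0   # per check, fires after 10m down
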
@@ -618,8 +655,8 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: systemd failed chef client service
-        expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
-        for: 6h
+        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
+        for: 0m
         labels:
           alertgroup: "{{ $labels.instance }}"
   - name: tile
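
The chef-client rule is inverted rather than retuned. The old expression needed the unit to sit in the failed state for six continuous hours, which the periodic timer can mask by restarting the unit and resetting the "for" clock. The new expression asks whether the unit has been inactive at any point in the window: the state="inactive" gauge is 1 only between completed runs, so a 6-hour sum of 0 means no run has finished in that time, and "for: 0m" fires as soon as that holds:

    sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
    # 0 means never observed inactive in 6h, i.e. chef-client has not completed a run recently
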