Switch to public overpass instance

diff --git a/cookbooks/prometheus/templates/default/alert_rules.yml.erb b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
index 61a7370b1289595afe098b8a721030e785e660e8..5b2ec56ac9623b2579c5c97ccebb3b7208af310b 100644
--- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb
+++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
@@ -1,13 +1,6 @@
 # DO NOT EDIT - This file is being maintained by Chef
 
 groups:
-  - name: alertmanager
-    rules:
-      - alert: prometheus target missing
-        expr: up == 0
-        for: 10m
-        labels:
-          alertgroup: "prometheus"
   - name: amsterdam
     rules:
       - alert: pdu current draw
@@ -112,6 +105,36 @@ groups:
           alertgroup: database
         annotations:
           delay: "{{ $value | humanizeDuration }}"
+  - name: dublin
+    rules:
+      - alert: pdu current draw
+        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 13
+        for: 5m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          current: "{{ $value | humanize }}A"
+      - alert: site current draw
+        expr: sum(rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10) > 17
+        for: 5m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          current: "{{ $value | humanize }}A"
+      - alert: site temperature
+        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 25
+        for: 5m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          temperature: "{{ $value | humanize }}C"
+      - alert: site humidity
+        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
+        for: 5m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          humidity: "{{ $value | humanizePercentage }}"
   - name: fastly
     rules:
       - alert: fastly error rate
@@ -122,7 +145,12 @@ groups:
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
       - alert: fastly healthcheck failing
-        expr: fastly_healthcheck_status == 0
+        expr: count(fastly_healthcheck_status == 0) > 0
+        for: 15m
+        labels:
+          alertgroup: fastly
+      - alert: multiple fastly healthchecks failing
+        expr: count(fastly_healthcheck_status == 0) > 4
         for: 5m
         labels:
           alertgroup: fastly
@@ -219,12 +247,12 @@ groups:
   - name: juniper
     rules:
       - alert: juniper fan alarm
-        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!="running"} > 0
+        expr: sum_over_time(jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!="running"}[6m]) > 0
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
       - alert: juniper power alarm
-        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!="running"} > 0
+        expr: sum_over_time(jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!="running"}[6m]) > 0
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
@@ -322,7 +350,7 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
-      - alert: interface transmit errors
+      - alert: wireguard interface transmit errors
         expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
         for: 1h
         labels:
@@ -343,6 +371,31 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           entries_used: "{{ $value | humanizePercentage }}"
+  - name: nominatim
+    rules:
+      - alert: nominatim replication delay
+        expr: nominatim_replication_delay > 10800
+        for: 1h
+        labels:
+          alertgroup: nominatim
+        annotations:
+          delay: "{{ $value | humanizeDuration }}"
+  - name: overpass
+    rules:
+      - alert: overpass osm database age
+        expr: overpass_database_age_seconds{database="osm"} > 3600
+        for: 1h
+        labels:
+          alertgroup: overpass
+        annotations:
+          age: "{{ $value | humanizeDuration }}"
+      - alert: overpass area database age
+        expr: overpass_database_age_seconds{database="area"} > 86400
+        for: 1h
+        labels:
+          alertgroup: overpass
+        annotations:
+          age: "{{ $value | humanizeDuration }}"
   - name: planet
     rules:
       - alert: planet dump overdue
@@ -409,7 +462,7 @@ groups:
         annotations:
           connections_used: "{{ $value | humanizePercentage }}"
       - alert: postgresql deadlocks
-        expr: increase(pg_stat_database_deadlocks[1m]) > 5
+        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
         for: 0m
         labels:
           alertgroup: "{{ $labels.instance }}"
@@ -422,6 +475,18 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           queries: "{{ $value }}"
+  - name: prometheus
+    rules:
+      - alert: prometheus configuration error
+        expr: prometheus_config_last_reload_successful == 0
+        for: 10m
+        labels:
+          alertgroup: "prometheus"
+      - alert: prometheus target missing
+        expr: up == 0
+        for: 10m
+        labels:
+          alertgroup: "prometheus"
   - name: raid
     rules:
       - alert: raid array degraded
@@ -434,6 +499,22 @@ groups:
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
+  - name: rasdaemon
+    rules:
+      - alert: memory controller errors
+        expr: increase(rasdaemon_mc_events_total[1m]) > 0
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          new_errors: "{{ $value }}"
+      - alert: pcie aer errors
+        expr: increase(rasdaemon_aer_events_total[1m]) > 0
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          new_errors: "{{ $value }}"
   - name: smart
     rules:
       - alert: smart failure
@@ -442,7 +523,7 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: smart ssd wearout approaching
-        expr: smart_percentage_used >= 90
+        expr: smart_percentage_used >= 80
         for: 60m
         labels:
           alertgroup: "{{ $labels.instance }}"
@@ -486,7 +567,7 @@ groups:
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
-      - alert: systemd failed service
+      - alert: systemd failed chef client service
         expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
         for: 6h
         labels:
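
A rendered copy of these rules can be exercised with promtool's rule unit tests before the change is rolled out. A minimal sketch, assuming the ERB template above has been rendered to a plain alert_rules.yml and using an illustrative job/instance pair to drive the relocated "prometheus target missing" rule; the file name and the job/instance labels are assumptions for the example, not part of this diff:

    # test_alert_rules.yml - run with: promtool test rules test_alert_rules.yml
    rule_files:
      - alert_rules.yml
    evaluation_interval: 1m
    tests:
      - interval: 1m
        input_series:
          # illustrative scrape target that stays down for the whole 15 minute window
          - series: 'up{job="node", instance="example:9100"}'
            values: '0x15'
        alert_rule_test:
          - eval_time: 15m
            alertname: prometheus target missing
            exp_alerts:
              - exp_labels:
                  alertgroup: prometheus
                  job: node
                  instance: example:9100

With up == 0 for the full window, the rule's 10m "for" clause has elapsed by the 15m evaluation point, so promtool expects exactly this one firing alert.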