Fix error in rasdaemon alerts
[chef.git] / cookbooks/prometheus/templates/default/alert_rules.yml.erb
index 96f7fdb0248045de20c050d159a8e5f18eef681a..23b94727cfc6e47b929467f01baab325559653a0 100644
@@ -1,13 +1,6 @@
 # DO NOT EDIT - This file is being maintained by Chef
 
 groups:
-  - name: alertmanager
-    rules:
-      - alert: prometheus target missing
-        expr: up == 0
-        for: 5m
-        labels:
-          alertgroup: "prometheus"
   - name: amsterdam
     rules:
       - alert: pdu current draw
@@ -75,11 +68,15 @@ groups:
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
+        annotations:
+          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
       - alert: cisco temperature alarm
         expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
+        annotations:
+          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
       - alert: cisco main power alarm
         expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
         for: 5m
@@ -110,13 +107,23 @@ groups:
           delay: "{{ $value | humanizeDuration }}"
   - name: fastly
     rules:
-      - alert: error rate
+      - alert: fastly error rate
         expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
         for: 15m
         labels:
           alertgroup: fastly
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
+      - alert: fastly healthcheck failing
+        expr: count(fastly_healthcheck_status == 0) > 0
+        for: 15m
+        labels:
+          alertgroup: fastly
+      - alert: multiple fastly healthchecks failing
+        expr: count(fastly_healthcheck_status == 0) > 4
+        for: 5m
+        labels:
+          alertgroup: fastly
   - name: filesystem
     rules:
       - alert: readonly filesystem
@@ -307,12 +314,19 @@ groups:
         annotations:
           bandwidth_used: "{{ $value | humanizePercentage }}"
       - alert: interface transmit errors
-        expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
+        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
+      - alert: wireguard interface transmit errors
+        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
+        for: 1h
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          error_rate: "{{ $value | humanizePercentage }}"
       - alert: interface receive errors
         expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
         for: 5m
@@ -393,7 +407,7 @@ groups:
         annotations:
           connections_used: "{{ $value | humanizePercentage }}"
       - alert: postgresql deadlocks
-        expr: increase(pg_stat_database_deadlocks[1m]) > 5
+        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
         for: 0m
         labels:
           alertgroup: "{{ $labels.instance }}"
@@ -406,6 +420,46 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           queries: "{{ $value }}"
+  - name: prometheus
+    rules:
+      - alert: prometheus configuration error
+        expr: prometheus_config_last_reload_successful == 0
+        for: 10m
+        labels:
+          alertgroup: "prometheus"
+      - alert: prometheus target missing
+        expr: up == 0
+        for: 10m
+        labels:
+          alertgroup: "prometheus"
+  - name: raid
+    rules:
+      - alert: raid array degraded
+        expr: ohai_array_info{status="degraded"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: raid disk failed
+        expr: ohai_disk_info{status="failed"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+  - name: rasdaemon
+    rules:
+      - alert: memory controller errors
+        expr: increase(rasdaemon_mc_events_total[1m]) > 0
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          new_errors: "{{ $value }}"
+      - alert: pcie aer errors
+        expr: increase(rasdaemon_aer_events_total[1m]) > 0
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          new_errors: "{{ $value }}"
   - name: smart
     rules:
       - alert: smart failure
@@ -414,7 +468,7 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: smart ssd wearout approaching
-        expr: smart_percentage_used >= 90
+        expr: smart_percentage_used >= 80
         for: 60m
         labels:
           alertgroup: "{{ $labels.instance }}"
@@ -448,7 +502,7 @@ groups:
     rules:
       - alert: statuscake uptime check failing
         expr: statuscake_uptime{status="down",paused="false"} > 0
-        for: 0m
+        for: 10m
         labels:
           alertgroup: statuscake
   - name: systemd
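
The reworked prometheus group (target-missing "for" raised from 5m to 10m, plus a configuration-reload check) can be exercised with promtool rule unit tests once the ERB template has been rendered. The sketch below is a minimal example, assuming the rendered output is saved as alert_rules.yml; the job and instance labels on the input series are illustrative only and not part of this cookbook.

# alert_rules_test.yml - hypothetical promtool unit test for the rendered template
rule_files:
  - alert_rules.yml              # assumed name of the rendered file

evaluation_interval: 1m

tests:
  - interval: 1m
    # simulate a scrape target that is down (up == 0) for the whole window
    input_series:
      - series: 'up{job="node", instance="example.example.com:9100"}'
        values: '0x15'
    alert_rule_test:
      # after 15 minutes the 10m "for" clause has elapsed, so the alert fires
      - eval_time: 15m
        alertname: prometheus target missing
        exp_alerts:
          - exp_labels:
              alertgroup: prometheus
              job: node
              instance: example.example.com:9100

Running "promtool test rules alert_rules_test.yml" evaluates the test; "promtool check rules" on the rendered file is a quicker way to validate rule syntax before deployment.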