git.openstreetmap.org Git - chef.git/blobdiff - cookbooks/prometheus/templates/default/alert_rules.yml.erb
Merge remote-tracking branch 'github/pull/527'
index 94a2323d358a03ddc25213ad7c2e04c0945b0b17..28a5b311363a2fe2565bc92906a9dce803e6c39a 100644
@@ -1,13 +1,6 @@
 # DO NOT EDIT - This file is being maintained by Chef
 
 groups:
-  - name: alertmanager
-    rules:
-      - alert: prometheus target missing
-        expr: up == 0
-        for: 5m
-        labels:
-          alertgroup: "prometheus"
   - name: amsterdam
     rules:
       - alert: pdu current draw
@@ -114,13 +107,23 @@ groups:
           delay: "{{ $value | humanizeDuration }}"
   - name: fastly
     rules:
-      - alert: error rate
+      - alert: fastly error rate
         expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
         for: 15m
         labels:
           alertgroup: fastly
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
+      - alert: fastly healthcheck failing
+        expr: count(fastly_healthcheck_status == 0) > 0
+        for: 15m
+        labels:
+          alertgroup: fastly
+      - alert: fastly multiple healthchecks failing
+        expr: count(fastly_healthcheck_status == 0) > 4
+        for: 5m
+        labels:
+          alertgroup: fastly
   - name: filesystem
     rules:
       - alert: readonly filesystem
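The renamed fastly error rate rule divides the 5xx request rate by the total request rate per service and datacenter, so the 0.005 threshold means 0.5% of requests: a service answering 1,000 requests per second with 6 of them 5xx sits at 0.6% and fires after 15 minutes. The two healthcheck rules are tiered the same way: a single failing healthcheck has to persist for 15 minutes, while more than four failing at once alerts after only 5. A minimal promtool unit test for the error-rate rule could look like the sketch below; it is untested, assumes the ERB template has been rendered to a plain alert_rules.yml next to the test file, and the service_name/datacenter values are made up:

    rule_files:
      - alert_rules.yml
    evaluation_interval: 1m
    tests:
      - interval: 1m
        input_series:
          # synthetic counters: 60 5xx and 5940 2xx per minute, i.e. an error ratio of exactly 1%
          - series: 'fastly_rt_status_group_total{status_group="5xx", service_name="web", datacenter="AMS"}'
            values: '0+60x30'
          - series: 'fastly_rt_status_group_total{status_group="2xx", service_name="web", datacenter="AMS"}'
            values: '0+5940x30'
        alert_rule_test:
          - eval_time: 20m
            alertname: fastly error rate
            exp_alerts:
              - exp_labels:
                  alertgroup: fastly
                  service_name: web
                  datacenter: AMS
                exp_annotations:
                  error_rate: "1%"

Running promtool test rules against a file like this should report the alert firing at the 20 minute mark, since the 1% ratio exceeds the threshold from the first evaluation and the 15 minute for: period has elapsed.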
@@ -311,12 +314,19 @@ groups:
         annotations:
           bandwidth_used: "{{ $value | humanizePercentage }}"
       - alert: interface transmit errors
-        expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
+        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
+      - alert: interface transmit errors
+        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
+        for: 1h
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          error_rate: "{{ $value | humanizePercentage }}"
       - alert: interface receive errors
         expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
         for: 5m
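The transmit-errors alert is split into two tiers: everything except WireGuard tunnels (device!~"wg.*") keeps the 1% threshold over 5 minutes, while wg.* devices now only alert at 5% sustained for a full hour, so the two matchers partition the interfaces between the rules. As a worked example, a wg0 interface pushing 200 packets per second with 4 errors per second sits at 2%: over the old blanket limit, but below the new WireGuard threshold. An ad-hoc query along these lines (illustrative only) shows where each interface currently sits:

    # ten interfaces with the highest transmit error ratio over the last minute
    topk(10, rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]))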
@@ -410,6 +420,30 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           queries: "{{ $value }}"
+  - name: prometheus
+    rules:
+      - alert: prometheus configuration error
+        expr: prometheus_config_last_reload_successful == 0
+        for: 10m
+        labels:
+          alertgroup: "prometheus"
+      - alert: prometheus target missing
+        expr: up == 0
+        for: 10m
+        labels:
+          alertgroup: "prometheus"
+  - name: raid
+    rules:
+      - alert: raid array degraded
+        expr: ohai_array_info{status="degraded"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: raid disk failed
+        expr: ohai_disk_info{status="failed"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
   - name: smart
     rules:
       - alert: smart failure
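The new prometheus group replaces the alertmanager group removed at the top of this diff: up is the synthetic series Prometheus records for every scrape target (1 when the scrape succeeds, 0 when it fails), and prometheus_config_last_reload_successful is Prometheus's own report on its last configuration reload, so a broken config now alerts within 10 minutes; the target-missing rule also gains a longer for: period (10m instead of the old 5m) to ride out single failed scrapes. The raid rules rely on the site's ohai_* metrics, of which only the status label is visible here. Quick ad-hoc checks of scrape health (illustrative only) might look like:

    # targets currently failing their scrape, and a per-job count of them
    up == 0
    count by (job) (up == 0)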
@@ -418,7 +452,7 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: smart ssd wearout approaching
-        expr: smart_percentage_used >= 90
+        expr: smart_percentage_used >= 80
         for: 60m
         labels:
           alertgroup: "{{ $labels.instance }}"
@@ -452,7 +486,7 @@ groups:
     rules:
       - alert: statuscake uptime check failing
         expr: statuscake_uptime{status="down",paused="false"} > 0
-        for: 0m
+        for: 10m
         labels:
           alertgroup: statuscake
   - name: systemd
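With for: 0m the StatusCake rule paged on the first sample that saw a check down; raising it to 10m means a check has to stay down for ten minutes before alerting, which filters brief flaps (paused checks are already excluded by paused="false"). The sample shape the matcher implies is roughly the following, though the metric's other labels, such as the check name, are not shown in this diff:

    # hypothetical sample: one series per down, unpaused check, with a non-zero value
    statuscake_uptime{status="down", paused="false"} 1

Once the template is rendered, promtool check rules alert_rules.yml validates the whole file, including these changes.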