diff --git a/cookbooks/prometheus/templates/default/alert_rules.yml.erb b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
index c0f5e0c7758c7a7acb5a4c1e631c7e268fd0d315..92c38d5c5d1f35e54db24742a8a74747aa7b21b4 100644
--- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb
+++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
@@ -22,6 +22,15 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           busy_workers: "{{ $value | humanizePercentage }}"
+  - name: cpu
+    rules:
+      - alert: cpu pressure
+        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.3
+        for: 15m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          pressure: "{{ $value | humanizePercentage }}"
   - name: database
     rules:
       - alert: postgres replication delay
@@ -31,8 +40,22 @@ groups:
           alertgroup: database
         annotations:
           delay: "{{ $value | humanizeDuration }}"
+  - name: fastly
+    rules:
+      - alert: error rate
+        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
+        for: 15m
+        labels:
+          alertgroup: fastly
+        annotations:
+          error_rate: "{{ $value | humanizePercentage }}"
   - name: filesystem
     rules:
+      - alert: readonly filesystem
+        expr: node_filesystem_readonly == 1
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
       - alert: filesystem low on space
         expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
         for: 5m
@@ -79,6 +102,15 @@ groups:
           in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
           in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
           in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
+  - name: io
+    rules:
+      - alert: io pressure
+        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
+        for: 60m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          pressure: "{{ $value | humanizePercentage }}"
   - name: ipmi
     rules:
       - alert: ipmi fan alarm
@@ -107,6 +139,22 @@ groups:
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
+  - name: mail
+    rules:
+      - alert: exim queue length
+        expr: exim_queue > exim_queue_limit
+        for: 60m
+        labels:
+          alertgroup: mail
+        annotations:
+          queue_length: "{{ $value }}"
+      - alert: mailman queue length
+        expr: mailman_queue_length > 200
+        for: 60m
+        labels:
+          alertgroup: mail
+        annotations:
+          queue_length: "{{ $value }}"
   - name: mdadm
     rules:
       - alert: mdadm array inactive
@@ -143,18 +191,18 @@ groups:
     rules:
       - alert: low memory
         expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
-        for: 5m
+        for: 15m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           memory_free: "{{ $value | humanizePercentage }}"
       - alert: memory pressure
-        expr: rate(node_vmstat_pgmajfault[1m]) > 1000
-        for: 5m
+        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
+        for: 60m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
-          major_page_faults: "{{ $value }} faults/s"
+          pressure: "{{ $value | humanizePercentage }}"
       - alert: oom kill detected
         expr: increase(node_vmstat_oom_kill[1m]) > 0
         for: 0m
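
The new cpu, io, and memory pressure rules all follow the same pattern, so a brief worked example of the threshold arithmetic may help; this is a sketch of how node_exporter exposes the Linux PSI counters, with illustrative numbers that are not taken from the repository:

# node_pressure_*_waiting_seconds_total counters accumulate seconds during
# which some task was stalled on that resource, so rate() over a window
# yields the fraction of wall-clock time spent waiting. For example, if
# the memory counter advances from 1200s to 1290s across a 300s window:
#   rate(node_pressure_memory_waiting_seconds_total[5m])
#     = (1290 - 1200) / 300 = 0.3, i.e. stalled 30% of the time,
# which is why the annotations render $value with humanizePercentage.
# These metrics require kernel PSI support (CONFIG_PSI, Linux >= 4.20).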