Add some additional Prometheus alerts
index d2e076281dcdcc6f70f433a4d233414a5cb014e7..0e834474c692123c1bb80f7a02c23f6754b9c99d 100644
--- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb
+++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
@@ -8,6 +8,30 @@ groups:
         for: 5m
         labels:
           alertgroup: "prometheus"
+  - name: apache
+    rules:
+      - alert: apache down
+        expr: apache_up == 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
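+      # Busy workers as a share of all Apache scoreboard slots (apache_exporter metrics).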
+      - alert: apache workers busy
+        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          busy_workers: "{{ $value | humanizePercentage }}"
+  - name: database
+    rules:
+      - alert: postgres replication delay
+        expr: pg_replication_lag_seconds > 5
+        for: 5m
+        labels:
+          alertgroup: database
+        annotations:
+          delay: "{{ $value | humanizeDuration }}"
   - name: hwmon
     rules:
       - alert: hwmon fan alarm
@@ -84,12 +108,12 @@ groups:
   - name: memory
     rules:
       - alert: low memory
-        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
+        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
-          memory_free: "{{ $value }}%"
+          memory_free: "{{ $value | humanizePercentage }}"
       - alert: memory pressure
         expr: rate(node_vmstat_pgmajfault[1m]) > 1000
         for: 5m
@@ -104,3 +128,138 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           new_oom_kills: "{{ $value }}"
+  - name: network
+    rules:
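+      # One-minute average throughput as a fraction of the link speed reported by node_exporter.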
+      - alert: interface transmit rate
+        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          bandwidth_used: "{{ $value | humanizePercentage }}"
+      - alert: interface receive rate
+        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          bandwidth_used: "{{ $value | humanizePercentage }}"
+      - alert: interface transmit errors
+        expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          error_rate: "{{ $value | humanizePercentage }}"
+      - alert: interface receive errors
+        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          error_rate: "{{ $value | humanizePercentage }}"
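+      # The kernel drops new connections once the conntrack table fills up.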
+      - alert: conntrack entries
+        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          entries_used: "{{ $value | humanizePercentage }}"
+  - name: postgresql
+    rules:
+      - alert: postgresql down
+        expr: pg_up == 0
+        for: 1m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: postgresql replication delay
+        expr: pg_replication_lag_seconds > 5
+        for: 1m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          delay: "{{ $value | humanizeDuration }}"
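+      # Connections in use as a fraction of max_connections, per server.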
+      - alert: postgresql connection limit
+        expr: sum(pg_stat_activity_count) by (instance, server) / sum(pg_settings_max_connections) by (instance, server) > 0.8
+        for: 1m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          connections_used: "{{ $value | humanizePercentage }}"
+      - alert: postgresql deadlocks
+        expr: increase(pg_stat_database_deadlocks[1m]) > 5
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          new_deadlocks: "{{ $value }}"
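+      # pg_slow_queries is not a stock postgres_exporter metric; presumably defined by a custom query.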
+      - alert: postgresql slow queries
+        expr: pg_slow_queries > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          queries: "{{ $value }}"
+  - name: ssl
+    rules:
+      - alert: ssl certificate probe failed
+        expr: ssl_probe_success == 0
+        for: 60m
+        labels:
+          alertgroup: ssl
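+      # Warn when the leaf certificate (chain_no 0) expires within 14 days (86400 * 14 seconds).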
+      - alert: ssl certificate expiry
+        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
+        for: 0m
+        labels:
+          alertgroup: ssl
+        annotations:
+          expires_in: "{{ $value | humanizeDuration }}"
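+      # ssl_exporter reports the OCSP response status as 0 = good, 1 = revoked, 2 = unknown.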
+      - alert: ssl certificate revoked
+        expr: ssl_ocsp_response_status == 1
+        for: 0m
+        labels:
+          alertgroup: ssl
+      - alert: ocsp status unknown
+        expr: ssl_ocsp_response_status == 2
+        for: 0m
+        labels:
+          alertgroup: ssl
+  - name: tile
+    rules:
+      - alert: renderd replication delay
+        expr: renderd_replication_delay > 120
+        for: 5m
+        labels:
+          alertgroup: tile
+        annotations:
+          delay: "{{ $value | humanizeDuration }}"
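+      # Share of mod_tile requests answered with a 404.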
+      - alert: missed tile rate
+        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
+        for: 5m
+        labels:
+          alertgroup: tile
+        annotations:
+          miss_rate: "{{ $value | humanizePercentage }}"
+  - name: time
+    rules:
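+      # An unsynchronised kernel clock pegs node_timex_maxerror_seconds at its 16 second ceiling.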
+      - alert: clock not synchronising
+        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
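+      # Offset is more than 50ms from true time and not converging back towards zero.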
+      - alert: clock skew detected
+        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"