# DO NOT EDIT - This file is being maintained by Chef
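# Prometheus alerting rules. Each rule fires when its PromQL expr holds for the
# rule's configured duration; the alertgroup label groups related alerts
# (usually per instance) and the annotations are rendered with Prometheus's Go
# templating when the alert fires.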
- alert: prometheus target missing
  labels:
    alertgroup: "prometheus"

    alertgroup: "{{ $labels.instance }}"
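# The apache alert fires when more than 80% of an instance's scoreboard slots
# are occupied by busy workers, i.e. the server is close to its worker limit.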
- alert: apache workers busy
  expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    busy_workers: "{{ $value | humanizePercentage }}"
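# The node_pressure_*_waiting_seconds_total counters come from the kernel's PSI
# (pressure stall information) interface; rate() over them approximates the
# fraction of wall-clock time that tasks were stalled on CPU, I/O, or memory.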
  expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.3
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    pressure: "{{ $value | humanizePercentage }}"
- alert: postgres replication delay
  expr: pg_replication_lag_seconds > 5
  annotations:
    delay: "{{ $value | humanizeDuration }}"
- alert: readonly filesystem
  expr: node_filesystem_readonly == 1
  labels:
    alertgroup: "{{ $labels.instance }}"
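# The annotation templates below re-query Prometheus at render time: printf
# builds a selector from the alert's own labels, query evaluates it, and
# "first | value" extracts the scalar from the resulting sample vector.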
- alert: filesystem low on space
  expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    percentage_free: "{{ $value | humanizePercentage }}"
    free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
    total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
- alert: filesystem low on inodes
  expr: node_filesystem_files_free / node_filesystem_files < 0.1
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    percentage_free: "{{ $value | humanizePercentage }}"
    free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
    total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
- alert: hwmon fan alarm
  expr: node_hwmon_fan_alarm == 1
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
    fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: hwmon temperature alarm
  expr: node_hwmon_temp_alarm == 1
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
    temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
    temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: hwmon voltage alarm
  expr: node_hwmon_in_alarm == 1
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
    in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
    in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    pressure: "{{ $value | humanizePercentage }}"
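# The ipmi_* metrics (from ipmi_exporter) expose sensor *_state values as 0
# while the BMC reads nominal, so anything above 0 catches both warning and
# critical states.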
- alert: ipmi fan alarm
  expr: ipmi_fan_speed_state > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: ipmi temperature alarm
  expr: ipmi_temperature_state > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: ipmi voltage alarm
  expr: ipmi_voltage_state > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
- alert: ipmi power alarm
  expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
- alert: exim queue length
  expr: exim_queue > exim_queue_limit
  annotations:
    queue_length: "{{ $value }}"
- alert: mailman queue length
  expr: mailman_queue_length > 200
  annotations:
    queue_length: "{{ $value }}"
- alert: mdadm array inactive
  expr: node_md_state{state="inactive"} > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
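# "sum ... without (state)" drops the state label so that the active-disk count
# can be matched against node_md_disks_required, which carries no state label.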
- alert: mdadm array degraded
  expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm disk failed
  expr: node_md_disks{state="failed"} > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    memory_free: "{{ $value | humanizePercentage }}"
- alert: memory pressure
  expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    pressure: "{{ $value | humanizePercentage }}"
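# node_vmstat_oom_kill is a cumulative counter of kernel OOM kills, so any
# positive increase over the last minute means a process was just killed.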
- alert: oom kill detected
  expr: increase(node_vmstat_oom_kill[1m]) > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    new_oom_kills: "{{ $value }}"
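# Link utilisation is the observed byte rate divided by node_network_speed_bytes
# (the negotiated link speed); the error alerts compare error rates against
# packet rates on the same interface.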
- alert: interface transmit rate
  expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface receive rate
  expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface transmit errors
  expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    error_rate: "{{ $value | humanizePercentage }}"
- alert: interface receive errors
  expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    error_rate: "{{ $value | humanizePercentage }}"
- alert: conntrack entries
  expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    entries_used: "{{ $value | humanizePercentage }}"
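# Planet freshness checks: each rule compares a file's mtime against its
# expected publication cadence, and the "and ignoring (...) chef_role" clause
# restricts the alert to hosts carrying the planetdump Chef role.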
- alert: planet dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  annotations:
    overdue_by: "{{ $value | humanizeDuration }}"
- alert: notes dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  annotations:
    overdue_by: "{{ $value | humanizeDuration }}"
- alert: daily replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  annotations:
    delayed_by: "{{ $value | humanizeDuration }}"
- alert: hourly replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  annotations:
    delayed_by: "{{ $value | humanizeDuration }}"
- alert: minutely replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  annotations:
    delayed_by: "{{ $value | humanizeDuration }}"
- alert: changeset replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  annotations:
    delayed_by: "{{ $value | humanizeDuration }}"
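# The postgresql alerts are driven by postgres_exporter metrics; connection
# usage is the per-server ratio of active connections to max_connections.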
- alert: postgresql down
  labels:
    alertgroup: "{{ $labels.instance }}"
- alert: postgresql replication delay
  expr: pg_replication_lag_seconds > 5
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    delay: "{{ $value | humanizeDuration }}"
- alert: postgresql connection limit
  expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    connections_used: "{{ $value | humanizePercentage }}"
- alert: postgresql deadlocks
  expr: increase(pg_stat_database_deadlocks[1m]) > 5
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    new_deadlocks: "{{ $value }}"
- alert: postgresql slow queries
  expr: pg_slow_queries > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    queries: "{{ $value }}"
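# smart_percentage_used appears to be on a 0-100 scale (hence the >= 90
# threshold): it mirrors the NVMe "percentage used" wear estimate, so its raw
# value is already a percentage.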
- alert: smart failure
  expr: smart_health_status == 0
  labels:
    alertgroup: "{{ $labels.instance }}"
- alert: smart ssd wearout approaching
  expr: smart_percentage_used >= 90
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    percentage_used: "{{ $value }}%"
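# These ssl_* metrics follow ssl_exporter conventions: ssl_verified_cert_not_after
# is the expiry time of the leaf certificate (chain_no="0"), and
# ssl_ocsp_response_status is 0 for good, 1 for revoked, and 2 for unknown.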
- alert: ssl certificate probe failed
  expr: ssl_probe_success == 0
- alert: ssl certificate expiry
  expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
  annotations:
    expires_in: "{{ $value | humanizeDuration }}"
- alert: ssl certificate revoked
  expr: ssl_ocsp_response_status == 1
- alert: ocsp status unknown
  expr: ssl_ocsp_response_status == 2
- alert: systemd failed service
  expr: node_systemd_unit_state{state="failed"} == 1
  labels:
    alertgroup: "{{ $labels.instance }}"
- alert: renderd replication delay
  expr: renderd_replication_delay > 120
  annotations:
    delay: "{{ $value | humanizeDuration }}"
- alert: missed tile rate
  expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
  annotations:
    miss_rate: "{{ $value | humanizePercentage }}"
- alert: clock not synchronising
  expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
  labels:
    alertgroup: "{{ $labels.instance }}"
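# Clock skew fires when the offset exceeds 50ms and is not trending back toward
# zero, i.e. the derivative of the offset has the same sign as the offset itself.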
- alert: clock skew detected
  expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
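# The status regex covers every 5xx code except 509, which the OSM API uses for
# "bandwidth limit exceeded"; it is presumably excluded here because it signals
# client throttling rather than a server-side failure.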
- alert: web error rate
  expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
  annotations:
    error_rate: "{{ $value | humanizePercentage }}"