# DO NOT EDIT - This file is being maintained by Chef
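# Each rule below follows the standard Prometheus alerting-rule schema:
# `expr` is the PromQL condition, `labels` attach routing metadata (the
# alertgroup label drives notification routing here), and `annotations`
# carry templated detail for the alert text. As a sketch, a complete rule
# with the optional `for:` hold period looks like this (the 10m value is
# illustrative only):
#
#   - alert: prometheus target missing
#     expr: up == 0
#     for: 10m
#     labels:
#       alertgroup: "prometheus"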
- alert: prometheus target missing
  expr: up == 0
  labels:
    alertgroup: "prometheus"
- alert: pdu current draw
  expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
  labels:
    alertgroup: "amsterdam"
  annotations:
    current: "{{ $value | humanize }}A"
- alert: site current draw
  expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
  labels:
    alertgroup: "amsterdam"
  annotations:
    current: "{{ $value | humanize }}A"
- alert: site temperature
  expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 25
  labels:
    alertgroup: "amsterdam"
  annotations:
    temperature: "{{ $value | humanize }}C"
- alert: site humidity
  expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
  labels:
    alertgroup: "amsterdam"
  annotations:
    humidity: "{{ $value | humanizePercentage }}"
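# The rPDU2 SNMP gauges report scaled integers, which is why the
# expressions divide before comparing: rPDU2PhaseStatusCurrent and
# rPDU2SensorTempHumidityStatusTempC appear to be in tenths of an amp
# and tenths of a degree Celsius respectively, and relative humidity is
# divided by 100 to give the 0-1 ratio that humanizePercentage expects.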
- alert: apache down
  expr: apache_up == 0
  labels:
    alertgroup: "{{ $labels.instance }}"
- alert: apache workers busy
  expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    busy_workers: "{{ $value | humanizePercentage }}"
- alert: apache low request rate
  expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[5m] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    request_rate: "{{ $value | humanizePercentage }}"
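# This is a week-over-week comparison: the first clause fires when the
# current request rate falls below 25% of the rate at the same time last
# week, while the second clause suppresses the alert unless there were
# at least 2 requests/second a week ago, so quiet hosts do not page on
# noise. The 5m rate window is an assumption standing in for
# `$__rate_interval`, which is a Grafana dashboard variable that
# Prometheus rule files cannot expand.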
- alert: chef client not running
  expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    down_time: "{{ $value | humanizeDuration }}"
- alert: cpu pressure
  expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    pressure: "{{ $value | humanizePercentage }}"
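# node_pressure_*_waiting_seconds_total are Linux PSI counters of the
# total time tasks spent stalled on a resource, so rate() over 5m gives
# the fraction of wall-clock time spent stalled; > 0.6 means tasks were
# waiting on the CPU more than 60% of the time. The io and memory
# pressure rules below use the same construction.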
- alert: postgres replication delay
  expr: pg_replication_lag_seconds > 5
  annotations:
    delay: "{{ $value | humanizeDuration }}"
- alert: fastly error rate
  expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
  annotations:
    error_rate: "{{ $value | humanizePercentage }}"
- alert: readonly filesystem
  expr: node_filesystem_readonly == 1
  labels:
    alertgroup: "{{ $labels.instance }}"
- alert: filesystem low on space
  expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    percentage_free: "{{ $value | humanizePercentage }}"
    free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
    total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
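# The free_bytes/total_bytes annotations use the Prometheus template
# idiom that recurs throughout this file: printf builds an instant-query
# string from this alert's labels, `query` executes it against
# Prometheus when the notification is rendered, and `first | value`
# extracts the sample so it can be formatted. A minimal hypothetical
# annotation using the same idiom:
#
#   example: "{{ with printf \"node_load1{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanize }}{{ end }}"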
- alert: filesystem low on inodes
  expr: node_filesystem_files_free / node_filesystem_files < 0.1
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    percentage_free: "{{ $value | humanizePercentage }}"
    free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
    total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
- alert: hwmon fan alarm
  expr: node_hwmon_fan_alarm == 1
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
    fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: hwmon temperature alarm
  expr: node_hwmon_temp_alarm == 1
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
    temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
    temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: hwmon voltage alarm
  expr: node_hwmon_in_alarm == 1
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
    in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
    in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
- alert: io pressure
  expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    pressure: "{{ $value | humanizePercentage }}"
- alert: ipmi fan alarm
  expr: ipmi_fan_speed_state > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: ipmi temperature alarm
  expr: ipmi_temperature_state > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: ipmi voltage alarm
  expr: ipmi_voltage_state > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
- alert: ipmi power alarm
  expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
- alert: exim queue length
  expr: exim_queue > exim_queue_limit
  annotations:
    queue_length: "{{ $value }}"
- alert: mailman queue length
  expr: mailman_queue_length > 200
  annotations:
    queue_length: "{{ $value }}"
- alert: mdadm array inactive
  expr: node_md_state{state="inactive"} > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm array degraded
  expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm disk failed
  expr: node_md_disks{state="failed"} > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
    spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
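# The degraded check sums the active-disk count with the state label
# dropped so the result can be compared against node_md_disks_required,
# which carries no state label. For example, a RAID1 array with
# node_md_disks_required == 2 and only one active member yields 1 < 2
# and the alert fires even though the array is still running.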
- alert: low memory
  expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    memory_free: "{{ $value | humanizePercentage }}"
- alert: memory pressure
  expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    pressure: "{{ $value | humanizePercentage }}"
- alert: oom kill detected
  expr: increase(node_vmstat_oom_kill[1m]) > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    new_oom_kills: "{{ $value }}"
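# node_vmstat_oom_kill is a monotonic counter, so increase() over 1m
# gives the number of OOM kills in the last minute and the alert fires
# only while new kills are being recorded.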
- alert: interface transmit rate
  expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface receive rate
  expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    bandwidth_used: "{{ $value | humanizePercentage }}"
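# node_network_speed_bytes is the negotiated link speed expressed in
# bytes per second, so dividing the observed byte rate by it yields link
# utilisation as a 0-1 ratio; > 0.98 flags a saturated interface.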
- alert: interface transmit errors
  expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    error_rate: "{{ $value | humanizePercentage }}"
- alert: interface receive errors
  expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    error_rate: "{{ $value | humanizePercentage }}"
- alert: conntrack entries
  expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    entries_used: "{{ $value | humanizePercentage }}"
- alert: planet dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  annotations:
    overdue_by: "{{ $value | humanizeDuration }}"
- alert: notes dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  annotations:
    overdue_by: "{{ $value | humanizeDuration }}"
- alert: daily replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  annotations:
    delayed_by: "{{ $value | humanizeDuration }}"
- alert: hourly replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  annotations:
    delayed_by: "{{ $value | humanizeDuration }}"
- alert: minutely replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  annotations:
    delayed_by: "{{ $value | humanizeDuration }}"
- alert: changeset replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  annotations:
    delayed_by: "{{ $value | humanizeDuration }}"
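# The `and ignoring (job, name, path) chef_role{name="planetdump"} == 1`
# clause restricts these freshness checks to hosts carrying the
# planetdump Chef role: `and` keeps only file_stat samples whose
# remaining labels (effectively the instance, once job/name/path are
# ignored) match a chef_role series, so unrelated or decommissioned
# hosts never alert.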
- alert: postgresql down
  expr: pg_up == 0
  labels:
    alertgroup: "{{ $labels.instance }}"
- alert: postgresql replication delay
  expr: pg_replication_lag_seconds > 5
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    delay: "{{ $value | humanizeDuration }}"
- alert: postgresql connection limit
  expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    connections_used: "{{ $value | humanizePercentage }}"
- alert: postgresql deadlocks
  expr: increase(pg_stat_database_deadlocks[1m]) > 5
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    new_deadlocks: "{{ $value }}"
- alert: postgresql slow queries
  expr: pg_slow_queries > 0
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    queries: "{{ $value }}"
- alert: smart failure
  expr: smart_health_status == 0
  labels:
    alertgroup: "{{ $labels.instance }}"
- alert: smart ssd wearout approaching
  expr: smart_percentage_used >= 90
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    percentage_used: "{{ $value }}%"
- alert: ssl certificate probe failed
  expr: ssl_probe_success == 0
- alert: ssl certificate expiry
  expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
  annotations:
    expires_in: "{{ $value | humanizeDuration }}"
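# 86400 * 14 is 14 days in seconds, and chain_no="0" selects the first
# certificate in the verified chain (the served leaf certificate) as
# exposed by ssl_exporter, so this fires two weeks before the site's
# own certificate expires rather than on intermediates.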
- alert: ssl certificate revoked
  expr: ssl_ocsp_response_status == 1
- alert: ocsp status unknown
  expr: ssl_ocsp_response_status == 2
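# ssl_ocsp_response_status follows the standard OCSP status encoding of
# 0 = good, 1 = revoked, 2 = unknown, which is why revocation and an
# unknown responder answer are split into two separate rules.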
- alert: statuscake uptime check failing
  expr: statuscake_uptime{status="down",paused="false"} > 0
  labels:
    alertgroup: statuscake
- alert: systemd failed service
  expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
  labels:
    alertgroup: "{{ $labels.instance }}"
- alert: systemd failed chef client service
  expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
  labels:
    alertgroup: "{{ $labels.instance }}"
- alert: renderd replication delay
  expr: renderd_replication_delay > 120
  annotations:
    delay: "{{ $value | humanizeDuration }}"
- alert: missed tile rate
  expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
  annotations:
    miss_rate: "{{ $value | humanizePercentage }}"
- alert: clock not synchronising
  expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
  labels:
    alertgroup: "{{ $labels.instance }}"
- alert: clock skew detected
  expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
  labels:
    alertgroup: "{{ $labels.instance }}"
  annotations:
    skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
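# The deriv() guard makes this fire only when the clock is more than
# 50ms out and the offset is not already heading back toward zero: a
# positive offset that is still growing, or a negative offset that is
# still falling, indicates NTP is not correcting the skew.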
- alert: web error rate
  expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
  annotations:
    error_rate: "{{ $value | humanizePercentage }}"
- alert: job processing rate
  expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
  annotations:
    job_processing_rate: "{{ $value | humanizePercentage }}"
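# The delayed_jobs table backs the Rails background-job queue: a row is
# inserted per enqueued job and deleted on completion, so the
# delete/insert rate ratio approximates the fraction of new work being
# kept up with; below 0.9 the queue is growing. The chef_role clause
# scopes the check to the db-master host, where the table statistics
# are authoritative.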