# DO NOT EDIT - This file is being maintained by Chef

groups:
  - name: amsterdam
    rules:
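      # the rPDU2 SNMP gauges are assumed to be scaled integers per the APC
      # MIB: current in tenths of an amp, apparent power in hundredths of a
      # kVA, temperature in tenths of a degree C and relative humidity in
      # whole percentage points, hence the divisions in the expressions below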
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          power: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: discourse
    rules:
      - alert: discourse job failure rate
        expr: rate(discourse_job_failures[5m]) > 0
        for: 5m
        labels:
          alertgroup: discourse
        annotations:
          failure_rate: "{{ $value }} jobs/s"
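  # same PDU and environment checks as amsterdam, with a 4kVA power budget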
  - name: dublin
    rules:
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          power: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly healthcheck failing
        expr: count(fastly_healthcheck_status == 0) by (service) > 0
        for: 15m
        labels:
          alertgroup: fastly
      - alert: multiple fastly healthchecks failing
        expr: count(fastly_healthcheck_status == 0) by (service) > 4
        for: 5m
        labels:
          alertgroup: fastly
  - name: filesystem
    rules:
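      # comparing against the 7-day minimum means this only fires when a
      # filesystem that was recently writable turns read-only, not for mounts
      # that are permanently read-only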
      - alert: readonly filesystem
        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
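      # jnxOperatingContentsIndex selects the chassis subsystem; the values
      # below are assumed to map to 7 = routing engine, 4 = fans and
      # 2 = power supplies in the Juniper chassis MIB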
      - alert: juniper cpu alarm
        expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper fan alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: mail
    rules:
      - alert: exim down
        expr: exim_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
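      # exim_queue and exim_queue_limit are scraped by different jobs, so the
      # job label has to be ignored for the vector match to succeed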
      - alert: exim queue length
        expr: exim_queue > ignoring(job) exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
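      # "without (state)" drops the state label so the active disk count can
      # be matched against node_md_disks_required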
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: mysql
    rules:
      - alert: mysql down
        expr: mysql_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: mysql connection limit
        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
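      # wg* interfaces are excluded here and handled by the separate wireguard
      # rule below, which uses a looser threshold and a longer hold time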
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 3600
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
  - name: passenger
    rules:
      - alert: passenger down
        expr: passenger_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger queuing
        expr: passenger_top_level_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger application queuing
        expr: passenger_app_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
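  # every rule in this group is additionally gated on the host holding the
  # planetdump chef role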
  - name: planet
    rules:
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
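      # the total observation count exceeding the le="300" bucket means at
      # least one session has been idle in transaction for over five minutes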
      - alert: postgresql idle transactions
        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid controller battery failed
        expr: ohai_controller_info{battery_status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid controller battery recharging
        expr: ohai_controller_info{battery_status="recharging"} > 0
        for: 4h
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used / 100 >= 0.8
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
  - name: smokeping
    rules:
      - alert: packet loss
        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
        for: 10m
        labels:
          alertgroup: smokeping
        annotations:
          loss_rate: "{{ $value | humanizePercentage }}"
  - name: snmp
    rules:
      - alert: snmp pdus missing
        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
        for: 15m
        labels:
          alertgroup: snmp
        annotations:
          missing_pdus: "{{ $value }}"
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
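      # ssl_ocsp_response_status: 0 = good, 1 = revoked, 2 = unknown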
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_paused == 0 and statuscake_up == 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
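      # chef-client.service is expected to return to the inactive state between
      # runs; never being inactive within six hours suggests a run is stuck or
      # failing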
      - alert: systemd failed chef client service
        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: taginfo
    rules:
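      # 129600 seconds is 36 hours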
      - alert: taginfo planet age
        expr: time() - taginfo_data_from_seconds > 129600
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database age
        expr: time() - taginfo_database_update_finish_seconds > 129600
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database size
        expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
        for: 30m
        labels:
          alertgroup: taginfo
        annotations:
          size_change: "{{ $value | humanizePercentage }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
      - alert: tile render rate
        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          render_rate: "{{ $value }} tiles/s"
  - name: time
    rules:
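      # the kernel pins maxerror at 16 seconds while the clock is
      # unsynchronised, so both conditions together mean sync has genuinely
      # been lost rather than momentarily flapping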
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }} {{ . | first | value | humanizeDuration }}{{ end }}"
  - name: web
    rules:
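      # the status regex matches every 5xx code except 509 (bandwidth limit
      # exceeded), which is left out of the error rate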
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
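      # the ratio of rows deleted to rows inserted on delayed_jobs approximates
      # the fraction of queued jobs being processed; evaluated only on the host
      # holding the db-master chef role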
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 1h
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"