# DO NOT EDIT - This file is being maintained by Chef

groups:
  - name: amsterdam
    rules:
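      # The APC rPDU2 MIB reports fixed-point gauges: current and
      # temperature arrive in tenths of an amp and tenths of a degree C
      # (hence the / 10), while the / 100 factors scale apparent power to
      # kVA and humidity to a 0-1 fraction for humanizePercentage.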
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          power: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
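      # Compares the current request rate against the same hour one week
      # earlier; the second clause stops the alert firing on hosts whose
      # baseline traffic is too low to be meaningful.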
      - alert: apache low request rate
        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          request_rate: "{{ $value | humanizePercentage }}"
  - name: chef
    rules:
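      # chef-client runs from a systemd timer; this fires when the last
      # trigger is more than an hour old and that state persists for 12h.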
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
  - name: cisco
    rules:
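      # The annotations here (and in later groups) use Prometheus's
      # template `query` function via `with printf ... | query` to look up
      # a related gauge for the firing series and render its value.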
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: cpu
    rules:
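      # node_pressure_* gauges come from the kernel's PSI accounting; the
      # rate of the waiting-time counter approximates the share of
      # wall-clock time that runnable tasks spent stalled on CPU.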
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: discourse
    rules:
      - alert: discourse job failure rate
        expr: rate(discourse_job_failures[5m]) > 0
        for: 5m
        labels:
          alertgroup: discourse
        annotations:
          failure_rate: "{{ $value }} jobs/s"
  - name: dublin
    rules:
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          power: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly healthcheck failing
        expr: count(fastly_healthcheck_status == 0) by (service) > 0
        for: 15m
        labels:
          alertgroup: fastly
      - alert: multiple fastly healthchecks failing
        expr: count(fastly_healthcheck_status == 0) by (service) > 4
        for: 5m
        labels:
          alertgroup: fastly
  - name: filesystem
    rules:
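      # A filesystem that has just gone read-only shows up as the readonly
      # flag rising above its minimum over the previous week, which keeps
      # deliberately read-only mounts from alerting.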
      - alert: readonly filesystem
        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
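      # jnxOperatingContentsIndex selects the component class in the
      # Juniper operating tables; indexes 7, 4 and 2 appear to correspond
      # to routing engines, fans and power supplies respectively.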
      - alert: juniper cpu alarm
        expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper fan alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: mail
    rules:
      - alert: exim down
        expr: exim_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: exim queue length
        expr: exim_queue > exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
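      # Degraded means fewer active disks than the array needs; the sum
      # drops the state label so the result can be compared against
      # node_md_disks_required directly.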
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: mysql
    rules:
      - alert: mysql down
        expr: mysql_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: mysql connection limit
        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
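      # WireGuard interfaces tend to report transmit errors whenever a peer
      # is unreachable, so they get a looser threshold and a longer hold
      # time than the physical interfaces above.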
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 3600
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
  - name: passenger
    rules:
      - alert: passenger down
        expr: passenger_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger queuing
        expr: passenger_top_level_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger application queuing
        expr: passenger_app_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: planet
    rules:
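      # The `and ignoring (job, name, path) chef_role{name="planetdump"} == 1`
      # join restricts each rule to hosts carrying the planetdump Chef role.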
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
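      # Non-zero exactly when some transaction has been idle for more than
      # the 300s bucket boundary: the total observation count exceeds the
      # count of observations at or below 300s.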
      - alert: postgresql idle transactions
        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid controller battery failed
        expr: ohai_controller_info{battery_status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid controller battery recharging
        expr: ohai_controller_info{battery_status="recharging"} > 0
        for: 4h
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used / 100 >= 0.8
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
  - name: smokeping
    rules:
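      # Loss is inferred by comparing responses received (the histogram
      # sample count) against probes sent.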
      - alert: packet loss
        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
        for: 10m
        labels:
          alertgroup: smokeping
        annotations:
          loss_rate: "{{ $value | humanizePercentage }}"
  - name: snmp
    rules:
      - alert: snmp pdus missing
        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
        for: 15m
        labels:
          alertgroup: snmp
        annotations:
          missing_pdus: "{{ $value }}"
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
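      # ssl_ocsp_response_status follows the RFC 6960 certificate status
      # codes: 0 = good, 1 = revoked, 2 = unknown.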
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_paused == 0 and statuscake_up == 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
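      # chef-client.service should return to the inactive state after every
      # timer-driven run; never being inactive within six hours means it is
      # stuck or failing repeatedly.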
      - alert: systemd failed chef client service
        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: taginfo
    rules:
      - alert: taginfo planet age
        expr: time() - taginfo_data_from_seconds > 129600
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database age
        expr: time() - taginfo_database_update_finish_seconds > 129600
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database size
        expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
        for: 30m
        labels:
          alertgroup: taginfo
        annotations:
          size_change: "{{ $value | humanizePercentage }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
      - alert: tile render rate
        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          render_rate: "{{ $value }} tiles/s"
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
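      # Only fires while the offset is both large and still drifting away
      # from zero; an offset that NTP is actively correcting does not alert.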
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }} {{ . | first | value | humanizeDuration }}{{ end }}"
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
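      # delayed_jobs rows are deleted as jobs complete, so the delete/insert
      # ratio approximates jobs processed versus jobs enqueued; the
      # chef_role join limits the rule to the database master.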
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 15m
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"