# DO NOT EDIT - This file is being maintained by Chef
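# Conventions: "for" is how long an expression must hold before the alert
# fires, and the "alertgroup" label is presumably what Alertmanager routes
# on. Annotation values are Go templates; "first | value" extracts a
# scalar from the result of an instant query.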

groups:
  - name: amsterdam
    rules:
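      # The APC rPDU2 SNMP gauges appear to use fixed-point scaling:
      # current in tenths of an amp (hence /10), apparent power seemingly
      # in hundredths of a kVA, temperature in tenths of a degree C, and
      # humidity in whole percent (so /100 yields a fraction).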
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site current draw
        expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 100) > 3
        for: 0m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
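      # Week-over-week sanity check: fires when the current request rate
      # drops below a quarter of the rate at the same time last week, but
      # only where last week's rate was itself above 2 requests/s.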
      - alert: apache low request rate
        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          request_rate: "{{ $value | humanizePercentage }}"
  - name: chef
    rules:
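      # The chef-client.timer is expected to trigger at least hourly (hence
      # the 3600s threshold); the 12h "for" tolerates transient gaps.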
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: discourse
    rules:
      - alert: discourse job failure rate
        expr: rate(discourse_job_failures[5m]) > 0
        for: 5m
        labels:
          alertgroup: discourse
        annotations:
          failure_rate: "{{ $value }} jobs/s"
  - name: dublin
    rules:
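      # Same checks as the amsterdam group, with Dublin-specific thresholds.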
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 13
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site current draw
        expr: sum(rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10) > 17
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"} / 100) > 4
        for: 0m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly healthcheck failing
        expr: count(fastly_healthcheck_status == 0) by (service) > 0
        for: 15m
        labels:
          alertgroup: fastly
      - alert: multiple fastly healthchecks failing
        expr: count(fastly_healthcheck_status == 0) by (service) > 4
        for: 5m
        labels:
          alertgroup: fastly
  - name: filesystem
    rules:
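      # min_over_time gives the lowest readonly flag seen over the last
      # week, so this only fires when a filesystem newly flips to read-only.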
      - alert: readonly filesystem
        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
      - alert: juniper cpu alarm
        expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper fan alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: mail
    rules:
      - alert: exim down
        expr: exim_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: exim queue length
        expr: exim_queue > exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
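      # "without (state)" drops the state label so the active-disk count
      # can be compared directly against node_md_disks_required.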
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: mysql
    rules:
      - alert: mysql down
        expr: mysql_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: mysql connection limit
        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
  - name: network
    rules:
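      # Utilisation is the byte rate over the advertised link speed; the
      # 0.98 threshold leaves a little headroom below full saturation.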
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 3600
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
  - name: passenger
    rules:
      - alert: passenger down
        expr: passenger_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger queuing
        expr: passenger_top_level_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger application queuing
        expr: passenger_app_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: planet
    rules:
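      # The "and ignoring (...) chef_role{name=...} == 1" guards below
      # restrict each alert to hosts carrying the relevant Chef role,
      # ignoring the labels that differ between the two series.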
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid controller battery failed
        expr: ohai_controller_info{battery_status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid controller battery recharging
        expr: ohai_controller_info{battery_status="recharging"} > 0
        for: 4h
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used / 100 >= 0.8
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
  - name: smokeping
    rules:
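      # Loss is one minus the ratio of responses received to probes sent.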
      - alert: packet loss
        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
        for: 10m
        labels:
          alertgroup: smokeping
        annotations:
          loss_rate: "{{ $value | humanizePercentage }}"
  - name: snmp
    rules:
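      # Fires when a device returns fewer PDUs than its best scrape over
      # the past day, suggesting part of the SNMP tree has stopped answering.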
      - alert: snmp pdus missing
        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
        for: 15m
        labels:
          alertgroup: snmp
        annotations:
          missing_pdus: "{{ $value }}"
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_paused == 0 and statuscake_up == 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
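      # chef-client runs periodically, so the unit should be seen inactive
      # at some point in any 6-hour window; a zero sum suggests it is stuck
      # or has failed permanently.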
      - alert: systemd failed chef client service
        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
      - alert: tile render rate
        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          render_rate: "{{ $value }} tiles/s"
  - name: time
    rules:
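      # The kernel caps node_timex_maxerror_seconds at 16s once it has
      # lost synchronisation, so both conditions together indicate NTP is
      # genuinely not syncing rather than momentarily adrift.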
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
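      # Fires when the offset is beyond ±50ms and the 5-minute derivative
      # shows it drifting further away (or at least not recovering).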
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }} {{ . | first | value | humanizeDuration }}{{ end }}"
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
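      # Deletes from delayed_jobs approximate completed jobs and inserts
      # approximate new ones; a sustained ratio below 0.9 on the db-master
      # means the queue is growing faster than it is being drained.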
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 15m
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"