# DO NOT EDIT - This file is being maintained by Chef

groups:
  - name: amsterdam
    rules:
      - alert: he uplink
        expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          status: "{{ $value }}"
      - alert: equinix uplink
        expr: junos_interface_up{site="amsterdam",name=~"xe-[01]/2/0"} != 1
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          status: "{{ $value }}"
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 15 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 32
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.08 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.8
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
      - alert: apache connection limit
        expr: (apache_connections{state="total"} - on (instance) apache_connections{state="closing"}) / on (instance) (apache_server_limit * on (instance) (apache_threads_per_child + on (instance) (apache_async_request_worker_factor * on (instance) apache_workers{state="idle"} / on(instance) apache_processes{state="all"}))) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections: "{{ $value | humanizePercentage }}"
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: active rails queries
        expr: sum(pg_stat_activity_count{datname="openstreetmap",usename="rails",state="active"}) by (instance) > 50 and on (instance) chef_role{name="db-master"}
        for: 5m
        labels:
          alertgroup: database
        annotations:
          queries: "{{ $value }}"
      - alert: active cgimap queries
        expr: sum(pg_stat_activity_count{datname="openstreetmap",usename="cgimap",state="active"}) by (instance) > 30 and on (instance) chef_role{name="db-master"}
        for: 5m
        labels:
          alertgroup: database
        annotations:
          queries: "{{ $value }}"
  - name: discourse
    rules:
      - alert: discourse job failure rate
        expr: rate(discourse_job_failures[5m]) > 0
        for: 5m
        labels:
          alertgroup: discourse
        annotations:
          failure_rate: "{{ $value }} jobs/s"
  - name: dublin
    rules:
      - alert: he uplink
        expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          status: "{{ $value }}"
      - alert: equinix uplink
        expr: junos_interface_up{site="dublin",name=~"xe-[01]/2/0"} != 1
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          status: "{{ $value }}"
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly frontend healthcheck warning
        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2
        for: 15m
        labels:
          alertgroup: fastly
      - alert: fastly frontend healthcheck critical
        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter)
        for: 5m
        labels:
          alertgroup: fastly
      - alert: fastly backend healthcheck warning
        expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10
        for: 15m
        labels:
          alertgroup: fastly
      - alert: fastly backend healthcheck critical
        expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend)
        for: 5m
        labels:
          alertgroup: fastly
  - name: filesystem
    rules:
      - alert: readonly filesystem
        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
      - alert: juniper red alarms
        expr: juniper_alarms_red_count > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          alarm_count: "{{ $value }} alarms"
      - alert: juniper yellow alarms
        expr: juniper_alarms_yellow_count > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          alarm_count: "{{ $value }} alarms"
      - alert: juniper cpu alarm
        expr: junos_route_engine_load_average_five / 2 > 0.5
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          load_average: "{{ $value | humanizePercentage }}"
      - alert: juniper fan alarm
        expr: junos_environment_fan_up != 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: junos_environment_power_up != 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper laser receive power
        expr: junos_interface_diagnostics_laser_rx_dbm < -12 and on (site, instance, name) junos_interface_admin_up == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          power: "{{ $value }} dBm"
      - alert: juniper laser transmit power
        expr: junos_interface_diagnostics_laser_output_dbm < -8 and on (site, instance, name) junos_interface_admin_up == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          power: "{{ $value }} dBm"
  - name: load
    rules:
      - alert: load average
        expr: sum(node_load5) by (instance) / count(node_cpu_frequency_max_hertz) by (instance) > 2
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          load: "{{ $value | humanizePercentage }}"
  - name: mail
    rules:
      - alert: exim down
        expr: exim_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: exim queue length
        expr: exim_queue > ignoring(job) exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: mysql
    rules:
      - alert: mysql down
        expr: mysql_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: mysql connection limit
        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: mysql connection errors
        expr: increase(mysql_global_status_connection_errors_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_count: "{{ $value }}"
  - name: network
    rules:
      - alert: interface redundancy lost
        expr: node_bonding_active < 2 and on (instance, master) label_replace(chef_network_interface{bond_mode="802.3ad"}, "master", "$1", "name", "(.*)")
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          link_count: "{{ $value }}"
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: nominatim connections
        expr: sum(nginx_connections_writing and on (instance) chef_role{name="nominatim"}) > 2500
        for: 15m
        labels:
          alertgroup: nominatim
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 3600
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
  - name: passenger
    rules:
      - alert: passenger down
        expr: passenger_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger queuing
        expr: passenger_top_level_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger application queuing
        expr: passenger_app_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: planet
    rules:
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql idle transactions
        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: node exporter text file scrape error
        expr: node_textfile_scrape_error > 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid controller battery failed
        expr: ohai_controller_info{battery_status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid controller battery recharging
        expr: ohai_controller_info{battery_status="recharging"} > 0
        for: 4h
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: resolved
    rules:
      - alert: dnssec validation failures
        expr: rate(resolved_dnssec_verdicts_total{result="bogus"}[1m]) > 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used / 100 >= 0.8
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
  - name: smokeping
    rules:
      - alert: packet loss
        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
        for: 10m
        labels:
          alertgroup: smokeping
        annotations:
          loss_rate: "{{ $value | humanizePercentage }}"
  - name: snmp
    rules:
      - alert: snmp pdus missing
        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
        for: 15m
        labels:
          alertgroup: snmp
        annotations:
          missing_pdus: "{{ $value }}"
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_paused == 0 and statuscake_up == 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: systemd failed chef client service
        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: taginfo
    rules:
      - alert: taginfo planet age
        expr: time() - taginfo_data_from_seconds > 129600 and on (instance) chef_role{name="taginfo"}
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database age
        expr: time() - taginfo_database_update_finish_seconds > 129600 and on (instance) chef_role{name="taginfo"}
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database size
        expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
        for: 30m
        labels:
          alertgroup: taginfo
        annotations:
          size_change: "{{ $value | humanizePercentage }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
      - alert: tile render rate
        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          render_rate: "{{ $value }} tiles/s"
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002 and sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) > 0.01
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 1h
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"