# DO NOT EDIT - This file is being maintained by Chef

groups:
  - name: amsterdam
    rules:
      - alert: uplink
        expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          status: "{{ $value }}"
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 15 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 32
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.08 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.8
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
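      # Estimate the mpm_event connection capacity as ServerLimit * (ThreadsPerChild +
      # AsyncRequestWorkerFactor * idle workers per process) and alert when non-closing
      # connections exceed 80% of it.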
      - alert: apache connection limit
        expr: (apache_connections{state="total"} - on (instance) apache_connections{state="closing"}) / on (instance) (apache_server_limit * on (instance) (apache_threads_per_child + on (instance) (apache_async_request_worker_factor * on (instance) apache_workers{state="idle"} / on(instance) apache_processes{state="all"}))) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections: "{{ $value | humanizePercentage }}"
  - name: chef
    rules:
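      # The chef-client systemd timer last fired more than an hour ago, sustained for 12 hours.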
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: active rails queries
        expr: sum(pg_stat_activity_count{datname="openstreetmap",usename="rails",state="active"}) by (instance) > 50 and on (instance) chef_role{name="db-master"}
        for: 5m
        labels:
          alertgroup: database
        annotations:
          queries: "{{ $value }}"
      - alert: active cgimap queries
        expr: sum(pg_stat_activity_count{datname="openstreetmap",usename="cgimap",state="active"}) by (instance) > 30 and on (instance) chef_role{name="db-master"}
        for: 5m
        labels:
          alertgroup: database
        annotations:
          queries: "{{ $value }}"
  - name: discourse
    rules:
      - alert: discourse job failure rate
        expr: rate(discourse_job_failures[5m]) > 0
        for: 5m
        labels:
          alertgroup: discourse
        annotations:
          failure_rate: "{{ $value }} jobs/s"
  - name: dublin
    rules:
      - alert: uplink
        expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          status: "{{ $value }}"
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly frontend healthcheck warning
        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2
        for: 15m
        labels:
          alertgroup: fastly
      - alert: fastly frontend healthcheck critical
        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter)
        for: 5m
        labels:
          alertgroup: fastly
      - alert: fastly backend healthcheck warning
        expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10
        for: 15m
        labels:
          alertgroup: fastly
      - alert: fastly backend healthcheck critical
        expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend)
        for: 5m
        labels:
          alertgroup: fastly
  - name: filesystem
    rules:
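      # A filesystem is read-only now but was writable at some point in the last seven days.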
      - alert: readonly filesystem
        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
      - alert: juniper red alarms
        expr: juniper_alarms_red_count > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          alarm_count: "{{ $value }} alarms"
      - alert: juniper yellow alarms
        expr: juniper_alarms_yellow_count > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          alarm_count: "{{ $value }} alarms"
      - alert: juniper cpu alarm
        expr: junos_route_engine_load_average_five / 2 > 0.5
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          load_average: "{{ $value | humanizePercentage }}"
      - alert: juniper fan alarm
        expr: junos_environment_fan_up != 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: junos_environment_power_up != 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper laser receive power
        expr: junos_interface_diagnostics_laser_rx_dbm < -12 and on (site, instance, name) junos_interface_admin_up == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          power: "{{ $value }} dBm"
      - alert: juniper laser transmit power
        expr: junos_interface_diagnostics_laser_output_dbm < -8 and on (site, instance, name) junos_interface_admin_up == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          power: "{{ $value }} dBm"
  - name: mail
    rules:
      - alert: exim down
        expr: exim_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: exim queue length
        expr: exim_queue > ignoring(job) exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
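      # The array has fewer active disks than it requires.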
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: mysql
    rules:
      - alert: mysql down
        expr: mysql_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: mysql connection limit
        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: mysql connection errors
        expr: increase(mysql_global_status_connection_errors_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_count: "{{ $value }}"
  - name: network
    rules:
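      # An 802.3ad bond configured by chef has fewer than two active links.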
      - alert: interface redundancy lost
        expr: node_bonding_active < 2 and on (instance, master) label_replace(chef_network_interface{bond_mode="802.3ad"}, "master", "$1", "name", "(.*)")
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          link_count: "{{ $value }}"
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: nominatim connections
        expr: sum(nginx_connections_writing and on (instance) chef_role{name="nominatim"}) > 2500
        for: 15m
        labels:
          alertgroup: nominatim
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 3600
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
  - name: passenger
    rules:
      - alert: passenger down
        expr: passenger_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger queuing
        expr: passenger_top_level_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger application queuing
        expr: passenger_app_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: planet
    rules:
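      # Planet dump files are more than seven days old on the host with the planetdump role.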
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
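      # More idle-in-transaction backends than fall within the 300 second histogram bucket,
      # i.e. at least one transaction has been idle for more than five minutes.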
      - alert: postgresql idle transactions
        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: node exporter text file scrape error
        expr: node_textfile_scrape_error > 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid controller battery failed
        expr: ohai_controller_info{battery_status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid controller battery recharging
        expr: ohai_controller_info{battery_status="recharging"} > 0
        for: 4h
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used / 100 >= 0.8
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
  - name: smokeping
    rules:
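      # More than 2% of smokeping probes received no response over the last five minutes.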
      - alert: packet loss
        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
        for: 10m
        labels:
          alertgroup: smokeping
        annotations:
          loss_rate: "{{ $value | humanizePercentage }}"
  - name: snmp
    rules:
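      # The scrape is returning fewer PDUs than the maximum seen over the last day.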
      - alert: snmp pdus missing
        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
        for: 15m
        labels:
          alertgroup: snmp
        annotations:
          missing_pdus: "{{ $value }}"
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_paused == 0 and statuscake_up == 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
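      # chef-client.service has not been in the inactive state at any point in the last six hours.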
      - alert: systemd failed chef client service
        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: taginfo
    rules:
      - alert: taginfo planet age
        expr: time() - taginfo_data_from_seconds > 129600 and on (instance) chef_role{name="taginfo"}
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database age
        expr: time() - taginfo_database_update_finish_seconds > 129600 and on (instance) chef_role{name="taginfo"}
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database size
        expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
        for: 30m
        labels:
          alertgroup: taginfo
        annotations:
          size_change: "{{ $value | humanizePercentage }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
      - alert: tile render rate
        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          render_rate: "{{ $value }} tiles/s"
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
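      # The clock offset exceeds 50ms and is not converging back towards zero.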
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
  - name: web
    rules:
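      # 5xx responses exceed 0.2% of API calls and the absolute 5xx rate is above 0.01 requests per second.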
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002 and sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) > 0.01
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
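      # Rows are deleted from delayed_jobs at less than 90% of the rate they are inserted on the
      # database master, so the job queue is growing.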
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 1h
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"