# DO NOT EDIT - This file is being maintained by Chef

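# Prometheus alert rules, grouped by service or site. Each rule sets an
# "alertgroup" label (presumably used for routing in Alertmanager) and, where
# useful, annotations that embed the triggering value via Go templating.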
groups:
  - name: amsterdam
    rules:
      - alert: uplink
        expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          status: "{{ $value }}"
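      # The APC PDU SNMP metrics below are fixed-point integers (presumably
      # per the PowerNet MIB): current in tenths of an amp, temperature in
      # tenths of a degree C, humidity in hundredths of a percent, and
      # apparent power scaled to kVA, hence the /10 and /100. The dublin
      # group below follows the same pattern.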
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
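      # This appears to mirror mpm_event's connection capacity: ServerLimit *
      # (ThreadsPerChild + AsyncRequestWorkerFactor * idle workers /
      # processes), with closing connections excluded; alert at >80% of it.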
      - alert: apache connection limit
        expr: (apache_connections{state="total"} - on (instance) apache_connections{state="closing"}) / on (instance) (apache_server_limit * on (instance) (apache_threads_per_child + on (instance) (apache_async_request_worker_factor * on (instance) apache_workers{state="idle"} / on(instance) apache_processes{state="all"}))) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections: "{{ $value | humanizePercentage }}"
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: cpu
    rules:
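      # node_pressure_* metrics come from the kernel's PSI accounting: the
      # rate is the share of wall-clock time tasks spent stalled waiting for
      # the resource (the io and memory groups below use the same idea).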
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: discourse
    rules:
      - alert: discourse job failure rate
        expr: rate(discourse_job_failures[5m]) > 0
        for: 5m
        labels:
          alertgroup: discourse
        annotations:
          failure_rate: "{{ $value }} jobs/s"
  - name: dublin
    rules:
      - alert: uplink
        expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          status: "{{ $value }}"
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
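      # Healthcheck alerts fire per grouping: "warning" when more than a
      # handful of checks fail, "critical" when every check in the grouping
      # fails (failing count equals total count).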
      - alert: fastly frontend healthcheck warning
        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2
        for: 15m
        labels:
          alertgroup: fastly
      - alert: fastly frontend healthcheck critical
        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter)
        for: 5m
        labels:
          alertgroup: fastly
      - alert: fastly backend healthcheck warning
        expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10
        for: 15m
        labels:
          alertgroup: fastly
      - alert: fastly backend healthcheck critical
        expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend)
        for: 5m
        labels:
          alertgroup: fastly
  - name: filesystem
    rules:
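      # Comparing against the 7-day minimum means this only fires when a
      # filesystem that was recently read-write turns read-only, not for
      # filesystems that are always mounted read-only.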
      - alert: readonly filesystem
        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
      - alert: juniper red alarms
        expr: juniper_alarms_red_count > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          alarm_count: "{{ $value }} alarms"
      - alert: juniper yellow alarms
        expr: juniper_alarms_yellow_count > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          alarm_count: "{{ $value }} alarms"
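      # Presumably two routing-engine cores: halving the five-minute load
      # average expresses it as a fraction of capacity.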
      - alert: juniper cpu alarm
        expr: junos_route_engine_load_average_five / 2 > 0.5
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          load_average: "{{ $value | humanizePercentage }}"
      - alert: juniper fan alarm
        expr: junos_environment_fan_up != 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: junos_environment_power_up != 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper laser receive power
        expr: junos_interface_diagnostics_laser_rx_dbm < -12 and on (site, instance, name) junos_interface_admin_up == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          power: "{{ $value }} dBm"
      - alert: juniper laser transmit power
        expr: junos_interface_diagnostics_laser_output_dbm < -8 and on (site, instance, name) junos_interface_admin_up == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          power: "{{ $value }} dBm"
  - name: mail
    rules:
      - alert: exim down
        expr: exim_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: exim queue length
        expr: exim_queue > ignoring(job) exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: mysql
    rules:
      - alert: mysql down
        expr: mysql_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: mysql connection limit
        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 3600
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
  - name: passenger
    rules:
      - alert: passenger down
        expr: passenger_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger queuing
        expr: passenger_top_level_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger application queuing
        expr: passenger_app_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: planet
    rules:
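      # Each planet rule is joined against chef_role{name="planetdump"} so it
      # only fires on the host currently carrying that Chef role; ignoring()
      # drops the labels that differ between the two series.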
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
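      # Fires when idle-in-transaction backends exist beyond the 300s
      # histogram bucket, i.e. some transaction has sat idle for more than
      # five minutes.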
      - alert: postgresql idle transactions
        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid controller battery failed
        expr: ohai_controller_info{battery_status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid controller battery recharging
        expr: ohai_controller_info{battery_status="recharging"} > 0
        for: 4h
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used / 100 >= 0.8
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
  - name: smokeping
    rules:
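      # Loss is inferred as 1 - responses/requests, since only answered
      # probes record a response duration.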
      - alert: packet loss
        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
        for: 10m
        labels:
          alertgroup: smokeping
        annotations:
          loss_rate: "{{ $value | humanizePercentage }}"
  - name: snmp
    rules:
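      # A scrape returning fewer PDUs than the daily maximum suggests the
      # device has stopped answering for some of its OIDs.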
      - alert: snmp pdus missing
        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
        for: 15m
        labels:
          alertgroup: snmp
        annotations:
          missing_pdus: "{{ $value }}"
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
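      # ssl_ocsp_response_status presumably follows the OCSP CertStatus
      # values from RFC 6960: 0 = good, 1 = revoked, 2 = unknown.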
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_paused == 0 and statuscake_up == 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
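      # chef-client.service is presumably a run driven by chef-client.timer
      # (see the chef group above), so it should pass through the "inactive"
      # state regularly; never being inactive across six hours means it is
      # stuck or failing continuously.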
      - alert: systemd failed chef client service
        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: taginfo
    rules:
      - alert: taginfo planet age
        expr: time() - taginfo_data_from_seconds > 129600 and on (instance) chef_role{name="taginfo"}
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database age
        expr: time() - taginfo_database_update_finish_seconds > 129600 and on (instance) chef_role{name="taginfo"}
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
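      # A swing of more than 10% in half an hour, in either direction, points
      # at a truncated or runaway database rather than normal growth.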
      - alert: taginfo database size
        expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
        for: 30m
        labels:
          alertgroup: taginfo
        annotations:
          size_change: "{{ $value | humanizePercentage }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
      - alert: tile render rate
        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          render_rate: "{{ $value }} tiles/s"
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
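      # Only alerts when the offset exceeds 50ms and the derivative has the
      # same sign, i.e. the clock is drifting further away rather than being
      # steered back.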
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }} {{ . | first | value | humanizeDuration }}{{ end }}"
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
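      # Deletions from delayed_jobs are completed jobs and insertions are new
      # ones, so a delete/insert ratio below 0.9 for an hour means the queue
      # is falling behind; restricted to the current db-master role.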
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 1h
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"