# DO NOT EDIT - This file is being maintained by Chef

groups:
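  # The amsterdam and dublin groups below alert on the per-site APC rPDU2 SNMP
  # gauges. The divisors reflect how the raw gauges appear to be scaled:
  # rPDU2PhaseStatusCurrent in tenths of an amp (/ 10 gives amps),
  # rPDU2PhaseStatusApparentPower in hundredths of a kVA (/ 100 gives kVA,
  # averaged over an hour and summed across PDUs), temperature in tenths of a
  # degree C, and relative humidity in whole percent (/ 100 gives a fraction
  # for humanizePercentage). These scalings are inferred from the expressions
  # themselves rather than taken from the MIB.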
  - name: amsterdam
    rules:
      - alert: uplink
        expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          status: "{{ $value }}"
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
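  # Many annotations below use the alert template idiom
  # {{ with printf "metric{label='%s'}" $labels.x | query }}{{ . | first | value | humanize }}...{{ end }}
  # which runs an instant query when the notification template is expanded and
  # formats the first sample's value, so the alert can report a related reading
  # (fan speed, temperature, disk counts) alongside the series that fired.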
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
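  # The cpu, io and memory pressure alerts are based on the kernel's pressure
  # stall information (PSI) counters exported by node_exporter; rate() over the
  # *_waiting_seconds_total counters approximates the fraction of wall-clock
  # time that tasks were stalled on that resource, so the thresholds read as
  # stall percentages.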
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: discourse
    rules:
      - alert: discourse job failure rate
        expr: rate(discourse_job_failures[5m]) > 0
        for: 5m
        labels:
          alertgroup: discourse
        annotations:
          failure_rate: "{{ $value }} jobs/s"
  - name: dublin
    rules:
      - alert: uplink
        expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          status: "{{ $value }}"
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly frontend healthcheck warning
        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2
        for: 15m
        labels:
          alertgroup: fastly
      - alert: fastly frontend healthcheck critical
        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter)
        for: 5m
        labels:
          alertgroup: fastly
      - alert: fastly backend healthcheck warning
        expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10
        for: 15m
        labels:
          alertgroup: fastly
      - alert: fastly backend healthcheck critical
        expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend)
        for: 5m
        labels:
          alertgroup: fastly
  - name: filesystem
    rules:
      - alert: readonly filesystem
        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
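  # The juniper cpu alarm below divides the five-minute load average by 2 so
  # the result reads as a utilisation fraction; the divisor is assumed to
  # match the number of routing-engine cores, making the alert fire above
  # roughly 50% load per core.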
  - name: juniper
    rules:
      - alert: juniper cpu alarm
        expr: junos_route_engine_load_average_five / 2 > 0.5
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          load_average: "{{ $value | humanizePercentage }}"
      - alert: juniper fan alarm
        expr: junos_environment_fan_up != 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: junos_environment_power_up != 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: mail
    rules:
      - alert: exim down
        expr: exim_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: exim queue length
        expr: exim_queue > ignoring(job) exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: mysql
    rules:
      - alert: mysql down
        expr: mysql_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: mysql connection limit
        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 3600
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
  - name: passenger
    rules:
      - alert: passenger down
        expr: passenger_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger queuing
        expr: passenger_top_level_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger application queuing
        expr: passenger_app_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
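  # The planet rules below join the file age checks against
  # chef_role{name="planetdump"} using "and ignoring (job, name, path)" so
  # each alert only fires on hosts that carry the planetdump role, not on
  # every server exporting file_stat metrics.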
  - name: planet
    rules:
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
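  # The "postgresql idle transactions" rule below compares the total number of
  # idle-in-transaction backends with the histogram bucket of those idle for
  # at most 300 seconds; any difference means at least one transaction has
  # been idle for more than five minutes.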
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
      - alert: postgresql idle transactions
        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid controller battery failed
        expr: ohai_controller_info{battery_status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid controller battery recharging
        expr: ohai_controller_info{battery_status="recharging"} > 0
        for: 4h
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used / 100 >= 0.8
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
  - name: smokeping
    rules:
      - alert: packet loss
        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
        for: 10m
        labels:
          alertgroup: smokeping
        annotations:
          loss_rate: "{{ $value | humanizePercentage }}"
  - name: snmp
    rules:
      - alert: snmp pdus missing
        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
        for: 15m
        labels:
          alertgroup: snmp
        annotations:
          missing_pdus: "{{ $value }}"
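  # ssl_ocsp_response_status is assumed to follow the standard OCSP certificate
  # status mapping (0 = good, 1 = revoked, 2 = unknown), which is why the
  # "ssl certificate revoked" and "ocsp status unknown" rules below test
  # different values.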
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_paused == 0 and statuscake_up == 0
        for: 10m
        labels:
          alertgroup: statuscake
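  # The "systemd failed chef client service" rule below fires when
  # chef-client.service has not been seen in the "inactive" state at any point
  # in the last six hours, i.e. the timer-triggered run appears to be stuck
  # rather than completing and returning to inactive.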
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: systemd failed chef client service
        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
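  # 129600 seconds is 36 hours, so the taginfo data and database may lag by up
  # to a day and a half before the age alerts fire.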
  - name: taginfo
    rules:
      - alert: taginfo planet age
        expr: time() - taginfo_data_from_seconds > 129600
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database age
        expr: time() - taginfo_database_update_finish_seconds > 129600
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database size
        expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
        for: 30m
        labels:
          alertgroup: taginfo
        annotations:
          size_change: "{{ $value | humanizePercentage }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
      - alert: tile render rate
        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          render_rate: "{{ $value }} tiles/s"
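  # The "clock skew detected" rule below fires when the timex offset exceeds
  # 50ms in either direction and its 5-minute derivative shows it is not
  # moving back towards zero, i.e. the clock is drifting away rather than
  # being corrected.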
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
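  # In the web group below, the status regex matches 5xx API responses (all
  # except 509), and the "job processing rate" rule compares row deletions
  # with row insertions on the delayed_jobs table over the last hour on the
  # db-master role; a ratio below 0.9 suggests the queue is growing faster
  # than it is being drained.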
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 1h
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"