# DO NOT EDIT - This file is being maintained by Chef

groups:
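  # The APC rPDU2 SNMP gauges below appear to report scaled integers
  # (current in tenths of an amp, temperature in tenths of a degree C,
  # humidity in whole percent), which is why the expressions divide by
  # 10 and 100 - an assumption inferred from the divisors, not from the
  # MIB itself.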
  - name: amsterdam
    rules:
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site current draw
        expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 25
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
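  # "apache workers busy" compares busy workers against all scoreboard
  # slots per instance. "apache low request rate" compares the current
  # 5m request rate with the same period one week earlier, and only
  # fires when the historical rate was above 2 req/s, so quiet hosts do
  # not alert on noise.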
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
      - alert: apache low request rate
        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          request_rate: "{{ $value | humanizePercentage }}"
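  # Fires when the chef-client.timer last triggered more than an hour
  # ago; the 12h "for" clause tolerates transient scheduling gaps.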
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
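  # The annotations in this group (and below) use the Prometheus
  # template idiom
  #   {{ with printf "metric{...}" ... | query }}{{ . | first | value }}{{ end }}
  # printf builds an instant-query selector from the alert's labels,
  # query runs it when the alert is rendered, and "first | value"
  # extracts the numeric value of the first sample.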
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
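  # Pressure-stall information (PSI): the rate of a *_waiting_seconds_total
  # counter is the fraction of wall-clock time during which some tasks
  # were stalled on the resource, so > 0.6 means stalled roughly 60% of
  # the time.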
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 5
        for: 5m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
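  # 5xx responses as a share of all responses, per service and
  # datacenter; 0.005 is 0.5%. A single failing healthcheck alerts
  # after 15m, while five or more failing at once alert after only 5m.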
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly healthcheck failing
        expr: count(fastly_healthcheck_status == 0) > 0
        for: 15m
        labels:
          alertgroup: fastly
      - alert: multiple fastly healthchecks failing
        expr: count(fastly_healthcheck_status == 0) > 4
        for: 5m
        labels:
          alertgroup: fastly
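  # Space and inode alerts fire on the free fraction (below 5% and 10%
  # respectively); the annotations re-query the raw gauges so the alert
  # shows absolute numbers alongside the ratio.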
  - name: filesystem
    rules:
      - alert: readonly filesystem
        expr: node_filesystem_readonly == 1
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
      - alert: juniper fan alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!="running"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!="running"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: mail
    rules:
      - alert: exim queue length
        expr: exim_queue > exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
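  # "mdadm array degraded": node_md_disks is broken out by a "state"
  # label, so summing "without (state)" yields the active-disk count
  # with labels that match node_md_disks_required for the comparison.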
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
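  # Interface throughput is compared against the advertised link speed.
  # WireGuard interfaces (wg*) are excluded from the strict 1% transmit
  # error rule and get their own laxer 5%/1h rule below.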
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 300
        for: 5m
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
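  # Dump and replication freshness is derived from file mtimes. The
  # "and ignoring (job, name, path) chef_role{name=\"planetdump\"} == 1"
  # clause restricts each alert to hosts carrying the planetdump role;
  # ignoring() drops the labels that differ between the two series so
  # the vector match can succeed.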
  - name: planet
    rules:
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
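  # Connection usage is summed per instance and server before comparing
  # against max_connections, so the ratio stays meaningful if one
  # exporter reports several database servers.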
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 5
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        # smart_percentage_used is already on a 0-100 scale, so divide by
        # 100 to get the fraction that humanizePercentage expects
        expr: smart_percentage_used / 100 >= 0.8
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
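  # ssl_exporter is assumed to encode OCSP status as 0 = good,
  # 1 = revoked, 2 = unknown, which is why "revoked" tests == 1 and
  # "unknown" tests == 2 below.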
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_uptime{status="down",paused="false"} > 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: systemd failed chef client service
        expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
        for: 6h
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
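  # "clock skew detected" fires only when the offset is beyond +/-50ms
  # AND drifting further from zero (the deriv() sign matches the offset
  # sign), so a clock that is already converging does not alert.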
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
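  # "job processing rate": on the delayed_jobs table, deletions track
  # completed jobs and insertions track new ones, so a del/ins ratio
  # below 0.9 suggests the queue is growing; the chef_role clause limits
  # the alert to the database master.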
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 15m
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"