# DO NOT EDIT - This file is being maintained by Chef
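# Annotation values below are Prometheus Go templates: $value is the firing
# sample's value, $labels its label set, and the `query` function runs an
# instant PromQL query, with `first | value` extracting a sample's number.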
- alert: uplink
  expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
  alertgroup: "amsterdam"
  status: "{{ $value }}"
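# The PDU reports phase current in tenths of an amp (APC PowerNet MIB),
# hence the division by 10; alert when a phase draws more than 28A.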
- alert: pdu current draw
  expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
  alertgroup: "amsterdam"
  current: "{{ $value | humanize }}A"
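# Hourly-averaged apparent power summed across the site's PDUs; the /100
# rescales the raw MIB reading to kVA to match the annotation (assumed scaling).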
- alert: site power
  expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
  alertgroup: "amsterdam"
  current: "{{ $value | humanize }}kVA"
- alert: site temperature
  expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
  alertgroup: "amsterdam"
  temperature: "{{ $value | humanize }}C"
- alert: site humidity
  expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
  alertgroup: "amsterdam"
  humidity: "{{ $value | humanizePercentage }}"
- alert: apache down
  expr: apache_up == 0
  alertgroup: "{{ $labels.instance }}"
- alert: apache workers busy
  expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
  alertgroup: "{{ $labels.instance }}"
  busy_workers: "{{ $value | humanizePercentage }}"
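# Fraction of the event-MPM connection capacity in use; the denominator follows
# the mod_mpm_event capacity formula: ServerLimit * (ThreadsPerChild +
# AsyncRequestWorkerFactor * idle workers / processes).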
- alert: apache connection limit
  expr: (apache_connections{state="total"} - on (instance) apache_connections{state="closing"}) / on (instance) (apache_server_limit * on (instance) (apache_threads_per_child + on (instance) (apache_async_request_worker_factor * on (instance) apache_workers{state="idle"} / on(instance) apache_processes{state="all"}))) > 0.8
  alertgroup: "{{ $labels.instance }}"
  connections: "{{ $value | humanizePercentage }}"
- alert: chef client not running
  expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
  alertgroup: "{{ $labels.instance }}"
  down_time: "{{ $value | humanizeDuration }}"
- alert: cisco fan alarm
  expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
  fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: cisco temperature alarm
  expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
  alertgroup: "{{ $labels.site }}"
  temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: cisco main power alarm
  expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
- alert: cisco redundant power alarm
  expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
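# node_pressure_*_waiting_seconds_total are Linux PSI (pressure stall) counters;
# rate() over them gives the fraction of wall-clock time that tasks were
# stalled waiting for the resource.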
- alert: cpu pressure
  expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: postgres replication delay
  expr: pg_replication_lag_seconds > 30
  delay: "{{ $value | humanizeDuration }}"
- alert: discourse job failure rate
  expr: rate(discourse_job_failures[5m]) > 0
  alertgroup: discourse
  failure_rate: "{{ $value }} jobs/s"
- alert: uplink
  expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
  status: "{{ $value }}"
- alert: pdu current draw
  expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
  current: "{{ $value | humanize }}A"
- alert: site power
  expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
  current: "{{ $value | humanize }}kVA"
- alert: site temperature
  expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
  temperature: "{{ $value | humanize }}C"
- alert: site humidity
  expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
  humidity: "{{ $value | humanizePercentage }}"
- alert: fastly error rate
  expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
  error_rate: "{{ $value | humanizePercentage }}"
- alert: fastly frontend healthcheck warning
  expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2
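# Critical when every healthcheck for a service/datacenter (or service/backend)
# is failing, i.e. the failed count equals the total count.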
- alert: fastly frontend healthcheck critical
  expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter)
- alert: fastly backend healthcheck warning
  expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10
- alert: fastly backend healthcheck critical
  expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend)
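# Fires only when a filesystem becomes read-only after having been read-write
# at some point in the last 7 days, so permanently read-only mounts never alert.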
- alert: readonly filesystem
  expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
  alertgroup: "{{ $labels.instance }}"
- alert: filesystem low on space
  expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
  alertgroup: "{{ $labels.instance }}"
  percentage_free: "{{ $value | humanizePercentage }}"
  free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
  total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
- alert: filesystem low on inodes
  expr: node_filesystem_files_free / node_filesystem_files < 0.1
  alertgroup: "{{ $labels.instance }}"
  percentage_free: "{{ $value | humanizePercentage }}"
  free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
- alert: hwmon fan alarm
  expr: node_hwmon_fan_alarm == 1
  alertgroup: "{{ $labels.instance }}"
  fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
  fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: hwmon temperature alarm
  expr: node_hwmon_temp_alarm == 1
  alertgroup: "{{ $labels.instance }}"
  temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
  temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
  temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: hwmon voltage alarm
  expr: node_hwmon_in_alarm == 1
  alertgroup: "{{ $labels.instance }}"
  in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
- alert: io pressure
  expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: ipmi fan alarm
  expr: ipmi_fan_speed_state > 0
  alertgroup: "{{ $labels.instance }}"
  fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: ipmi temperature alarm
  expr: ipmi_temperature_state > 0
  alertgroup: "{{ $labels.instance }}"
  temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: ipmi voltage alarm
  expr: ipmi_voltage_state > 0
  alertgroup: "{{ $labels.instance }}"
  voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
- alert: ipmi power alarm
  expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: juniper red alarms
  expr: juniper_alarms_red_count > 0
  alertgroup: "{{ $labels.site }}"
  alarm_count: "{{ $value }} alarms"
- alert: juniper yellow alarms
  expr: juniper_alarms_yellow_count > 0
  alertgroup: "{{ $labels.site }}"
  alarm_count: "{{ $value }} alarms"
- alert: juniper cpu alarm
  expr: junos_route_engine_load_average_five / 2 > 0.5
  alertgroup: "{{ $labels.site }}"
  load_average: "{{ $value | humanizePercentage }}"
- alert: juniper fan alarm
  expr: junos_environment_fan_up != 1
  alertgroup: "{{ $labels.site }}"
- alert: juniper power alarm
  expr: junos_environment_power_up != 1
  alertgroup: "{{ $labels.site }}"
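# Optical power alarms are restricted to interfaces that are administratively up.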
- alert: juniper laser receive power
  expr: junos_interface_diagnostics_laser_rx_dbm < -12 and on (site, instance, name) junos_interface_admin_up == 1
  alertgroup: "{{ $labels.site }}"
  power: "{{ $value }} dBm"
- alert: juniper laser transmit power
  expr: junos_interface_diagnostics_laser_output_dbm < -8 and on (site, instance, name) junos_interface_admin_up == 1
  alertgroup: "{{ $labels.site }}"
  power: "{{ $value }} dBm"
- alert: exim down
  expr: exim_up == 0
  alertgroup: "{{ $labels.instance }}"
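# exim_queue and exim_queue_limit are presumably scraped by different jobs,
# so the job label is ignored when matching the two series.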
- alert: exim queue length
  expr: exim_queue > ignoring(job) exim_queue_limit
  queue_length: "{{ $value }}"
- alert: mailman queue length
  expr: mailman_queue_length > 200
  queue_length: "{{ $value }}"
- alert: mdadm array inactive
  expr: node_md_state{state="inactive"} > 0
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm array degraded
  expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm disk failed
  expr: node_md_disks{state="failed"} > 0
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: low memory
  expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
  alertgroup: "{{ $labels.instance }}"
  memory_free: "{{ $value | humanizePercentage }}"
- alert: memory pressure
  expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: oom kill detected
  expr: increase(node_vmstat_oom_kill[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_oom_kills: "{{ $value }}"
- alert: mysql down
  expr: mysql_up == 0
  alertgroup: "{{ $labels.instance }}"
- alert: mysql connection limit
  expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
  alertgroup: "{{ $labels.instance }}"
  connections_used: "{{ $value | humanizePercentage }}"
- alert: interface transmit rate
  expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
  alertgroup: "{{ $labels.instance }}"
  bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface receive rate
  expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
  alertgroup: "{{ $labels.instance }}"
  bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface transmit errors
  expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: wireguard interface transmit errors
  expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: interface receive errors
  expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: conntrack entries
  expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
  alertgroup: "{{ $labels.instance }}"
  entries_used: "{{ $value | humanizePercentage }}"
- alert: nominatim replication delay
  expr: nominatim_replication_delay > 10800
  alertgroup: nominatim
  delay: "{{ $value | humanizeDuration }}"
- alert: overpass osm database age
  expr: overpass_database_age_seconds{database="osm"} > 3600
  age: "{{ $value | humanizeDuration }}"
- alert: overpass area database age
  expr: overpass_database_age_seconds{database="area"} > 86400
  age: "{{ $value | humanizeDuration }}"
- alert: passenger down
  expr: passenger_up == 0
  alertgroup: "{{ $labels.instance }}"
- alert: passenger queuing
  expr: passenger_top_level_request_queue > 0
  alertgroup: "{{ $labels.instance }}"
- alert: passenger application queuing
  expr: passenger_app_request_queue > 0
  alertgroup: "{{ $labels.instance }}"
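# Planet dump and replication freshness checks; each is scoped with chef_role
# so it only fires on the host that carries the planetdump role.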
- alert: planet dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  overdue_by: "{{ $value | humanizeDuration }}"
- alert: notes dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  overdue_by: "{{ $value | humanizeDuration }}"
- alert: daily replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: hourly replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: minutely replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: changeset replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: postgresql down
  expr: pg_up == 0
  alertgroup: "{{ $labels.instance }}"
- alert: postgresql replication delay
  expr: pg_replication_lag_seconds > 30
  alertgroup: "{{ $labels.instance }}"
  delay: "{{ $value | humanizeDuration }}"
- alert: postgresql connection limit
  expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
  alertgroup: "{{ $labels.instance }}"
  connections_used: "{{ $value | humanizePercentage }}"
- alert: postgresql deadlocks
  expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
  alertgroup: "{{ $labels.instance }}"
  new_deadlocks: "{{ $value }}"
- alert: postgresql slow queries
  expr: pg_slow_queries > 0
  alertgroup: "{{ $labels.instance }}"
  queries: "{{ $value }}"
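# Compares the count of idle-in-transaction backends against the histogram
# bucket of those idle for at most 300s: any excess means at least one
# transaction has sat idle for more than five minutes.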
- alert: postgresql idle transactions
  expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
  alertgroup: "{{ $labels.instance }}"
  queries: "{{ $value }}"
- alert: prometheus configuration error
  expr: prometheus_config_last_reload_successful == 0
  alertgroup: "prometheus"
- alert: prometheus target missing
  expr: up == 0
  alertgroup: "prometheus"
- alert: raid controller battery failed
  expr: ohai_controller_info{battery_status="failed"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: raid controller battery recharging
  expr: ohai_controller_info{battery_status="recharging"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: raid array degraded
  expr: ohai_array_info{status="degraded"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: raid disk failed
  expr: ohai_disk_info{status="failed"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: memory controller errors
  expr: increase(rasdaemon_mc_events_total[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_errors: "{{ $value }}"
- alert: pcie aer errors
  expr: increase(rasdaemon_aer_events_total[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_errors: "{{ $value }}"
- alert: smart failure
  expr: smart_health_status == 0
  alertgroup: "{{ $labels.instance }}"
- alert: smart ssd wearout approaching
  expr: smart_percentage_used / 100 >= 0.8
  alertgroup: "{{ $labels.instance }}"
  percentage_used: "{{ $value | humanizePercentage }}"
- alert: packet loss
  expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
  alertgroup: smokeping
  loss_rate: "{{ $value | humanizePercentage }}"
- alert: snmp pdus missing
  expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
  missing_pdus: "{{ $value }}"
- alert: ssl certificate probe failed
  expr: ssl_probe_success == 0
- alert: ssl certificate expiry
  expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
  expires_in: "{{ $value | humanizeDuration }}"
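# ssl_ocsp_response_status follows the RFC 6960 CertStatus values:
# 0 good, 1 revoked, 2 unknown.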
- alert: ssl certificate revoked
  expr: ssl_ocsp_response_status == 1
- alert: ocsp status unknown
  expr: ssl_ocsp_response_status == 2
- alert: statuscake uptime check failing
  expr: statuscake_paused == 0 and statuscake_up == 0
  alertgroup: statuscake
- alert: systemd failed service
  expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
  alertgroup: "{{ $labels.instance }}"
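# chef-client.service is a oneshot run from a timer; if it is never observed
# in the inactive state over 6h it is stuck in, or repeatedly re-entering,
# a non-idle state.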
- alert: systemd failed chef client service
  expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
  alertgroup: "{{ $labels.instance }}"
- alert: taginfo planet age
  expr: time() - taginfo_data_from_seconds > 129600 and on (instance) chef_role{name="taginfo"}
  age: "{{ $value | humanizeDuration }}"
- alert: taginfo database age
  expr: time() - taginfo_database_update_finish_seconds > 129600 and on (instance) chef_role{name="taginfo"}
  age: "{{ $value | humanizeDuration }}"
- alert: taginfo database size
  expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
  size_change: "{{ $value | humanizePercentage }}"
- alert: renderd replication delay
  expr: renderd_replication_delay > 120
  delay: "{{ $value | humanizeDuration }}"
- alert: missed tile rate
  expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
  miss_rate: "{{ $value | humanizePercentage }}"
- alert: tile render rate
  expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
  render_rate: "{{ $value }} tiles/s"
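# The kernel caps node_timex_maxerror_seconds at 16s when the clock is
# unsynchronised, so the two conditions together mean NTP sync has been lost.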
- alert: clock not synchronising
  expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
  alertgroup: "{{ $labels.instance }}"
- alert: clock skew detected
  expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
  alertgroup: "{{ $labels.instance }}"
  skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
- alert: web error rate
  expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
  error_rate: "{{ $value | humanizePercentage }}"
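# Ratio of delayed_jobs rows deleted (jobs completed) to rows inserted (jobs
# enqueued) on the database master; sustained values below 90% mean the queue
# is growing faster than it is being drained.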
- alert: job processing rate
  expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
  job_processing_rate: "{{ $value | humanizePercentage }}"