# DO NOT EDIT - This file is being maintained by Chef
  expr: ifOperStatus{site="amsterdam",ifName=~"ge-[01]/2/2"} != 1
  alertgroup: "amsterdam"
  status: "{{ $value }}"
- alert: pdu current draw
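  # rPDU2PhaseStatusCurrent appears to be reported in tenths of an amp, so this fires when phase 1 draws more than 28A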
  expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
  alertgroup: "amsterdam"
  current: "{{ $value | humanize }}A"
  expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
  alertgroup: "amsterdam"
  current: "{{ $value | humanize }}kVA"
- alert: site temperature
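  # the PDU temperature sensors appear to report tenths of a degree Celsius; fires when the coolest sensor reads below 18C or above 26C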
  expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
  alertgroup: "amsterdam"
  temperature: "{{ $value | humanize }}C"
- alert: site humidity
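  # fires when the most humid sensor reads below 25% or above 65% relative humidity (the metric appears to be in whole percent, scaled here to a ratio)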
  expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
  alertgroup: "amsterdam"
  humidity: "{{ $value | humanizePercentage }}"
  alertgroup: "{{ $labels.instance }}"
- alert: apache workers busy
  expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
  alertgroup: "{{ $labels.instance }}"
  busy_workers: "{{ $value | humanizePercentage }}"
- alert: chef client not running
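  # fires when the chef-client.timer systemd timer has not triggered a run for more than an hour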
  expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
  alertgroup: "{{ $labels.instance }}"
  down_time: "{{ $value | humanizeDuration }}"
- alert: cisco fan alarm
  expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
  fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: cisco temperature alarm
  expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
  alertgroup: "{{ $labels.site }}"
  temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: cisco main power alarm
  expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
- alert: cisco redundant power alarm
  expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
  expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: postgres replication delay
  expr: pg_replication_lag > 30
  delay: "{{ $value | humanizeDuration }}"
- alert: discourse job failure rate
  expr: rate(discourse_job_failures[5m]) > 0
  alertgroup: discourse
  failure_rate: "{{ $value }} jobs/s"
  expr: ifOperStatus{site="dublin",ifName=~"ge-[01]/2/2"} != 1
  status: "{{ $value }}"
- alert: pdu current draw
  expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
  current: "{{ $value | humanize }}A"
  expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
  current: "{{ $value | humanize }}kVA"
- alert: site temperature
  expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
  temperature: "{{ $value | humanize }}C"
- alert: site humidity
  expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
  humidity: "{{ $value | humanizePercentage }}"
- alert: fastly error rate
  expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
  error_rate: "{{ $value | humanizePercentage }}"
- alert: fastly healthcheck failing
  expr: count(fastly_healthcheck_status == 0) by (service) > 0
- alert: multiple fastly healthchecks failing
  expr: count(fastly_healthcheck_status == 0) by (service) > 4
- alert: readonly filesystem
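  # min_over_time is 0 if the filesystem was writable at any point in the last week, so this only fires when a filesystem has newly become read-only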
  expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
  alertgroup: "{{ $labels.instance }}"
- alert: filesystem low on space
  expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
  alertgroup: "{{ $labels.instance }}"
  percentage_free: "{{ $value | humanizePercentage }}"
  free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
  total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
- alert: filesystem low on inodes
  expr: node_filesystem_files_free / node_filesystem_files < 0.1
  alertgroup: "{{ $labels.instance }}"
  percentage_free: "{{ $value | humanizePercentage }}"
  free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
- alert: hwmon fan alarm
  expr: node_hwmon_fan_alarm == 1
  alertgroup: "{{ $labels.instance }}"
  fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
  fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: hwmon temperature alarm
  expr: node_hwmon_temp_alarm == 1
  alertgroup: "{{ $labels.instance }}"
  temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
  temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
  temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: hwmon voltage alarm
  expr: node_hwmon_in_alarm == 1
  alertgroup: "{{ $labels.instance }}"
  in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: ipmi fan alarm
  expr: ipmi_fan_speed_state > 0
  alertgroup: "{{ $labels.instance }}"
  fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: ipmi temperature alarm
  expr: ipmi_temperature_state > 0
  alertgroup: "{{ $labels.instance }}"
  temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: ipmi voltage alarm
  expr: ipmi_voltage_state > 0
  alertgroup: "{{ $labels.instance }}"
  voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
- alert: ipmi power alarm
  expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: juniper cpu alarm
  expr: jnxOperating5MinLoadAvg{jnxOperatingContentsIndex="9"} / 200 > 0.5
  alertgroup: "{{ $labels.site }}"
  load_average: "{{ $value | humanizePercentage }}"
- alert: juniper fan alarm
  expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
  alertgroup: "{{ $labels.site }}"
- alert: juniper power alarm
  expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
  alertgroup: "{{ $labels.site }}"
  alertgroup: "{{ $labels.instance }}"
- alert: exim queue length
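  # exim_queue and exim_queue_limit presumably come from different scrape jobs, hence ignoring(job) when comparing them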
  expr: exim_queue > ignoring(job) exim_queue_limit
  queue_length: "{{ $value }}"
- alert: mailman queue length
  expr: mailman_queue_length > 200
  queue_length: "{{ $value }}"
- alert: mdadm array inactive
  expr: node_md_state{state="inactive"} > 0
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm array degraded
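  # fires when an array has fewer active disks than the number it requires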
  expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm disk failed
  expr: node_md_disks{state="failed"} > 0
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
  alertgroup: "{{ $labels.instance }}"
  memory_free: "{{ $value | humanizePercentage }}"
- alert: memory pressure
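  # pressure stall information: fraction of the last 5 minutes in which some task was stalled waiting for memory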
  expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: oom kill detected
  expr: increase(node_vmstat_oom_kill[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_oom_kills: "{{ $value }}"
  alertgroup: "{{ $labels.instance }}"
- alert: mysql connection limit
  expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
  alertgroup: "{{ $labels.instance }}"
  connections_used: "{{ $value | humanizePercentage }}"
- alert: interface transmit rate
  expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
  alertgroup: "{{ $labels.instance }}"
  bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface receive rate
  expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
  alertgroup: "{{ $labels.instance }}"
  bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface transmit errors
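  # transmit errors above 1% of packets; wireguard interfaces are excluded here and covered by the separate 5% alert below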
  expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: wireguard interface transmit errors
  expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: interface receive errors
  expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: conntrack entries
  expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
  alertgroup: "{{ $labels.instance }}"
  entries_used: "{{ $value | humanizePercentage }}"
- alert: nominatim replication delay
  expr: nominatim_replication_delay > 10800
  alertgroup: nominatim
  delay: "{{ $value | humanizeDuration }}"
- alert: overpass osm database age
  expr: overpass_database_age_seconds{database="osm"} > 3600
  age: "{{ $value | humanizeDuration }}"
- alert: overpass area database age
  expr: overpass_database_age_seconds{database="area"} > 86400
  age: "{{ $value | humanizeDuration }}"
- alert: passenger down
  expr: passenger_up == 0
  alertgroup: "{{ $labels.instance }}"
- alert: passenger queuing
  expr: passenger_top_level_request_queue > 0
  alertgroup: "{{ $labels.instance }}"
- alert: passenger application queuing
  expr: passenger_app_request_queue > 0
  alertgroup: "{{ $labels.instance }}"
- alert: planet dump overdue
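  # the chef_role join restricts this to hosts with the planetdump role; fires when a planet dump file has not been updated for more than a week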
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  overdue_by: "{{ $value | humanizeDuration }}"
- alert: notes dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  overdue_by: "{{ $value | humanizeDuration }}"
- alert: daily replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: hourly replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: minutely replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: changeset replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: postgresql down
  alertgroup: "{{ $labels.instance }}"
- alert: postgresql replication delay
  expr: pg_replication_lag > 30
  alertgroup: "{{ $labels.instance }}"
  delay: "{{ $value | humanizeDuration }}"
- alert: postgresql connection limit
  expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
  alertgroup: "{{ $labels.instance }}"
  connections_used: "{{ $value | humanizePercentage }}"
- alert: postgresql deadlocks
  expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
  alertgroup: "{{ $labels.instance }}"
  new_deadlocks: "{{ $value }}"
- alert: postgresql slow queries
  expr: pg_slow_queries > 0
  alertgroup: "{{ $labels.instance }}"
  queries: "{{ $value }}"
- alert: postgresql idle transactions
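  # compares the total count of idle-in-transaction backends against the <=300s histogram bucket, so this fires when any transaction has been idle for more than five minutes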
  expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
  alertgroup: "{{ $labels.instance }}"
  queries: "{{ $value }}"
- alert: prometheus configuration error
  expr: prometheus_config_last_reload_successful == 0
  alertgroup: "prometheus"
- alert: prometheus target missing
  alertgroup: "prometheus"
- alert: raid controller battery failed
  expr: ohai_controller_info{battery_status="failed"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: raid controller battery recharging
  expr: ohai_controller_info{battery_status="recharging"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: raid array degraded
  expr: ohai_array_info{status="degraded"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: raid disk failed
  expr: ohai_disk_info{status="failed"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: memory controller errors
  expr: increase(rasdaemon_mc_events_total[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_errors: "{{ $value }}"
- alert: pcie aer errors
  expr: increase(rasdaemon_aer_events_total[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_errors: "{{ $value }}"
- alert: smart failure
  expr: smart_health_status == 0
  alertgroup: "{{ $labels.instance }}"
- alert: smart ssd wearout approaching
  expr: smart_percentage_used / 100 >= 0.8
  alertgroup: "{{ $labels.instance }}"
  percentage_used: "{{ $value | humanizePercentage }}"
  expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
  alertgroup: smokeping
  loss_rate: "{{ $value | humanizePercentage }}"
- alert: snmp pdus missing
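  # fires when a scrape returns fewer SNMP PDUs than the maximum seen over the last day, suggesting part of the device has stopped responding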
  expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
  missing_pdus: "{{ $value }}"
- alert: ssl certificate probe failed
  expr: ssl_probe_success == 0
- alert: ssl certificate expiry
  expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
  expires_in: "{{ $value | humanizeDuration }}"
- alert: ssl certificate revoked
  expr: ssl_ocsp_response_status == 1
- alert: ocsp status unknown
  # ssl_ocsp_response_status: 0 = good, 1 = revoked, 2 = unknown
  expr: ssl_ocsp_response_status == 2
- alert: statuscake uptime check failing
  expr: statuscake_paused == 0 and statuscake_up == 0
  alertgroup: statuscake
- alert: systemd failed service
  expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
  alertgroup: "{{ $labels.instance }}"
- alert: systemd failed chef client service
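  # chef-client.service is normally a short run triggered by chef-client.timer, so this fires if the unit has not been inactive at any point in the last six hours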
  expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
  alertgroup: "{{ $labels.instance }}"
- alert: taginfo planet age
  expr: time() - taginfo_data_from_seconds > 129600
  age: "{{ $value | humanizeDuration }}"
- alert: taginfo database age
  expr: time() - taginfo_database_update_finish_seconds > 129600
  age: "{{ $value | humanizeDuration }}"
- alert: taginfo database size
  expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
  size_change: "{{ $value | humanizePercentage }}"
- alert: renderd replication delay
  expr: renderd_replication_delay > 120
  delay: "{{ $value | humanizeDuration }}"
- alert: missed tile rate
  expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
  miss_rate: "{{ $value | humanizePercentage }}"
- alert: tile render rate
  expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
  render_rate: "{{ $value }} tiles/s"
- alert: clock not synchronising
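  # the kernel's maxerror estimate appears to be pinned at 16 seconds once synchronisation is lost, so this fires after a full minute without sync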
  expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
  alertgroup: "{{ $labels.instance }}"
- alert: clock skew detected
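  # fires when the clock is more than 50ms out and the offset is not trending back towards zero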
  expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
  alertgroup: "{{ $labels.instance }}"
  skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
- alert: web error rate
  expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
  error_rate: "{{ $value | humanizePercentage }}"
- alert: job processing rate
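  # restricted to the db-master role; fires when deletions from the delayed_jobs table (completed jobs) fall below 90% of insertions over the last hour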
  expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
  job_processing_rate: "{{ $value | humanizePercentage }}"