# DO NOT EDIT - This file is being maintained by Chef
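# A note on scaling, assuming the APC rPDU2 MIB convention: phase current is
# reported in tenths of an amp, so the "/ 10" below converts to amps before
# comparing against the 28A limit.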
- alert: pdu current draw
  expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
  alertgroup: "amsterdam"
  current: "{{ $value | humanize }}A"
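# The next rule alerts on sustained power draw: apparent power is averaged
# over an hour and, assuming the MIB reports hundredths of kVA, divided by
# 100 to give kVA.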
  expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
  alertgroup: "amsterdam"
  current: "{{ $value | humanize }}kVA"
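# Temperatures appear to be reported in tenths of a degree, hence the
# "/ 10". Using min() over the site's sensors means the hot branch only
# fires once every sensor is above 26C.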
- alert: site temperature
  expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
  alertgroup: "amsterdam"
  temperature: "{{ $value | humanize }}C"
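# Humidity is divided by 100 to give a ratio for humanizePercentage; using
# max() means the dry branch only fires when even the dampest sensor reads
# below 25%.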
- alert: site humidity
  expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
  alertgroup: "amsterdam"
  humidity: "{{ $value | humanizePercentage }}"
  alertgroup: "{{ $labels.instance }}"
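# Busy workers as a fraction of all scoreboard slots, per instance.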
- alert: apache workers busy
  expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
  alertgroup: "{{ $labels.instance }}"
  busy_workers: "{{ $value | humanizePercentage }}"
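# Fires when the chef-client systemd timer last triggered more than an hour
# ago, i.e. scheduled runs have stopped.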
- alert: chef client not running
  expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
  alertgroup: "{{ $labels.instance }}"
  down_time: "{{ $value | humanizeDuration }}"
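# These SNMP status metrics appear to carry their decoded state as a label
# of the same name, so selecting != "normal" (or != "ok" below) matches only
# units reporting an abnormal state.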
- alert: cisco fan alarm
  expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
  fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: cisco temperature alarm
  expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
  alertgroup: "{{ $labels.site }}"
  temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: cisco main power alarm
  expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
- alert: cisco redundant power alarm
  expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
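# Pressure-stall (PSI) metric: the rate of waiting-seconds approximates the
# fraction of time tasks were stalled on the CPU over the window.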
- alert: cpu pressure
  expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: postgres replication delay
  expr: pg_replication_lag_seconds > 30
  delay: "{{ $value | humanizeDuration }}"
- alert: discourse job failure rate
  expr: rate(discourse_job_failures[5m]) > 0
  alertgroup: discourse
  failure_rate: "{{ $value }} jobs/s"
- alert: pdu current draw
  expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
  current: "{{ $value | humanize }}A"
  expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
  current: "{{ $value | humanize }}kVA"
- alert: site temperature
  expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
  temperature: "{{ $value | humanize }}C"
- alert: site humidity
  expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
  humidity: "{{ $value | humanizePercentage }}"
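# 5xx responses as a fraction of all responses per service and datacenter;
# 0.005 is a 0.5% error budget.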
- alert: fastly error rate
  expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
  error_rate: "{{ $value | humanizePercentage }}"
- alert: fastly healthcheck failing
  expr: count(fastly_healthcheck_status == 0) by (service) > 0
- alert: multiple fastly healthchecks failing
  expr: count(fastly_healthcheck_status == 0) by (service) > 4
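# Comparing against the 7-day minimum makes this fire only when a filesystem
# that was recently writable becomes read-only, not for permanently
# read-only mounts.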
- alert: readonly filesystem
  expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
  alertgroup: "{{ $labels.instance }}"
- alert: filesystem low on space
  expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
  alertgroup: "{{ $labels.instance }}"
  percentage_free: "{{ $value | humanizePercentage }}"
  free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
  total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
- alert: filesystem low on inodes
  expr: node_filesystem_files_free / node_filesystem_files < 0.1
  alertgroup: "{{ $labels.instance }}"
  percentage_free: "{{ $value | humanizePercentage }}"
  free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
- alert: hwmon fan alarm
  expr: node_hwmon_fan_alarm == 1
  alertgroup: "{{ $labels.instance }}"
  fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
  fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: hwmon temperature alarm
  expr: node_hwmon_temp_alarm == 1
  alertgroup: "{{ $labels.instance }}"
  temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
  temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
  temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: hwmon voltage alarm
  expr: node_hwmon_in_alarm == 1
  alertgroup: "{{ $labels.instance }}"
  in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
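# PSI again: the fraction of time tasks were stalled on I/O.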
- alert: io pressure
  expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: ipmi fan alarm
  expr: ipmi_fan_speed_state > 0
  alertgroup: "{{ $labels.instance }}"
  fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: ipmi temperature alarm
  expr: ipmi_temperature_state > 0
  alertgroup: "{{ $labels.instance }}"
  temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: ipmi voltage alarm
  expr: ipmi_voltage_state > 0
  alertgroup: "{{ $labels.instance }}"
  voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
- alert: ipmi power alarm
  expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
  alertgroup: "{{ $labels.instance }}"
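# Assumption about the Juniper operating table: contents indexes 2, 4 and 7
# select the power supplies, fans and routing-engine CPU respectively.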
- alert: juniper cpu alarm
  expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
  alertgroup: "{{ $labels.site }}"
- alert: juniper fan alarm
  expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
  alertgroup: "{{ $labels.site }}"
- alert: juniper power alarm
  expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
  alertgroup: "{{ $labels.site }}"
  alertgroup: "{{ $labels.instance }}"
- alert: exim queue length
  expr: exim_queue > ignoring(job) exim_queue_limit
  queue_length: "{{ $value }}"
- alert: mailman queue length
  expr: mailman_queue_length > 200
  queue_length: "{{ $value }}"
- alert: mdadm array inactive
  expr: node_md_state{state="inactive"} > 0
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
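# Summing active disks without the state label keeps the remaining labels
# intact so the result can be matched against node_md_disks_required.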
- alert: mdadm array degraded
  expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm disk failed
  expr: node_md_disks{state="failed"} > 0
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
  alertgroup: "{{ $labels.instance }}"
  memory_free: "{{ $value | humanizePercentage }}"
- alert: memory pressure
  expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: oom kill detected
  expr: increase(node_vmstat_oom_kill[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_oom_kills: "{{ $value }}"
  alertgroup: "{{ $labels.instance }}"
- alert: mysql connection limit
  expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
  alertgroup: "{{ $labels.instance }}"
  connections_used: "{{ $value | humanizePercentage }}"
- alert: interface transmit rate
  expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
  alertgroup: "{{ $labels.instance }}"
  bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface receive rate
  expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
  alertgroup: "{{ $labels.instance }}"
  bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface transmit errors
  expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: wireguard interface transmit errors
  expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: interface receive errors
  expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: conntrack entries
  expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
  alertgroup: "{{ $labels.instance }}"
  entries_used: "{{ $value | humanizePercentage }}"
- alert: nominatim replication delay
  expr: nominatim_replication_delay > 10800
  alertgroup: nominatim
  delay: "{{ $value | humanizeDuration }}"
- alert: overpass osm database age
  expr: overpass_database_age_seconds{database="osm"} > 3600
  age: "{{ $value | humanizeDuration }}"
- alert: overpass area database age
  expr: overpass_database_age_seconds{database="area"} > 86400
  age: "{{ $value | humanizeDuration }}"
- alert: passenger down
  expr: passenger_up == 0
  alertgroup: "{{ $labels.instance }}"
- alert: passenger queuing
  expr: passenger_top_level_request_queue > 0
  alertgroup: "{{ $labels.instance }}"
- alert: passenger application queuing
  expr: passenger_app_request_queue > 0
  alertgroup: "{{ $labels.instance }}"
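# The "and ignoring (...) chef_role{name="planetdump"} == 1" pattern gates
# each of the following rules to hosts carrying the planetdump Chef role;
# ignoring() drops the labels that differ so the two vectors can match.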
- alert: planet dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  overdue_by: "{{ $value | humanizeDuration }}"
- alert: notes dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  overdue_by: "{{ $value | humanizeDuration }}"
- alert: daily replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: hourly replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: minutely replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: changeset replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: postgresql down
  alertgroup: "{{ $labels.instance }}"
- alert: postgresql replication delay
  expr: pg_replication_lag_seconds > 30
  alertgroup: "{{ $labels.instance }}"
  delay: "{{ $value | humanizeDuration }}"
- alert: postgresql connection limit
  expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
  alertgroup: "{{ $labels.instance }}"
  connections_used: "{{ $value | humanizePercentage }}"
- alert: postgresql deadlocks
  expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
  alertgroup: "{{ $labels.instance }}"
  new_deadlocks: "{{ $value }}"
- alert: postgresql slow queries
  expr: pg_slow_queries > 0
  alertgroup: "{{ $labels.instance }}"
  queries: "{{ $value }}"
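# Histogram comparison: if the total count exceeds the le="300" bucket, some
# backend has been idle in a transaction for more than five minutes.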
- alert: postgresql idle transactions
  expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
  alertgroup: "{{ $labels.instance }}"
  queries: "{{ $value }}"
- alert: prometheus configuration error
  expr: prometheus_config_last_reload_successful == 0
  alertgroup: "prometheus"
- alert: prometheus target missing
  alertgroup: "prometheus"
- alert: raid controller battery failed
  expr: ohai_controller_info{battery_status="failed"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: raid controller battery recharging
  expr: ohai_controller_info{battery_status="recharging"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: raid array degraded
  expr: ohai_array_info{status="degraded"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: raid disk failed
  expr: ohai_disk_info{status="failed"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: memory controller errors
  expr: increase(rasdaemon_mc_events_total[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_errors: "{{ $value }}"
- alert: pcie aer errors
  expr: increase(rasdaemon_aer_events_total[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_errors: "{{ $value }}"
- alert: smart failure
  expr: smart_health_status == 0
  alertgroup: "{{ $labels.instance }}"
- alert: smart ssd wearout approaching
  expr: smart_percentage_used / 100 >= 0.8
  alertgroup: "{{ $labels.instance }}"
  percentage_used: "{{ $value | humanizePercentage }}"
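# Loss rate derived from counters: responses over requests is the success
# ratio, so one minus that is the fraction of lost probes.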
  expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
  alertgroup: smokeping
  loss_rate: "{{ $value | humanizePercentage }}"
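# If a scrape now returns fewer PDUs than the best scrape of the last day,
# part of the SNMP tree has presumably stopped responding.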
- alert: snmp pdus missing
  expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
  missing_pdus: "{{ $value }}"
- alert: ssl certificate probe failed
  expr: ssl_probe_success == 0
- alert: ssl certificate expiry
  expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
  expires_in: "{{ $value | humanizeDuration }}"
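# OCSP response status codes, assuming the usual ssl_exporter convention:
# 0 = good, 1 = revoked, 2 = unknown.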
- alert: ssl certificate revoked
  expr: ssl_ocsp_response_status == 1
- alert: ocsp status unknown
  expr: ssl_ocsp_response_status == 2
- alert: statuscake uptime check failing
  expr: statuscake_paused == 0 and statuscake_up == 0
  alertgroup: statuscake
- alert: systemd failed service
  expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
  alertgroup: "{{ $labels.instance }}"
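# chef-client.service normally runs briefly and returns to inactive; if the
# inactive state is never seen across six hours the service is stuck.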
- alert: systemd failed chef client service
  expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
  alertgroup: "{{ $labels.instance }}"
- alert: taginfo planet age
  expr: time() - taginfo_data_from_seconds > 129600
  age: "{{ $value | humanizeDuration }}"
- alert: taginfo database age
  expr: time() - taginfo_database_update_finish_seconds > 129600
  age: "{{ $value | humanizeDuration }}"
- alert: taginfo database size
  expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
  size_change: "{{ $value | humanizePercentage }}"
- alert: renderd replication delay
  expr: renderd_replication_delay > 120
  delay: "{{ $value | humanizeDuration }}"
- alert: missed tile rate
  expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
  miss_rate: "{{ $value | humanizePercentage }}"
- alert: tile render rate
  expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
  render_rate: "{{ $value }} tiles/s"
- alert: clock not synchronising
  expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
  alertgroup: "{{ $labels.instance }}"
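# Fires only when the offset is more than 50ms from zero and the derivative
# shows it is not converging back.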
- alert: clock skew detected
  expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
  alertgroup: "{{ $labels.instance }}"
  skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{end}}"
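# The status regex matches 500-508 and 510-599, i.e. every 5xx code except
# the non-standard 509.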
- alert: web error rate
  expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
  error_rate: "{{ $value | humanizePercentage }}"
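# delayed_jobs rows are inserted when queued and deleted when processed, so
# a deletion/insertion ratio below 0.9 suggests the queue is growing; the
# chef_role match restricts the rule to the database master.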
- alert: job processing rate
  expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
  job_processing_rate: "{{ $value | humanizePercentage }}"