# DO NOT EDIT - This file is being maintained by Chef
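# The APC rPDU2 MIB reports phase current in tenths of an amp, hence the
# "/ 10" scaling in the PDU rules below (an assumption from the MIB
# definition; adjust if the SNMP exporter already applies the scaling).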
- alert: pdu current draw
  expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
  alertgroup: "amsterdam"
  current: "{{ $value | humanize }}A"
- alert: site current draw
  expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
  alertgroup: "amsterdam"
  current: "{{ $value | humanize }}A"
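# Temperature is likewise presumably reported in tenths of a degree, so
# "/ 10" puts the alert band at 18-26C.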
- alert: site temperature
  expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
  alertgroup: "amsterdam"
  temperature: "{{ $value | humanize }}C"
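# The humidity metric appears to be a whole-number percentage; dividing
# by 100 gives the fraction that humanizePercentage expects, with an
# acceptable band of 25-65% RH.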
- alert: site humidity
  expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
  alertgroup: "amsterdam"
  humidity: "{{ $value | humanizePercentage }}"
  alertgroup: "{{ $labels.instance }}"
- alert: apache workers busy
  expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
  alertgroup: "{{ $labels.instance }}"
  busy_workers: "{{ $value | humanizePercentage }}"
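# "offset 1w" compares the current request rate against the same hour a
# week earlier; the "> 2" guard keeps the alert from firing on instances
# whose baseline traffic is too low for the ratio to be meaningful.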
- alert: apache low request rate
  expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
  alertgroup: "{{ $labels.instance }}"
  request_rate: "{{ $value | humanizePercentage }}"
- alert: chef client not running
  expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
  alertgroup: "{{ $labels.instance }}"
  down_time: "{{ $value | humanizeDuration }}"
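# The annotations below use the "with printf ... | query" template
# idiom: a PromQL instant query is assembled from the alert's own labels
# at notification time so the message can include a related reading
# (here the fan speed) alongside the alarm state.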
- alert: cisco fan alarm
  expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
  fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: cisco temperature alarm
  expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
  alertgroup: "{{ $labels.site }}"
  temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: cisco main power alarm
  expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
- alert: cisco redundant power alarm
  expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
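# node_pressure_* metrics are kernel PSI stall counters; rate() over 5m
# gives the fraction of wall-clock time tasks spent waiting, so 0.6
# means the resource was contended 60% of the time.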
  expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: postgres replication delay
  expr: pg_replication_lag_seconds > 5
  delay: "{{ $value | humanizeDuration }}"
- alert: pdu current draw
  expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 13
  current: "{{ $value | humanize }}A"
- alert: site current draw
  expr: sum(rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10) > 17
  current: "{{ $value | humanize }}A"
- alert: site temperature
  expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
  temperature: "{{ $value | humanize }}C"
- alert: site humidity
  expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
  humidity: "{{ $value | humanizePercentage }}"
- alert: fastly error rate
  expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
  error_rate: "{{ $value | humanizePercentage }}"
- alert: fastly healthcheck failing
  expr: count(fastly_healthcheck_status == 0) by (service) > 0
- alert: multiple fastly healthchecks failing
  expr: count(fastly_healthcheck_status == 0) by (service) > 4
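# node_filesystem_readonly is a 0/1 gauge; comparing it with its 7-day
# minimum means this fires only when a filesystem *becomes* read-only,
# not for filesystems that are deliberately mounted read-only.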
- alert: readonly filesystem
  expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
  alertgroup: "{{ $labels.instance }}"
- alert: filesystem low on space
  expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
  alertgroup: "{{ $labels.instance }}"
  percentage_free: "{{ $value | humanizePercentage }}"
  free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
  total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
- alert: filesystem low on inodes
  expr: node_filesystem_files_free / node_filesystem_files < 0.1
  alertgroup: "{{ $labels.instance }}"
  percentage_free: "{{ $value | humanizePercentage }}"
  free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
- alert: hwmon fan alarm
  expr: node_hwmon_fan_alarm == 1
  alertgroup: "{{ $labels.instance }}"
  fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
  fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: hwmon temperature alarm
  expr: node_hwmon_temp_alarm == 1
  alertgroup: "{{ $labels.instance }}"
  temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
  temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
  temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: hwmon voltage alarm
  expr: node_hwmon_in_alarm == 1
  alertgroup: "{{ $labels.instance }}"
  in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: ipmi fan alarm
  expr: ipmi_fan_speed_state > 0
  alertgroup: "{{ $labels.instance }}"
  fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: ipmi temperature alarm
  expr: ipmi_temperature_state > 0
  alertgroup: "{{ $labels.instance }}"
  temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: ipmi voltage alarm
  expr: ipmi_voltage_state > 0
  alertgroup: "{{ $labels.instance }}"
  voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
- alert: ipmi power alarm
  expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: juniper cpu alarm
  expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
  alertgroup: "{{ $labels.site }}"
- alert: juniper fan alarm
  expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
  alertgroup: "{{ $labels.site }}"
- alert: juniper power alarm
  expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
  alertgroup: "{{ $labels.site }}"
  alertgroup: "{{ $labels.instance }}"
- alert: exim queue length
  expr: exim_queue > exim_queue_limit
  queue_length: "{{ $value }}"
- alert: mailman queue length
  expr: mailman_queue_length > 200
  queue_length: "{{ $value }}"
- alert: mdadm array inactive
  expr: node_md_state{state="inactive"} > 0
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
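# "without (state)" drops the state label from the sum so the result can
# be compared against node_md_disks_required, which carries no such
# label.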
- alert: mdadm array degraded
  expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm disk failed
  expr: node_md_disks{state="failed"} > 0
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
  alertgroup: "{{ $labels.instance }}"
  memory_free: "{{ $value | humanizePercentage }}"
- alert: memory pressure
  expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: oom kill detected
  expr: increase(node_vmstat_oom_kill[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_oom_kills: "{{ $value }}"
  alertgroup: "{{ $labels.instance }}"
- alert: mysql connection limit
  expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
  alertgroup: "{{ $labels.instance }}"
  connections_used: "{{ $value | humanizePercentage }}"
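# Link saturation checks: bytes per second over the interface's
# advertised speed, alerting above 98% utilisation in either direction.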
- alert: interface transmit rate
  expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
  alertgroup: "{{ $labels.instance }}"
  bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface receive rate
  expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
  alertgroup: "{{ $labels.instance }}"
  bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface transmit errors
  expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: wireguard interface transmit errors
  expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: interface receive errors
  expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: conntrack entries
  expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
  alertgroup: "{{ $labels.instance }}"
  entries_used: "{{ $value | humanizePercentage }}"
- alert: nominatim replication delay
  expr: nominatim_replication_delay > 10800
  alertgroup: nominatim
  delay: "{{ $value | humanizeDuration }}"
- alert: overpass osm database age
  expr: overpass_database_age_seconds{database="osm"} > 3600
  age: "{{ $value | humanizeDuration }}"
- alert: overpass area database age
  expr: overpass_database_age_seconds{database="area"} > 86400
  age: "{{ $value | humanizeDuration }}"
- alert: passenger down
  expr: passenger_up == 0
  alertgroup: "{{ $labels.instance }}"
- alert: passenger queuing
  expr: passenger_top_level_request_queue > 0
  alertgroup: "{{ $labels.instance }}"
- alert: passenger application queuing
  expr: passenger_app_request_queue > 0
  alertgroup: "{{ $labels.instance }}"
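# The "and ignoring (...) chef_role" joins below restrict these alerts
# to hosts carrying the planetdump Chef role, ignoring the labels that
# differ between the two series.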
- alert: planet dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  overdue_by: "{{ $value | humanizeDuration }}"
- alert: notes dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  overdue_by: "{{ $value | humanizeDuration }}"
- alert: daily replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: hourly replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: minutely replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: changeset replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: postgresql down
  alertgroup: "{{ $labels.instance }}"
- alert: postgresql replication delay
  expr: pg_replication_lag_seconds > 5
  alertgroup: "{{ $labels.instance }}"
  delay: "{{ $value | humanizeDuration }}"
- alert: postgresql connection limit
  expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
  alertgroup: "{{ $labels.instance }}"
  connections_used: "{{ $value | humanizePercentage }}"
- alert: postgresql deadlocks
  expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
  alertgroup: "{{ $labels.instance }}"
  new_deadlocks: "{{ $value }}"
- alert: postgresql slow queries
  expr: pg_slow_queries > 0
  alertgroup: "{{ $labels.instance }}"
  queries: "{{ $value }}"
- alert: prometheus configuration error
  expr: prometheus_config_last_reload_successful == 0
  alertgroup: "prometheus"
- alert: prometheus target missing
  alertgroup: "prometheus"
- alert: raid array degraded
  expr: ohai_array_info{status="degraded"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: raid disk failed
  expr: ohai_disk_info{status="failed"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: memory controller errors
  expr: increase(rasdaemon_mc_events_total[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_errors: "{{ $value }}"
- alert: pcie aer errors
  expr: increase(rasdaemon_aer_events_total[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_errors: "{{ $value }}"
- alert: smart failure
  expr: smart_health_status == 0
  alertgroup: "{{ $labels.instance }}"
- alert: smart ssd wearout approaching
  expr: smart_percentage_used >= 80
  alertgroup: "{{ $labels.instance }}"
  percentage_used: "{{ $value }}%"
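# Compares each scrape's returned PDU count with the day's maximum, so
# any drop indicates table entries that have stopped answering.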
- alert: snmp pdus missing
  expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
  missing_pdus: "{{ $value }}"
- alert: ssl certificate probe failed
  expr: ssl_probe_success == 0
- alert: ssl certificate expiry
  expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
  expires_in: "{{ $value | humanizeDuration }}"
- alert: ssl certificate revoked
  expr: ssl_ocsp_response_status == 1
- alert: ocsp status unknown
  expr: ssl_ocsp_response_status == 2
- alert: statuscake uptime check failing
  expr: statuscake_uptime{status="down",paused="false"} > 0
  alertgroup: statuscake
- alert: systemd failed service
  expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
  alertgroup: "{{ $labels.instance }}"
- alert: systemd failed chef client service
  expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
  alertgroup: "{{ $labels.instance }}"
- alert: renderd replication delay
  expr: renderd_replication_delay > 120
  delay: "{{ $value | humanizeDuration }}"
- alert: missed tile rate
  expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
  miss_rate: "{{ $value | humanizePercentage }}"
- alert: tile render rate
  expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
  render_rate: "{{ $value }} tiles/s"
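# The kernel pins maxerror at 16 seconds while the clock is
# unsynchronised, so this fires once NTP sync has been lost for a full
# minute.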
- alert: clock not synchronising
  expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
  alertgroup: "{{ $labels.instance }}"
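# Fires when the offset exceeds 50ms and deriv() shows it is not
# converging back towards zero.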
- alert: clock skew detected
  expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
  alertgroup: "{{ $labels.instance }}"
  skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
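# The status regex matches 500-508 and 510-599; 509 appears to be
# excluded deliberately.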
- alert: web error rate
  expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
  error_rate: "{{ $value | humanizePercentage }}"
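# Rows in delayed_jobs are inserted when a job is queued and deleted
# once it is processed, so a delete/insert ratio below 0.9 suggests the
# queue is falling behind; the chef_role join restricts the check to the
# db-master host.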
- alert: job processing rate
  expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
  job_processing_rate: "{{ $value | humanizePercentage }}"