# DO NOT EDIT - This file is being maintained by Chef

groups:
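  # Basic liveness: fire when any scrape target has been unreachable for five minutes.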
  - name: alertmanager
    rules:
      - alert: prometheus target missing
        expr: up == 0
        for: 5m
        labels:
          alertgroup: "prometheus"
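  # Environmental monitoring for the Amsterdam site, driven by APC PDU SNMP metrics.
  # The rPDU2 MIB reports current and temperature in tenths of a unit, hence the
  # divisions by 10.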
  - name: amsterdam
    rules:
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site current draw
        expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 25
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
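  # Apache health: exporter reachability, worker pool saturation, and a sustained
  # request rate far below the same hour one week earlier.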
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
      - alert: apache low request rate
        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          request_rate: "{{ $value | humanizePercentage }}"
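  # Fire if the chef-client systemd timer has not triggered for over an hour,
  # sustained for 12 hours, i.e. the node has stopped converging.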
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
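  # Hardware health for Cisco switches: fans, temperature sensors, and the main
  # and redundant power supplies.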
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
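  # CPU saturation via the kernel's pressure stall information (PSI): the fraction
  # of time at least one runnable task was stalled waiting for a CPU.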
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
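  # Replication lag routed to the database alert group; a per-instance variant of
  # the same check lives in the postgresql group below.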
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 5
        for: 5m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
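  # CDN error budget: alert when more than 0.5% of Fastly responses for a
  # service/datacenter pair are 5xx.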
  - name: fastly
    rules:
      - alert: error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
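  # Filesystem alerts: remounted read-only, under 5% space free, or under 10%
  # inodes free.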
  - name: filesystem
    rules:
      - alert: readonly filesystem
        expr: node_filesystem_readonly == 1
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
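  # lm-sensors (hwmon) alarms, with the matching reading and its limits pulled in
  # via template queries for context.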
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
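  # I/O saturation via PSI, as in the cpu group, but tolerated for an hour before
  # alerting.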
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
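  # IPMI sensor alarms: a non-zero state is treated as abnormal, and the matching
  # reading is attached as an annotation where one exists.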
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
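  # Chassis health for Juniper switches via jnxOperatingState: per the alert names,
  # contents index 4 selects fans and index 2 power supplies; anything not in the
  # "running" state alerts.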
  - name: juniper
    rules:
      - alert: juniper fan alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!="running"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!="running"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
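  # Mail queue backlogs: exim is measured against a per-host limit metric, mailman
  # against a fixed threshold.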
  - name: mail
    rules:
      - alert: exim queue length
        expr: exim_queue > exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
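  # Software RAID: inactive arrays, degraded arrays (fewer active disks than
  # required) and individual failed disks, each annotated with the disk counts.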
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
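  # Memory alerts: low available memory, sustained memory pressure (PSI), and any
  # OOM kill observed by the kernel.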
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
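  # Network alerts: links running at more than 98% of line rate, packet error
  # rates above 1%, and the conntrack table over 80% full.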
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
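  # Planet file and replication feed freshness, scoped to hosts carrying the
  # planetdump chef role. Each threshold matches the feed's cadence (weekly planet
  # dump, daily notes dump, then day/hour/minute/changeset replication).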
  - name: planet
    rules:
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
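  # Per-instance PostgreSQL health: availability, replication lag, connection
  # saturation, deadlocks and slow queries.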
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 5
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
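  # SMART monitoring: overall drive health plus SSD wear level.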
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        # smart_percentage_used is reported on a 0-100 scale, so normalise it to a
        # fraction for humanizePercentage to render correctly
        expr: smart_percentage_used / 100 >= 0.9
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
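  # TLS certificate monitoring: probe failures, certificates within 14 days of
  # expiry, and OCSP status (ssl_ocsp_response_status: 0 = good, 1 = revoked,
  # 2 = unknown).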
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
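  # External uptime checks as seen by StatusCake (paused checks are ignored).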
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_uptime{status="down",paused="false"} > 0
        for: 0m
        labels:
          alertgroup: statuscake
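  # Failed systemd units, with a much longer grace period for chef-client.service,
  # which its timer will re-run anyway.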
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
        for: 6h
        labels:
          alertgroup: "{{ $labels.instance }}"
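  # Tile rendering: renderd falling behind on replication, and 404s from mod_tile
  # exceeding 5% of responses.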
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
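  # Clock health from the kernel NTP state: loss of synchronisation, and offsets
  # beyond 50ms that are not converging back towards zero.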
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
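  # Website API: 5xx error rate, and the delayed_jobs queue on the database master
  # (deletions should keep pace with insertions).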
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 15m
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"