# DO NOT EDIT - This file is being maintained by Chef
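# Prometheus alerting rules: each rule fires when its PromQL expression holds and
# labels the alert with an alertgroup (typically the affected instance).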
- alert: prometheus target missing
alertgroup: "prometheus"
alertgroup: "{{ $labels.instance }}"
- alert: apache workers busy
expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
alertgroup: "{{ $labels.instance }}"
busy_workers: "{{ $value | humanizePercentage }}"
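# Database replication lag: fires when the reported replication lag exceeds five seconds.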
- alert: postgres replication delay
expr: pg_replication_lag_seconds > 5
delay: "{{ $value | humanizeDuration }}"
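# Filesystem capacity: free space below 5% or free inodes below 10%, with current
# and total figures looked up per mountpoint for the alert annotations.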
- alert: filesystem low on space
expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
alertgroup: "{{ $labels.instance }}"
percentage_free: "{{ $value | humanizePercentage }}"
free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
- alert: filesystem low on inodes
expr: node_filesystem_files_free / node_filesystem_files < 0.1
alertgroup: "{{ $labels.instance }}"
percentage_free: "{{ $value | humanizePercentage }}"
free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
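# Hardware monitoring (hwmon): fan, temperature and voltage sensors reporting an
# alarm, annotated with the current readings and their configured limits.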
- alert: hwmon fan alarm
expr: node_hwmon_fan_alarm == 1
alertgroup: "{{ $labels.instance }}"
fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: hwmon temperature alarm
expr: node_hwmon_temp_alarm == 1
alertgroup: "{{ $labels.instance }}"
temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: hwmon voltage alarm
expr: node_hwmon_in_alarm == 1
alertgroup: "{{ $labels.instance }}"
in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
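# IPMI sensors: any fan, temperature, voltage or power sensor in a non-nominal state.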
- alert: ipmi fan alarm
expr: ipmi_fan_speed_state > 0
alertgroup: "{{ $labels.instance }}"
fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: ipmi temperature alarm
expr: ipmi_temperature_state > 0
alertgroup: "{{ $labels.instance }}"
temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: ipmi voltage alarm
expr: ipmi_voltage_state > 0
alertgroup: "{{ $labels.instance }}"
voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
- alert: ipmi power alarm
expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
alertgroup: "{{ $labels.instance }}"
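# Software RAID (mdadm): inactive arrays, arrays running with fewer active disks
# than required, and failed member disks, annotated with per-array disk counts.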
- alert: mdadm array inactive
expr: node_md_state{state="inactive"} > 0
alertgroup: "{{ $labels.instance }}"
required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm array degraded
expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
alertgroup: "{{ $labels.instance }}"
required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm disk failed
expr: node_md_disks{state="failed"} > 0
alertgroup: "{{ $labels.instance }}"
required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
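# Memory: available memory below 10% of total, high rates of major page faults, and OOM kills.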
expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
alertgroup: "{{ $labels.instance }}"
memory_free: "{{ $value | humanizePercentage }}"
- alert: memory pressure
expr: rate(node_vmstat_pgmajfault[1m]) > 1000
alertgroup: "{{ $labels.instance }}"
major_page_faults: "{{ $value }} faults/s"
- alert: oom kill detected
expr: increase(node_vmstat_oom_kill[1m]) > 0
alertgroup: "{{ $labels.instance }}"
new_oom_kills: "{{ $value }}"
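# Network interfaces: links above 98% of line rate, transmit/receive error rates
# above 1%, and conntrack table usage above 80% of its limit.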
- alert: interface transmit rate
expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
alertgroup: "{{ $labels.instance }}"
bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface receive rate
expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
alertgroup: "{{ $labels.instance }}"
bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface transmit errors
expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
alertgroup: "{{ $labels.instance }}"
error_rate: "{{ $value | humanizePercentage }}"
- alert: interface receive errors
expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
alertgroup: "{{ $labels.instance }}"
error_rate: "{{ $value | humanizePercentage }}"
- alert: conntrack entries
expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
alertgroup: "{{ $labels.instance }}"
entries_used: "{{ $value | humanizePercentage }}"
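# Planet dumps and replication feeds: files under /store/planet that have not been
# updated within their expected interval, evaluated only where the planetdump chef role is present.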
- alert: planet dump overdue
expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
overdue_by: "{{ $value | humanizeDuration }}"
- alert: notes dump overdue
expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
overdue_by: "{{ $value | humanizeDuration }}"
- alert: daily replication feed delayed
expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
delayed_by: "{{ $value | humanizeDuration }}"
- alert: hourly replication feed delayed
expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
delayed_by: "{{ $value | humanizeDuration }}"
- alert: minutely replication feed delayed
expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
delayed_by: "{{ $value | humanizeDuration }}"
- alert: changeset replication feed delayed
expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
delayed_by: "{{ $value | humanizeDuration }}"
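# PostgreSQL: server availability, replication lag, connection usage above 80% of
# max_connections, deadlocks, and slow queries.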
- alert: postgresql down
alertgroup: "{{ $labels.instance }}"
- alert: postgresql replication delay
expr: pg_replication_lag_seconds > 5
alertgroup: "{{ $labels.instance }}"
delay: "{{ $value | humanizeDuration }}"
- alert: postgresql connection limit
expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
alertgroup: "{{ $labels.instance }}"
connections_used: "{{ $value | humanizePercentage }}"
- alert: postgresql deadlocks
expr: increase(pg_stat_database_deadlocks[1m]) > 5
alertgroup: "{{ $labels.instance }}"
new_deadlocks: "{{ $value }}"
- alert: postgresql slow queries
expr: pg_slow_queries > 0
alertgroup: "{{ $labels.instance }}"
queries: "{{ $value }}"
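# SMART: drives reporting a failed health status and SSDs at 90% or more of their rated wear.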
- alert: smart failure
expr: smart_health_status == 0
alertgroup: "{{ $labels.instance }}"
- alert: smart ssd wearout approaching
expr: smart_percentage_used >= 90
alertgroup: "{{ $labels.instance }}"
percentage_used: "{{ $value }}%"
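# TLS certificates: failed probes, certificates expiring within 14 days, and
# unexpected OCSP responses.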
- alert: ssl certificate probe failed
expr: ssl_probe_success == 0
- alert: ssl certificate expiry
expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
expires_in: "{{ $value | humanizeDuration }}"
- alert: ssl certificate revoked
expr: ssl_ocsp_response_status == 1
- alert: ocsp status unknown
expr: ssl_ocsp_response_status == 2
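# systemd: any unit that has entered the failed state.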
- alert: systemd failed service
expr: node_systemd_unit_state{state="failed"} == 1
alertgroup: "{{ $labels.instance }}"
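# Tile rendering: renderd replication more than two minutes behind and tile 404
# rates above 5% of requests.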
- alert: renderd replication delay
expr: renderd_replication_delay > 120
delay: "{{ $value | humanizeDuration }}"
- alert: missed tile rate
expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
miss_rate: "{{ $value | humanizePercentage }}"
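# Time synchronisation: NTP not synchronising and sustained clock offset beyond 50ms.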
- alert: clock not synchronising
expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
alertgroup: "{{ $labels.instance }}"
- alert: clock skew detected
expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
alertgroup: "{{ $labels.instance }}"
skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }} {{ . | first | value | humanizeDuration }}{{ end }}"
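# Web API: 5xx responses above 0.2% of requests.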
- alert: web error rate
expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
error_rate: "{{ $value | humanizePercentage }}"