# DO NOT EDIT - This file is being maintained by Chef
groups:
- - name: alertmanager
+ - name: amsterdam
rules:
- - alert: prometheus target missing
- expr: up == 0
+ - alert: pdu current draw
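+ # rPDU2PhaseStatusCurrent is reported in tenths of an amp (per the APC PowerNet MIB), so / 10 converts to amps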
+ expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
for: 5m
labels:
- alertgroup: "prometheus"
+ alertgroup: "amsterdam"
+ annotations:
+ current: "{{ $value | humanize }}A"
+ - alert: site current draw
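+ # total phase-1 draw summed across every PDU at the site, against a 13A limit for the whole feed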
+ expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
+ for: 5m
+ labels:
+ alertgroup: "amsterdam"
+ annotations:
+ current: "{{ $value | humanize }}A"
+ - alert: site temperature
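+ # the TempC gauge is in tenths of a degree Celsius; alerts when readings leave the 18-25C band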
+ expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 25
+ for: 5m
+ labels:
+ alertgroup: "amsterdam"
+ annotations:
+ temperature: "{{ $value | humanize }}C"
+ - alert: site humidity
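+ # the humidity gauge is whole percent; / 100 gives the 0-1 fraction humanizePercentage expects, alerting outside 25-65%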
+ expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
+ for: 5m
+ labels:
+ alertgroup: "amsterdam"
+ annotations:
+ humidity: "{{ $value | humanizePercentage }}"
- name: apache
rules:
- alert: apache down
alertgroup: "{{ $labels.instance }}"
annotations:
busy_workers: "{{ $value | humanizePercentage }}"
+ - alert: apache low request rate
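+ # current 5m request rate under 25% of the same hour one week earlier; the > 2 req/s baseline guard keeps quiet servers from flapping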
+ expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
+ for: 15m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ annotations:
+ request_rate: "{{ $value | humanizePercentage }}"
+ - name: chef
+ rules:
+ - alert: chef client not running
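+ # fires once the chef-client timer has gone more than an hour without triggering, sustained for 12h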
+ expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
+ for: 12h
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ annotations:
+ down_time: "{{ $value | humanizeDuration }}"
+ - name: cisco
+ rules:
+ - alert: cisco fan alarm
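+ # the annotation runs a template-time query (the Prometheus "query" template function) to pull the actual fan speed for the failing unit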
+ expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.site }}"
+ annotations:
+ fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
+ - alert: cisco temperature alarm
+ expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.site }}"
+ annotations:
+ temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
+ - alert: cisco main power alarm
+ expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.site }}"
+ - alert: cisco redundant power alarm
+ expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.site }}"
- name: cpu
rules:
- alert: cpu pressure
- expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.3
+ expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
for: 15m
labels:
alertgroup: "{{ $labels.instance }}"
alertgroup: database
annotations:
delay: "{{ $value | humanizeDuration }}"
+ - name: fastly
+ rules:
+ - alert: fastly error rate
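+ # share of 5xx responses among all responses, per service and datacenter; fires above 0.5% sustained for 15m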
+ expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
+ for: 15m
+ labels:
+ alertgroup: fastly
+ annotations:
+ error_rate: "{{ $value | humanizePercentage }}"
+ - alert: fastly healthcheck failing
+ expr: count(fastly_healthcheck_status == 0) > 0
+ for: 15m
+ labels:
+ alertgroup: fastly
+ - alert: multiple fastly healthchecks failing
+ expr: count(fastly_healthcheck_status == 0) > 4
+ for: 5m
+ labels:
+ alertgroup: fastly
- name: filesystem
rules:
- alert: readonly filesystem
in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
- name: io
rules:
- - alert: cpu pressure
+ - alert: io pressure
expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
for: 60m
labels:
for: 5m
labels:
alertgroup: "{{ $labels.instance }}"
+ - name: juniper
+ rules:
+ - alert: juniper fan alarm
+ expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!="running"} > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.site }}"
+ - alert: juniper power alarm
+ expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!="running"} > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.site }}"
+ - name: mail
+ rules:
+ - alert: exim queue length
+ expr: exim_queue > exim_queue_limit
+ for: 60m
+ labels:
+ alertgroup: mail
+ annotations:
+ queue_length: "{{ $value }}"
+ - alert: mailman queue length
+ expr: mailman_queue_length > 200
+ for: 60m
+ labels:
+ alertgroup: mail
+ annotations:
+ queue_length: "{{ $value }}"
- name: mdadm
rules:
- alert: mdadm array inactive
annotations:
bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface transmit errors
- expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
+ expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
for: 5m
labels:
alertgroup: "{{ $labels.instance }}"
annotations:
error_rate: "{{ $value | humanizePercentage }}"
+ - alert: interface transmit errors
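+ # wireguard interfaces count packets to unreachable peers as transmit errors, so wg* presumably warrants this looser threshold and longer hold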
+ expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
+ for: 1h
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ annotations:
+ error_rate: "{{ $value | humanizePercentage }}"
- alert: interface receive errors
expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
for: 5m
alertgroup: "{{ $labels.instance }}"
annotations:
queries: "{{ $value }}"
+ - name: prometheus
+ rules:
+ - alert: prometheus configuration error
+ expr: prometheus_config_last_reload_successful == 0
+ for: 10m
+ labels:
+ alertgroup: "prometheus"
+ - alert: prometheus target missing
+ expr: up == 0
+ for: 10m
+ labels:
+ alertgroup: "prometheus"
+ - name: raid
+ rules:
+ - alert: raid array degraded
+ expr: ohai_array_info{status="degraded"} > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ - alert: raid disk failed
+ expr: ohai_disk_info{status="failed"} > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
- name: smart
rules:
- alert: smart failure
labels:
alertgroup: "{{ $labels.instance }}"
- alert: smart ssd wearout approaching
- expr: smart_percentage_used >= 90
+ expr: smart_percentage_used >= 80
for: 60m
labels:
alertgroup: "{{ $labels.instance }}"
for: 0m
labels:
alertgroup: ssl
+ - name: statuscake
+ rules:
+ - alert: statuscake uptime check failing
+ expr: statuscake_uptime{status="down",paused="false"} > 0
+ for: 10m
+ labels:
+ alertgroup: statuscake
- name: systemd
rules:
- alert: systemd failed service
- expr: node_systemd_unit_state{state="failed"} == 1
+ expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
for: 5m
labels:
alertgroup: "{{ $labels.instance }}"
+ - alert: systemd failed service
+ expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
+ for: 6h
+ labels:
+ alertgroup: "{{ $labels.instance }}"
- name: tile
rules:
- alert: renderd replication delay
expr: renderd_replication_delay > 120
- for: 5m
+ for: 15m
labels:
alertgroup: tile
annotations:
alertgroup: web
annotations:
error_rate: "{{ $value | humanizePercentage }}"
+ - alert: job processing rate
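+ # deletes from delayed_jobs roughly track completed jobs and inserts track new ones, so a ratio below 0.9 means the queue is growing; the chef_role selector restricts this to the database master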
+ expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
+ for: 15m
+ labels:
+ alertgroup: web
+ annotations:
+ job_processing_rate: "{{ $value | humanizePercentage }}"