# DO NOT EDIT - This file is being maintained by Chef
groups:
- - name: alertmanager
- rules:
- - alert: prometheus target missing
- expr: up == 0
- for: 10m
- labels:
- alertgroup: "prometheus"
- name: amsterdam
rules:
- alert: pdu current draw
expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
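# rPDU2PhaseStatusCurrent is reported in tenths of an amp, hence the division by 10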
- for: 5m
+ for: 6m
labels:
alertgroup: "amsterdam"
annotations:
current: "{{ $value | humanize }}A"
- alert: site current draw
expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
- for: 5m
+ for: 6m
labels:
alertgroup: "amsterdam"
annotations:
current: "{{ $value | humanize }}A"
- alert: site temperature
- expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 25
- for: 5m
+ expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
+ for: 6m
labels:
alertgroup: "amsterdam"
annotations:
temperature: "{{ $value | humanize }}C"
- alert: site humidity
expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
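# the humidity sensor reports whole percent; dividing by 100 gives the ratio humanizePercentage expects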
- for: 5m
+ for: 6m
labels:
alertgroup: "amsterdam"
annotations:
humidity: "{{ $value | humanizePercentage }}"
alertgroup: database
annotations:
delay: "{{ $value | humanizeDuration }}"
+ - name: dublin
+ rules:
+ - alert: pdu current draw
+ expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 13
+ for: 6m
+ labels:
+ alertgroup: "dublin"
+ annotations:
+ current: "{{ $value | humanize }}A"
+ - alert: site current draw
+ expr: sum(rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10) > 17
+ for: 6m
+ labels:
+ alertgroup: "dublin"
+ annotations:
+ current: "{{ $value | humanize }}A"
+ - alert: site temperature
+ expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
+ for: 6m
+ labels:
+ alertgroup: "dublin"
+ annotations:
+ temperature: "{{ $value | humanize }}C"
+ - alert: site humidity
+ expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
+ for: 6m
+ labels:
+ alertgroup: "dublin"
+ annotations:
+ humidity: "{{ $value | humanizePercentage }}"
- name: fastly
rules:
- alert: fastly error rate
annotations:
error_rate: "{{ $value | humanizePercentage }}"
- alert: fastly healthcheck failing
- expr: fastly_healthcheck_status == 0
+ expr: count(fastly_healthcheck_status == 0) by (service) > 0
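+ # at least one healthcheck for a service is reporting failure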
+ for: 15m
+ labels:
+ alertgroup: fastly
+ - alert: multiple fastly healthchecks failing
+ expr: count(fastly_healthcheck_status == 0) by (service) > 4
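+ # more than four healthchecks for the same service are failing at once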
for: 5m
labels:
alertgroup: fastly
- name: filesystem
rules:
- alert: readonly filesystem
- expr: node_filesystem_readonly == 1
+ expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
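+ # readonly now but writable at some point in the last 7 days, so long-standing readonly mounts do not keep alerting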
for: 0m
labels:
alertgroup: "{{ $labels.instance }}"
- name: juniper
rules:
+ - alert: juniper cpu alarm
+ expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.site }}"
- alert: juniper fan alarm
- expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!="running"} > 0
+ expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
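+ # "running.*" also matches states such as runningAtFullSpeed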
for: 5m
labels:
alertgroup: "{{ $labels.site }}"
- alert: juniper power alarm
- expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!="running"} > 0
+ expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
for: 5m
labels:
alertgroup: "{{ $labels.site }}"
- name: mail
rules:
+ - alert: exim down
+ expr: exim_up == 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
- alert: exim queue length
expr: exim_queue > exim_queue_limit
for: 60m
labels:
alertgroup: "{{ $labels.instance }}"
annotations:
error_rate: "{{ $value | humanizePercentage }}"
- - alert: interface transmit errors
+ - alert: wireguard interface transmit errors
expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
for: 1h
labels:
alertgroup: "{{ $labels.instance }}"
annotations:
entries_used: "{{ $value | humanizePercentage }}"
+ - name: nominatim
+ rules:
+ - alert: nominatim replication delay
+ expr: nominatim_replication_delay > 10800
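+ # replication more than 10800 seconds (3 hours) behind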
+ for: 1h
+ labels:
+ alertgroup: nominatim
+ annotations:
+ delay: "{{ $value | humanizeDuration }}"
+ - name: overpass
+ rules:
+ - alert: overpass osm database age
+ expr: overpass_database_age_seconds{database="osm"} > 3600
+ for: 1h
+ labels:
+ alertgroup: overpass
+ annotations:
+ age: "{{ $value | humanizeDuration }}"
+ - alert: overpass area database age
+ expr: overpass_database_age_seconds{database="area"} > 86400
+ for: 1h
+ labels:
+ alertgroup: overpass
+ annotations:
+ age: "{{ $value | humanizeDuration }}"
+ - name: passenger
+ rules:
+ - alert: passenger down
+ expr: passenger_up == 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ - alert: passenger queuing
+ expr: passenger_top_level_request_queue > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ - alert: passenger application queuing
+ expr: passenger_app_request_queue > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
- name: planet
rules:
- alert: planet dump overdue
annotations:
connections_used: "{{ $value | humanizePercentage }}"
- alert: postgresql deadlocks
- expr: increase(pg_stat_database_deadlocks[1m]) > 5
+ expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
for: 0m
labels:
alertgroup: "{{ $labels.instance }}"
annotations:
queries: "{{ $value }}"
+ - name: prometheus
+ rules:
+ - alert: prometheus configuration error
+ expr: prometheus_config_last_reload_successful == 0
+ for: 10m
+ labels:
+ alertgroup: "prometheus"
+ - alert: prometheus target missing
+ expr: up == 0
+ for: 10m
+ labels:
+ alertgroup: "prometheus"
- name: raid
rules:
- alert: raid array degraded
for: 5m
labels:
alertgroup: "{{ $labels.instance }}"
+ - name: rasdaemon
+ rules:
+ - alert: memory controller errors
+ expr: increase(rasdaemon_mc_events_total[1m]) > 0
+ for: 0m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ annotations:
+ new_errors: "{{ $value }}"
+ - alert: pcie aer errors
+ expr: increase(rasdaemon_aer_events_total[1m]) > 0
+ for: 0m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ annotations:
+ new_errors: "{{ $value }}"
- name: smart
rules:
- alert: smart failure
labels:
alertgroup: "{{ $labels.instance }}"
- alert: smart ssd wearout approaching
- expr: smart_percentage_used >= 90
+ expr: smart_percentage_used >= 80
for: 60m
labels:
alertgroup: "{{ $labels.instance }}"
for: 5m
labels:
alertgroup: "{{ $labels.instance }}"
- - alert: systemd failed service
+ - alert: systemd failed chef client service
expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
for: 6h
labels: