- name: amsterdam
rules:
- alert: pdu current draw
- expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
+ expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
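+ # the PDU exposes current in tenths of an amp, hence the /10 to get the amps shown in the annotation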
for: 6m
labels:
alertgroup: "amsterdam"
annotations:
current: "{{ $value | humanize }}A"
- - alert: site current draw
- expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
+ - alert: site power
+ expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3
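+ # apparent power is exposed in hundredths of kVA, hence the /100; averaging over 1h smooths transient spikes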
for: 6m
labels:
alertgroup: "amsterdam"
annotations:
- current: "{{ $value | humanize }}A"
+ current: "{{ $value | humanize }}kVA"
- alert: site temperature
expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
for: 6m
labels:
alertgroup: "amsterdam"
annotations:
temperature: "{{ $value }}C"
- name: cpu
rules:
- alert: cpu pressure
- expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
- for: 15m
+ expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
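+ # PSI metric: the rate of a "seconds stalled" counter approximates the fraction of time tasks waited for CPU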
+ for: 60m
labels:
alertgroup: "{{ $labels.instance }}"
annotations:
pressure: "{{ $value | humanizePercentage }}"
- name: database
rules:
- alert: postgres replication delay
- expr: pg_replication_lag_seconds > 5
- for: 5m
+ expr: pg_replication_lag_seconds > 30
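+ # 30s of lag sustained for 15m avoids firing on brief replication spikes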
+ for: 15m
labels:
alertgroup: database
annotations:
delay: "{{ $value | humanizeDuration }}"
+ - name: discourse
+ rules:
+ - alert: discourse job failure rate
+ expr: rate(discourse_job_failures[5m]) > 0
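+ # rate() treats discourse_job_failures as a counter, so any failed job within the window fires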
+ for: 5m
+ labels:
+ alertgroup: discourse
+ annotations:
+ failure_rate: "{{ $value }} jobs/s"
- name: dublin
rules:
- alert: pdu current draw
- expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 13
+ expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
for: 6m
labels:
alertgroup: "dublin"
annotations:
current: "{{ $value | humanize }}A"
- - alert: site current draw
- expr: sum(rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10) > 17
+ - alert: site power
+ expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
for: 6m
labels:
alertgroup: "dublin"
annotations:
- current: "{{ $value | humanize }}A"
+ current: "{{ $value | humanize }}kVA"
- alert: site temperature
expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
for: 6m
labels:
alertgroup: "dublin"
annotations:
temperature: "{{ $value }}C"
- name: network
rules:
- alert: interface transmit rate
- expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
+ expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
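+ # one-minute transmit throughput as a fraction of the negotiated link speed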
for: 5m
labels:
alertgroup: "{{ $labels.instance }}"
annotations:
bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface receive rate
- expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
+ expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
for: 5m
labels:
alertgroup: "{{ $labels.instance }}"
annotations:
bandwidth_used: "{{ $value | humanizePercentage }}"
- name: postgresql
rules:
- alert: postgresql replication delay
- expr: pg_replication_lag_seconds > 5
- for: 1m
+ expr: pg_replication_lag_seconds > 30
+ for: 15m
labels:
alertgroup: "{{ $labels.instance }}"
annotations:
delay: "{{ $value | humanizeDuration }}"
+ - alert: postgresql idle transactions
+ expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
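+ # histogram total minus the le="300" bucket: counts backends idle in transaction for longer than 300s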
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ annotations:
+ queries: "{{ $value }}"
- name: prometheus
rules:
- alert: prometheus configuration error
expr: prometheus_config_last_reload_successful == 0
for: 10m
labels:
alertgroup: "prometheus"
- name: raid
rules:
+ - alert: raid controller battery failed
+ expr: ohai_controller_info{battery_status="failed"} > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ - alert: raid controller battery recharging
+ expr: ohai_controller_info{battery_status="recharging"} > 0
+ for: 4h
+ labels:
+ alertgroup: "{{ $labels.instance }}"
- alert: raid array degraded
expr: ohai_array_info{status="degraded"} > 0
for: 5m
labels:
alertgroup: "{{ $labels.instance }}"
- alert: smart ssd wearout approaching
- expr: smart_percentage_used >= 80
+ expr: smart_percentage_used / 100 >= 0.8
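+ # scale the 0-100 wearout gauge to a 0-1 ratio so humanizePercentage renders it correctly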
for: 60m
labels:
alertgroup: "{{ $labels.instance }}"
annotations:
percentage_used: "{{ $value | humanizePercentage }}"
+ - name: smokeping
+ rules:
+ - alert: packet loss
+ expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
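+ # loss = 1 - responses/requests; fires when sustained packet loss exceeds 2%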
+ for: 10m
+ labels:
+ alertgroup: smokeping
+ annotations:
+ loss_rate: "{{ $value | humanizePercentage }}"
- name: snmp
rules:
- alert: snmp pdus missing
- name: statuscake
rules:
- alert: statuscake uptime check failing
- expr: statuscake_uptime{status="down",paused="false"} > 0
+ expr: statuscake_paused == 0 and statuscake_up == 0
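+ # "and" intersects the two gauges, so checks that are paused can never fire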
for: 10m
labels:
alertgroup: statuscake
- name: systemd
rules:
- alert: systemd failed chef client service
- expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
- for: 6h
+ expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
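+ # chef-client should pass through the inactive state between periodic runs; no inactive samples in 6h means it is stuck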
+ for: 0m
labels:
alertgroup: "{{ $labels.instance }}"
+ - name: taginfo
+ rules:
+ - alert: taginfo planet age
+ expr: time() - taginfo_data_from_seconds > 129600
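+ # 129600 seconds = 36 hours since the planet file timestamp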
+ for: 0m
+ labels:
+ alertgroup: taginfo
+ annotations:
+ age: "{{ $value | humanizeDuration }}"
+ - alert: taginfo database age
+ expr: time() - taginfo_database_update_finish_seconds > 129600
+ for: 0m
+ labels:
+ alertgroup: taginfo
+ annotations:
+ age: "{{ $value | humanizeDuration }}"
+ - alert: taginfo database size
+ expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
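+ # fires if the database size swings by more than 10% in either direction within 30m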
+ for: 30m
+ labels:
+ alertgroup: taginfo
+ annotations:
+ size_change: "{{ $value | humanizePercentage }}"
- name: tile
rules:
- alert: renderd replication delay
annotations:
miss_rate: "{{ $value | humanizePercentage }}"
- alert: tile render rate
- expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) < 1
- for: 5m
+ expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
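+ # zero metatiles rendered for 15m suggests renderd is stalled rather than merely idle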
+ for: 15m
labels:
alertgroup: tile
annotations:
error_rate: "{{ $value | humanizePercentage }}"
- name: web
rules:
- alert: job processing rate
- expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
- for: 15m
+ expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
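+ # the delete/insert ratio on delayed_jobs approximates jobs completed versus enqueued; the chef_role term scopes the alert to the db master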
+ for: 1h
labels:
alertgroup: web
annotations: