groups:
- name: amsterdam
rules:
- - alert: uplink
+ - alert: he uplink
expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
for: 6m
labels:
alertgroup: "amsterdam"
annotations:
status: "{{ $value }}"
+ - alert: equinix uplink
+ expr: junos_interface_up{site="amsterdam",name=~"xe-[01]/2/0"} != 1
+ for: 6m
+ labels:
+ alertgroup: "amsterdam"
+ annotations:
+ status: "{{ $value }}"
- alert: pdu current draw
expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
for: 6m
annotations:
current: "{{ $value | humanize }}kVA"
- alert: site temperature
- expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
+ expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 15 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 32
for: 6m
labels:
alertgroup: "amsterdam"
annotations:
temperature: "{{ $value | humanize }}C"
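+ # humidity is reported in percent, so /100 yields a ratio; the change below
+ # relaxes the limits from 25%-65% to 8%-80% relative humidity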
- alert: site humidity
- expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
+ expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.08 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.8
for: 6m
labels:
alertgroup: "amsterdam"
labels:
alertgroup: database
annotations:
- delay: "{{ $value }}"
+ queries: "{{ $value }}"
- name: discourse
rules:
- alert: discourse job failure rate
failure_rate: "{{ $value }} jobs/s"
- name: dublin
rules:
- - alert: uplink
+ - alert: he uplink
expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
for: 6m
labels:
alertgroup: "dublin"
annotations:
status: "{{ $value }}"
+ - alert: equinix uplink
+ expr: junos_interface_up{site="dublin",name=~"xe-[01]/2/0"} != 1
+ for: 6m
+ labels:
+ alertgroup: "dublin"
+ annotations:
+ status: "{{ $value }}"
- alert: pdu current draw
expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
for: 6m
alertgroup: "{{ $labels.site }}"
annotations:
power: "{{ $value }} dBm"
+ - name: load
+ rules:
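+ # assumption: node_cpu_frequency_max_hertz has one series per core, so the
+ # count() gives the core count; the rule below fires when 5-minute load stays
+ # above twice the number of cores, rendered as a percentage by humanizePercentage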
+ - alert: load average
+ expr: sum(node_load5) by (instance) / count(node_cpu_frequency_max_hertz) by (instance) > 2
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ annotations:
+ load: "{{ $value | humanizePercentage }}"
- name: mail
rules:
- alert: exim down
alertgroup: "{{ $labels.instance }}"
annotations:
connections_used: "{{ $value | humanizePercentage }}"
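+ # increase() over the last minute of the connection-error counter; with
+ # for: 0m the alert below fires on the first scrape that shows new errors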
+ - alert: mysql connection errors
+ expr: increase(mysql_global_status_connection_errors_total[1m]) > 0
+ for: 0m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ annotations:
+ error_count: "{{ $value }}"
- name: network
rules:
- alert: interface redundancy lost
alertgroup: nominatim
annotations:
delay: "{{ $value | humanizeDuration }}"
+ - alert: nominatim connections
+ expr: sum(nginx_connections_writing and on (instance) chef_role{name="nominatim"}) > 2500
+ for: 15m
+ labels:
+ alertgroup: nominatim
- name: overpass
rules:
- alert: overpass osm database age
for: 10m
labels:
alertgroup: "prometheus"
+ - alert: node exporter text file scrape error
+ expr: node_textfile_scrape_error > 0
+ for: 10m
+ labels:
+ alertgroup: "prometheus"
- name: raid
rules:
- alert: raid controller battery failed
alertgroup: "{{ $labels.instance }}"
annotations:
new_errors: "{{ $value }}"
+ - name: resolved
+ rules:
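+ # assumption: resolved_dnssec_verdicts_total is a counter taken from
+ # systemd-resolved's statistics; the rule below fires when "bogus" verdicts
+ # arrive at more than one per second for 5 minutes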
+ - alert: dnssec validation failures
+ expr: rate(resolved_dnssec_verdicts_total{result="bogus"}[1m]) > 1
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
- name: smart
rules:
- alert: smart failure
- name: web
rules:
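+ # the added `and` term below also requires an absolute 5xx rate above
+ # 0.01 requests/s, so near-idle instances no longer alert when a handful of
+ # errors pushes the ratio over 0.2%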
- alert: web error rate
- expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
+ expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002 and sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) > 0.01
for: 5m
labels:
alertgroup: web