# DO NOT EDIT - This file is being maintained by Chef
groups:
- - name: alertmanager
- rules:
- - alert: prometheus target missing
- expr: up == 0
- for: 5m
- labels:
- alertgroup: "prometheus"
- name: amsterdam
rules:
- alert: cisco fan alarm
expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0
for: 5m
labels:
alertgroup: "{{ $labels.site }}"
+ annotations:
+ fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: cisco temperature alarm
expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
for: 5m
labels:
alertgroup: "{{ $labels.site }}"
+ annotations:
+ temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: cisco main power alarm
expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
for: 5m
labels:
alertgroup: "{{ $labels.site }}"
- name: fastly
rules:
- - alert: error rate
+ - alert: fastly error rate
expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
for: 15m
labels:
alertgroup: fastly
annotations:
error_rate: "{{ $value | humanizePercentage }}"
+ - alert: fastly healthcheck failing
+ expr: count(fastly_healthcheck_status == 0) > 0
+ for: 15m
+ labels:
+ alertgroup: fastly
+ - alert: multiple fastly healthchecks failing
+ expr: count(fastly_healthcheck_status == 0) > 4
+ for: 5m
+ labels:
+ alertgroup: fastly
- name: filesystem
rules:
- alert: readonly filesystem
annotations:
bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface transmit errors
- expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
+ expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
for: 5m
labels:
alertgroup: "{{ $labels.instance }}"
annotations:
error_rate: "{{ $value | humanizePercentage }}"
+ - alert: interface transmit errors
+ expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
+ for: 1h
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ annotations:
+ error_rate: "{{ $value | humanizePercentage }}"
- alert: interface receive errors
expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
for: 5m
alertgroup: "{{ $labels.instance }}"
annotations:
queries: "{{ $value }}"
+ - name: prometheus
+ rules:
+ - alert: prometheus configuration error
+ expr: prometheus_config_last_reload_successful == 0
+ for: 10m
+ labels:
+ alertgroup: "prometheus"
+ - alert: prometheus target missing
+ expr: up == 0
+ for: 10m
+ labels:
+ alertgroup: "prometheus"
+ - name: raid
+ rules:
+ - alert: raid array degraded
+ expr: ohai_array_info{status="degraded"} > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
+ - alert: raid disk failed
+ expr: ohai_disk_info{status="failed"} > 0
+ for: 5m
+ labels:
+ alertgroup: "{{ $labels.instance }}"
- name: smart
rules:
- alert: smart failure
labels:
alertgroup: "{{ $labels.instance }}"
- alert: smart ssd wearout approaching
- expr: smart_percentage_used >= 90
+ expr: smart_percentage_used >= 80
for: 60m
labels:
alertgroup: "{{ $labels.instance }}"
- name: statuscake
rules:
- alert: statuscake uptime check failing
expr: statuscake_uptime{status="down",paused="false"} > 0
- for: 0m
+ for: 10m
labels:
alertgroup: statuscake
- name: systemd