# DO NOT EDIT - This file is being maintained by Chef

groups:
  - name: amsterdam
    rules:
      - alert: uplink
        expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          status: "{{ $value }}"
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
      - alert: apache connection limit
        expr: (apache_connections{state="total"} - on (instance) apache_connections{state="closing"}) / on (instance) (apache_server_limit * on (instance) (apache_threads_per_child + on (instance) (apache_async_request_worker_factor * on (instance) apache_workers{state="idle"} / on(instance) apache_processes{state="all"}))) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections: "{{ $value | humanizePercentage }}"
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
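      # Note: the printf/query annotations used throughout this file look up a
      # related reading when the alert template is rendered; the enclosing
      # "with" block makes the annotation render empty, rather than error, if
      # the queried series is absent.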
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: active rails queries
        expr: sum(pg_stat_activity_count{instance="snap-01",datname="openstreetmap",usename="rails",state="active"}) > 50 and on (instance) chef_role{name="db-master"}
        for: 5m
        labels:
          alertgroup: database
        annotations:
          queries: "{{ $value }}"
      - alert: active cgimap queries
        expr: sum(pg_stat_activity_count{instance="snap-01",datname="openstreetmap",usename="cgimap",state="active"}) > 30 and on (instance) chef_role{name="db-master"}
        for: 5m
        labels:
          alertgroup: database
        annotations:
          queries: "{{ $value }}"
  - name: discourse
    rules:
      - alert: discourse job failure rate
        expr: rate(discourse_job_failures[5m]) > 0
        for: 5m
        labels:
          alertgroup: discourse
        annotations:
          failure_rate: "{{ $value }} jobs/s"
  - name: dublin
    rules:
      - alert: uplink
        expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          status: "{{ $value }}"
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly frontend healthcheck warning
        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2
        for: 15m
        labels:
          alertgroup: fastly
      - alert: fastly frontend healthcheck critical
        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter)
        for: 5m
        labels:
          alertgroup: fastly
      - alert: fastly backend healthcheck warning
        expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10
        for: 15m
        labels:
          alertgroup: fastly
      - alert: fastly backend healthcheck critical
        expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend)
        for: 5m
        labels:
          alertgroup: fastly
  - name: filesystem
    rules:
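      # node_filesystem_readonly is a 0/1 gauge, so comparing it against its
      # own 7-day minimum fires only when a filesystem has newly flipped to
      # read-only; mounts that have been read-only all week stay quiet.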
      - alert: readonly filesystem
        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
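  # The io and memory pressure alerts below, like the cpu pressure alert
  # above, read the kernel's pressure-stall (PSI) counters: rate() over a
  # waiting-seconds total approximates the fraction of wall-clock time that
  # at least one task was stalled on that resource.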
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
      - alert: juniper red alarms
        expr: juniper_alarms_red_count > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          alarm_count: "{{ $value }} alarms"
      - alert: juniper yellow alarms
        expr: juniper_alarms_yellow_count > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          alarm_count: "{{ $value }} alarms"
      - alert: juniper cpu alarm
        expr: junos_route_engine_load_average_five / 2 > 0.5
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          load_average: "{{ $value | humanizePercentage }}"
      - alert: juniper fan alarm
        expr: junos_environment_fan_up != 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: junos_environment_power_up != 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper laser receive power
        expr: junos_interface_diagnostics_laser_rx_dbm < -12 and on (site, instance, name) junos_interface_admin_up == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          power: "{{ $value }} dBm"
      - alert: juniper laser transmit power
        expr: junos_interface_diagnostics_laser_output_dbm < -8 and on (site, instance, name) junos_interface_admin_up == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          power: "{{ $value }} dBm"
  - name: mail
    rules:
      - alert: exim down
        expr: exim_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: exim queue length
        expr: exim_queue > ignoring(job) exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
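      # The degraded check below collapses the state label with "sum ...
      # without (state)" so the count of active member disks can be compared
      # directly against node_md_disks_required for the same device.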
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
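  # In the memory group below, "for: 0m" on the oom kill alert means it fires
  # on the first evaluation after increase(node_vmstat_oom_kill[1m]) records
  # a new kill.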
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: mysql
    rules:
      - alert: mysql down
        expr: mysql_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: mysql connection limit
        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
  - name: network
    rules:
      - alert: interface redundancy lost
        expr: node_bonding_active < 2 and on (instance, master) label_replace(chef_network_interface{bond_mode="802.3ad"}, "master", "$1", "name", "(.*)")
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          link_count: "{{ $value }}"
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 3600
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
  - name: passenger
    rules:
      - alert: passenger down
        expr: passenger_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
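      # Passenger request queues should normally be empty: a queue that stays
      # non-empty for 5m means every application process is busy.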
      - alert: passenger queuing
        expr: passenger_top_level_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger application queuing
        expr: passenger_app_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: planet
    rules:
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql idle transactions
        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid controller battery failed
        expr: ohai_controller_info{battery_status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
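      # The 4h hold-off below is presumably to stop routine battery
      # relearn/recharge cycles from alerting; only a battery that is still
      # recharging hours later fires.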
      - alert: raid controller battery recharging
        expr: ohai_controller_info{battery_status="recharging"} > 0
        for: 4h
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used / 100 >= 0.8
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
  - name: smokeping
    rules:
      - alert: packet loss
        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
        for: 10m
        labels:
          alertgroup: smokeping
        annotations:
          loss_rate: "{{ $value | humanizePercentage }}"
  - name: snmp
    rules:
      - alert: snmp pdus missing
        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
        for: 15m
        labels:
          alertgroup: snmp
        annotations:
          missing_pdus: "{{ $value }}"
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_paused == 0 and statuscake_up == 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: systemd failed chef client service
        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: taginfo
    rules:
      - alert: taginfo planet age
        expr: time() - taginfo_data_from_seconds > 129600 and on (instance) chef_role{name="taginfo"}
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database age
        expr: time() - taginfo_database_update_finish_seconds > 129600 and on (instance) chef_role{name="taginfo"}
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database size
        expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
        for: 30m
        labels:
          alertgroup: taginfo
        annotations:
          size_change: "{{ $value | humanizePercentage }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
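      # The render rate check below assumes a production tile server always
      # has some rendering work; 15m at exactly zero metatiles/s is treated as
      # a stalled renderd rather than a quiet period.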
      - alert: tile render rate
        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          render_rate: "{{ $value }} tiles/s"
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 1h
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"