git.openstreetmap.org Git - chef.git/blobdiff - cookbooks/prometheus/templates/default/alert_rules.yml.erb
cloudwatch: add account name comment
diff --git a/cookbooks/prometheus/templates/default/alert_rules.yml.erb b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
index b53007521e9c3df529ff600d122c5db4d04c217f..6dc966050a50d043d9ad5515ed19bf78ab364f18 100644
--- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb
+++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
@@ -3,6 +3,13 @@
 groups:
   - name: amsterdam
     rules:
+      - alert: uplink
+        expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
+        for: 6m
+        labels:
+          alertgroup: "amsterdam"
+        annotations:
+          status: "{{ $value }}"
       - alert: pdu current draw
         expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
         for: 6m
@@ -11,7 +18,7 @@ groups:
         annotations:
           current: "{{ $value | humanize }}A"
       - alert: site power
-        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3
+        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
         for: 6m
         labels:
           alertgroup: "amsterdam"
@@ -45,13 +52,13 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           busy_workers: "{{ $value | humanizePercentage }}"
-      - alert: apache low request rate
-        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
-        for: 15m
+      - alert: apache connection limit
+        expr: (apache_connections{state="total"} - on (instance) apache_connections{state="closing"}) / on (instance) (apache_server_limit * on (instance) (apache_threads_per_child + on (instance) (apache_async_request_worker_factor * on (instance) apache_workers{state="idle"} / on(instance) apache_processes{state="all"}))) > 0.8
+        for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
-          request_rate: "{{ $value | humanizePercentage }}"
+          connections: "{{ $value | humanizePercentage }}"
   - name: chef
     rules:
       - alert: chef client not running
@@ -116,6 +123,13 @@ groups:
           failure_rate: "{{ $value }} jobs/s"
   - name: dublin
     rules:
+      - alert: uplink
+        expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          status: "{{ $value }}"
       - alert: pdu current draw
         expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
         for: 6m
@@ -153,13 +167,23 @@ groups:
           alertgroup: fastly
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
-      - alert: fastly healthcheck failing
-        expr: count(fastly_healthcheck_status == 0) by (service) > 0
+      - alert: fastly frontend healthcheck warning
+        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2
         for: 15m
         labels:
           alertgroup: fastly
-      - alert: multiple fastly healthchecks failing
-        expr: count(fastly_healthcheck_status == 0) by (service) > 4
+      - alert: fastly frontend healthcheck critical
+        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter)
+        for: 5m
+        labels:
+          alertgroup: fastly
+      - alert: fastly backend healthcheck warning
+        expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10
+        for: 15m
+        labels:
+          alertgroup: fastly
+      - alert: fastly backend healthcheck critical
+        expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend)
         for: 5m
         labels:
           alertgroup: fastly
@@ -255,21 +279,51 @@ groups:
           alertgroup: "{{ $labels.instance }}"
   - name: juniper
     rules:
+      - alert: juniper red alarms
+        expr: juniper_alarms_red_count > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          alarm_count: "{{ $value }} alarms"
+      - alert: juniper yellow alarms
+        expr: juniper_alarms_yellow_count > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          alarm_count: "{{ $value }} alarms"
       - alert: juniper cpu alarm
-        expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
+        expr: junos_route_engine_load_average_five / 2 > 0.5
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
+        annotations:
+          load_average: "{{ $value | humanizePercentage }}"
       - alert: juniper fan alarm
-        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
+        expr: junos_environment_fan_up != 1
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
       - alert: juniper power alarm
-        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
+        expr: junos_environment_power_up != 1
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
+      - alert: juniper laser receive power
+        expr: junos_interface_diagnostics_laser_rx_dbm < -12 and on (site, instance, name) junos_interface_admin_up == 1
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          power: "{{ $value }} dBm"
+      - alert: juniper laser transmit power
+        expr: junos_interface_diagnostics_laser_output_dbm < -8 and on (site, instance, name) junos_interface_admin_up == 1
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          power: "{{ $value }} dBm"
   - name: mail
     rules:
       - alert: exim down
@@ -278,7 +332,7 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: exim queue length
-        expr: exim_queue > exim_queue_limit
+        expr: exim_queue > ignoring(job) exim_queue_limit
         for: 60m
         labels:
           alertgroup: mail
@@ -363,14 +417,14 @@ groups:
   - name: network
     rules:
       - alert: interface transmit rate
-        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
+        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           bandwidth_used: "{{ $value | humanizePercentage }}"
       - alert: interface receive rate
-        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
+        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
@@ -526,7 +580,7 @@ groups:
         annotations:
           queries: "{{ $value }}"
       - alert: postgresql idle transactions
-        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="120"}) by (instance, server)
+        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
@@ -660,14 +714,14 @@ groups:
   - name: taginfo
     rules:
       - alert: taginfo planet age
-        expr: time() - taginfo_data_from_seconds > 129600
+        expr: time() - taginfo_data_from_seconds > 129600 and on (instance) chef_role{name="taginfo"}
         for: 0m
         labels:
           alertgroup: taginfo
         annotations:
           age: "{{ $value | humanizeDuration }}"
       - alert: taginfo database age
-        expr: time() - taginfo_database_update_finish_seconds > 129600
+        expr: time() - taginfo_database_update_finish_seconds > 129600 and on (instance) chef_role{name="taginfo"}
         for: 0m
         labels:
           alertgroup: taginfo
@@ -727,8 +781,8 @@ groups:
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
       - alert: job processing rate
-        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
-        for: 15m
+        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
+        for: 1h
         labels:
           alertgroup: web
         annotations: