Reduce sensitivity of CPU pressure alerts
diff --git a/cookbooks/prometheus/templates/default/alert_rules.yml.erb b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
index 4511e1eac4123ad745924a437ac568d1d06b82df..c34647330051da1d2f98ea06b90959724cc82ccd 100644
--- a/cookbooks/prometheus/templates/default/alert_rules.yml.erb
+++ b/cookbooks/prometheus/templates/default/alert_rules.yml.erb
@@ -1,13 +1,36 @@
 # DO NOT EDIT - This file is being maintained by Chef
 
 groups:
-  - name: alertmanager
+  - name: amsterdam
     rules:
-      - alert: prometheus target missing
-        expr: up == 0
-        for: 5m
+      - alert: pdu current draw
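+        # the APC rPDU2 MIB reports current in tenths of an amp, hence the / 10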
+        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
+        for: 6m
         labels:
-          alertgroup: "prometheus"
+          alertgroup: "amsterdam"
+        annotations:
+          current: "{{ $value | humanize }}A"
+      - alert: site current draw
+        expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
+        for: 6m
+        labels:
+          alertgroup: "amsterdam"
+        annotations:
+          current: "{{ $value | humanize }}A"
+      - alert: site temperature
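+        # sensor gauge is in tenths of a degree Celsius; alert outside the 18-26C band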
+        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
+        for: 6m
+        labels:
+          alertgroup: "amsterdam"
+        annotations:
+          temperature: "{{ $value | humanize }}C"
+      - alert: site humidity
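+        # gauge is whole percent; / 100 yields the 0-1 ratio that humanizePercentage expects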
+        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
+        for: 6m
+        labels:
+          alertgroup: "amsterdam"
+        annotations:
+          humidity: "{{ $value | humanizePercentage }}"
   - name: apache
     rules:
       - alert: apache down
@@ -22,15 +45,149 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           busy_workers: "{{ $value | humanizePercentage }}"
+      - alert: apache low request rate
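+        # current traffic below 25% of the same measure a week earlier; the second clause skips hosts that were already quiet (under 2 req/s)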
+        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
+        for: 15m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          request_rate: "{{ $value | humanizePercentage }}"
+  - name: chef
+    rules:
+      - alert: chef client not running
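+        # seconds since the chef-client timer last fired; alerts once it has been over an hour for 12 hours straight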
+        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
+        for: 12h
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          down_time: "{{ $value | humanizeDuration }}"
+  - name: cisco
+    rules:
+      - alert: cisco fan alarm
+        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
+      - alert: cisco temperature alarm
+        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+        annotations:
+          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
+      - alert: cisco main power alarm
+        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+      - alert: cisco redundant power alarm
+        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+  - name: cpu
+    rules:
+      - alert: cpu pressure
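+        # PSI "some" metric: fraction of wall-clock time at least one task was stalled waiting for CPU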
+        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
+        for: 60m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          pressure: "{{ $value | humanizePercentage }}"
   - name: database
     rules:
       - alert: postgres replication delay
-        expr: pg_replication_lag_seconds > 5
-        for: 5m
+        expr: pg_replication_lag_seconds > 30
+        for: 15m
         labels:
           alertgroup: database
         annotations:
           delay: "{{ $value | humanizeDuration }}"
+  - name: discourse
+    rules:
+      - alert: discourse job failure rate
+        expr: rate(discourse_job_failures[5m]) > 0
+        for: 5m
+        labels:
+          alertgroup: discourse
+        annotations:
+          failure_rate: "{{ $value }} jobs/s"
+  - name: dublin
+    rules:
+      - alert: pdu current draw
+        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 13
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          current: "{{ $value | humanize }}A"
+      - alert: site current draw
+        expr: sum(rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10) > 17
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          current: "{{ $value | humanize }}A"
+      - alert: site temperature
+        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          temperature: "{{ $value | humanize }}C"
+      - alert: site humidity
+        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
+        for: 6m
+        labels:
+          alertgroup: "dublin"
+        annotations:
+          humidity: "{{ $value | humanizePercentage }}"
+  - name: fastly
+    rules:
+      - alert: fastly error rate
+        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
+        for: 15m
+        labels:
+          alertgroup: fastly
+        annotations:
+          error_rate: "{{ $value | humanizePercentage }}"
+      - alert: fastly healthcheck failing
+        expr: count(fastly_healthcheck_status == 0) by (service) > 0
+        for: 15m
+        labels:
+          alertgroup: fastly
+      - alert: multiple fastly healthchecks failing
+        expr: count(fastly_healthcheck_status == 0) by (service) > 4
+        for: 5m
+        labels:
+          alertgroup: fastly
+  - name: filesystem
+    rules:
+      - alert: readonly filesystem
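+        # fires when a filesystem that was writable at some point in the last 7 days goes read-only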
+        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: filesystem low on space
+        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          percentage_free: "{{ $value | humanizePercentage }}"
+          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
+          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
+      - alert: filesystem low on inodes
+        expr: node_filesystem_files_free / node_filesystem_files < 0.1
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          percentage_free: "{{ $value | humanizePercentage }}"
+          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
+          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
   - name: hwmon
     rules:
       - alert: hwmon fan alarm
@@ -59,6 +216,15 @@ groups:
           in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
           in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
           in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
+  - name: io
+    rules:
+      - alert: io pressure
+        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
+        for: 60m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          pressure: "{{ $value | humanizePercentage }}"
   - name: ipmi
     rules:
       - alert: ipmi fan alarm
@@ -87,6 +253,44 @@ groups:
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
+  - name: juniper
+    rules:
+      - alert: juniper cpu alarm
+        expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+      - alert: juniper fan alarm
+        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+      - alert: juniper power alarm
+        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
+  - name: mail
+    rules:
+      - alert: exim down
+        expr: exim_up == 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: exim queue length
+        expr: exim_queue > exim_queue_limit
+        for: 60m
+        labels:
+          alertgroup: mail
+        annotations:
+          queue_length: "{{ $value }}"
+      - alert: mailman queue length
+        expr: mailman_queue_length > 200
+        for: 60m
+        labels:
+          alertgroup: mail
+        annotations:
+          queue_length: "{{ $value }}"
   - name: mdadm
     rules:
       - alert: mdadm array inactive
@@ -99,6 +303,16 @@ groups:
           active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
           failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
           spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
+      - alert: mdadm array degraded
+        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
+          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
+          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
+          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
       - alert: mdadm disk failed
         expr: node_md_disks{state="failed"} > 0
         for: 0m
@@ -113,18 +327,18 @@ groups:
     rules:
       - alert: low memory
         expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
-        for: 5m
+        for: 15m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           memory_free: "{{ $value | humanizePercentage }}"
       - alert: memory pressure
-        expr: rate(node_vmstat_pgmajfault[1m]) > 1000
-        for: 5m
+        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
+        for: 60m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
-          major_page_faults: "{{ $value }} faults/s"
+          pressure: "{{ $value | humanizePercentage }}"
       - alert: oom kill detected
         expr: increase(node_vmstat_oom_kill[1m]) > 0
         for: 0m
@@ -132,6 +346,20 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           new_oom_kills: "{{ $value }}"
+  - name: mysql
+    rules:
+      - alert: mysql down
+        expr: mysql_up == 0
+        for: 1m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: mysql connection limit
+        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
+        for: 1m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          connections_used: "{{ $value | humanizePercentage }}"
   - name: network
     rules:
       - alert: interface transmit rate
@@ -149,12 +377,19 @@ groups:
         annotations:
           bandwidth_used: "{{ $value | humanizePercentage }}"
       - alert: interface transmit errors
-        expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
+        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
         for: 5m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
+      - alert: wireguard interface transmit errors
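+        # wg* tunnels get a looser threshold (5% for 1h) than other interfaces (1% for 5m)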
+        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
+        for: 1h
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          error_rate: "{{ $value | humanizePercentage }}"
       - alert: interface receive errors
         expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
         for: 5m
@@ -169,6 +404,92 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           entries_used: "{{ $value | humanizePercentage }}"
+  - name: nominatim
+    rules:
+      - alert: nominatim replication delay
+        expr: nominatim_replication_delay > 10800
+        for: 1h
+        labels:
+          alertgroup: nominatim
+        annotations:
+          delay: "{{ $value | humanizeDuration }}"
+  - name: overpass
+    rules:
+      - alert: overpass osm database age
+        expr: overpass_database_age_seconds{database="osm"} > 3600
+        for: 1h
+        labels:
+          alertgroup: overpass
+        annotations:
+          age: "{{ $value | humanizeDuration }}"
+      - alert: overpass area database age
+        expr: overpass_database_age_seconds{database="area"} > 86400
+        for: 1h
+        labels:
+          alertgroup: overpass
+        annotations:
+          age: "{{ $value | humanizeDuration }}"
+  - name: passenger
+    rules:
+      - alert: passenger down
+        expr: passenger_up == 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: passenger queuing
+        expr: passenger_top_level_request_queue > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: passenger application queuing
+        expr: passenger_app_request_queue > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+  - name: planet
+    rules:
+      - alert: planet dump overdue
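+        # modification age of the newest dump; the chef_role join scopes the alert to hosts with the planetdump role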
+        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
+        for: 24h
+        labels:
+          alertgroup: planet
+        annotations:
+          overdue_by: "{{ $value | humanizeDuration }}"
+      - alert: notes dump overdue
+        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
+        for: 6h
+        labels:
+          alertgroup: planet
+        annotations:
+          overdue_by: "{{ $value | humanizeDuration }}"
+      - alert: daily replication feed delayed
+        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
+        for: 3h
+        labels:
+          alertgroup: planet
+        annotations:
+          delayed_by: "{{ $value | humanizeDuration }}"
+      - alert: hourly replication feed delayed
+        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
+        for: 30m
+        labels:
+          alertgroup: planet
+        annotations:
+          delayed_by: "{{ $value | humanizeDuration }}"
+      - alert: minutely replication feed delayed
+        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
+        for: 5m
+        labels:
+          alertgroup: planet
+        annotations:
+          delayed_by: "{{ $value | humanizeDuration }}"
+      - alert: changeset replication feed delayed
+        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
+        for: 5m
+        labels:
+          alertgroup: planet
+        annotations:
+          delayed_by: "{{ $value | humanizeDuration }}"
   - name: postgresql
     rules:
       - alert: postgresql down
@@ -177,8 +498,8 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: postgresql replication delay
-        expr: pg_replication_lag_seconds > 5
-        for: 1m
+        expr: pg_replication_lag_seconds > 30
+        for: 15m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
@@ -191,7 +512,7 @@ groups:
         annotations:
           connections_used: "{{ $value | humanizePercentage }}"
       - alert: postgresql deadlocks
-        expr: increase(pg_stat_database_deadlocks[1m]) > 5
+        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
         for: 0m
         labels:
           alertgroup: "{{ $labels.instance }}"
@@ -204,6 +525,51 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           queries: "{{ $value }}"
+  - name: prometheus
+    rules:
+      - alert: prometheus configuration error
+        expr: prometheus_config_last_reload_successful == 0
+        for: 10m
+        labels:
+          alertgroup: "prometheus"
+      - alert: prometheus target missing
+        expr: up == 0
+        for: 10m
+        labels:
+          alertgroup: "prometheus"
+  - name: raid
+    rules:
+      - alert: raid controller battery failed
+        expr: ohai_controller_info{battery_status="failed"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: raid array degraded
+        expr: ohai_array_info{status="degraded"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: raid disk failed
+        expr: ohai_disk_info{status="failed"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+  - name: rasdaemon
+    rules:
+      - alert: memory controller errors
+        expr: increase(rasdaemon_mc_events_total[1m]) > 0
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          new_errors: "{{ $value }}"
+      - alert: pcie aer errors
+        expr: increase(rasdaemon_aer_events_total[1m]) > 0
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          new_errors: "{{ $value }}"
   - name: smart
     rules:
       - alert: smart failure
@@ -212,12 +578,30 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: smart ssd wearout approaching
-        expr: smart_percentage_used >= 90
+        expr: smart_percentage_used / 100 >= 0.8
         for: 60m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           percentage_used: "{{ $value | humanizePercentage }}"
+  - name: smokeping
+    rules:
+      - alert: packet loss
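+        # responses seen versus pings sent; more than 2% loss for 10 minutes alerts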
+        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
+        for: 10m
+        labels:
+          alertgroup: smokeping
+        annotations:
+          loss_rate: "{{ $value | humanizePercentage }}"
+  - name: snmp
+    rules:
+      - alert: snmp pdus missing
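+        # PDUs here are SNMP protocol data units, not power distribution units; falling below the daily peak suggests truncated walks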
+        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
+        for: 15m
+        labels:
+          alertgroup: snmp
+        annotations:
+          missing_pdus: "{{ $value }}"
   - name: ssl
     rules:
       - alert: ssl certificate probe failed
@@ -242,11 +626,30 @@ groups:
         for: 0m
         labels:
           alertgroup: ssl
+  - name: statuscake
+    rules:
+      - alert: statuscake uptime check failing
+        expr: statuscake_paused == 0 and statuscake_up == 0
+        for: 10m
+        labels:
+          alertgroup: statuscake
+  - name: systemd
+    rules:
+      - alert: systemd failed service
+        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: systemd failed chef client service
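+        # chef-client.service is a timer-triggered oneshot; never reaching "inactive" in 6h suggests runs are stuck or failing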
+        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
+        for: 0m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
   - name: tile
     rules:
       - alert: renderd replication delay
         expr: renderd_replication_delay > 120
-        for: 5m
+        for: 15m
         labels:
           alertgroup: tile
         annotations:
@@ -258,6 +661,13 @@ groups:
           alertgroup: tile
         annotations:
           miss_rate: "{{ $value | humanizePercentage }}"
+      - alert: tile render rate
+        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
+        for: 15m
+        labels:
+          alertgroup: tile
+        annotations:
+          render_rate: "{{ $value }} tiles/s"
   - name: time
     rules:
       - alert: clock not synchronising
@@ -272,3 +682,19 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
+  - name: web
+    rules:
+      - alert: web error rate
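+        # 5xx share of API calls; the regex matches 500-508 and 510-599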
+        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
+        for: 5m
+        labels:
+          alertgroup: web
+        annotations:
+          error_rate: "{{ $value | humanizePercentage }}"
+      - alert: job processing rate
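+        # delayed_jobs rows are deleted on completion, so deletions under 90% of insertions means the queue is growing; evaluated on the db-master only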
+        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
+        for: 15m
+        labels:
+          alertgroup: web
+        annotations:
+          job_processing_rate: "{{ $value | humanizePercentage }}"