git.openstreetmap.org Git - chef.git/blobdiff - cookbooks/prometheus/templates/default/alert_rules.yml.erb
Don't bother disabling shorewall before we remove it
[chef.git] / cookbooks / prometheus / templates / default / alert_rules.yml.erb
index 5b2ec56ac9623b2579c5c97ccebb3b7208af310b..7a535337264c8a90d42bd00653ecc5ff64ddb12a 100644 (file)
@@ -5,28 +5,28 @@ groups:
     rules:
       - alert: pdu current draw
         expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
-        for: 5m
+        for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
           current: "{{ $value | humanize }}A"
       - alert: site current draw
         expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
-        for: 5m
+        for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
           current: "{{ $value | humanize }}A"
       - alert: site temperature
-        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 25
-        for: 5m
+        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
+        for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
           temperature: "{{ $value | humanize }}C"
       - alert: site humidity
         expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
-        for: 5m
+        for: 6m
         labels:
           alertgroup: "amsterdam"
         annotations:
@@ -99,8 +99,8 @@ groups:
   - name: database
     rules:
       - alert: postgres replication delay
-        expr: pg_replication_lag_seconds > 5
-        for: 5m
+        expr: pg_replication_lag_seconds > 30
+        for: 15m
         labels:
           alertgroup: database
         annotations:
@@ -109,28 +109,28 @@ groups:
     rules:
       - alert: pdu current draw
         expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 13
-        for: 5m
+        for: 6m
         labels:
           alertgroup: "dublin"
         annotations:
           current: "{{ $value | humanize }}A"
       - alert: site current draw
         expr: sum(rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10) > 17
-        for: 5m
+        for: 6m
         labels:
           alertgroup: "dublin"
         annotations:
           current: "{{ $value | humanize }}A"
       - alert: site temperature
-        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 25
-        for: 5m
+        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
+        for: 6m
         labels:
           alertgroup: "dublin"
         annotations:
           temperature: "{{ $value | humanize }}C"
       - alert: site humidity
         expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
-        for: 5m
+        for: 6m
         labels:
           alertgroup: "dublin"
         annotations:
@@ -145,19 +145,19 @@ groups:
         annotations:
           error_rate: "{{ $value | humanizePercentage }}"
       - alert: fastly healthcheck failing
-        expr: count(fastly_healthcheck_status == 0) > 0
+        expr: count(fastly_healthcheck_status == 0) by (service) > 0
         for: 15m
         labels:
           alertgroup: fastly
       - alert: multiple fastly healthchecks failing
-        expr: count(fastly_healthcheck_status == 0) > 4
+        expr: count(fastly_healthcheck_status == 0) by (service) > 4
         for: 5m
         labels:
           alertgroup: fastly
   - name: filesystem
     rules:
       - alert: readonly filesystem
-        expr: node_filesystem_readonly == 1
+        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
         for: 0m
         labels:
           alertgroup: "{{ $labels.instance }}"
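
The rewritten readonly-filesystem rule compares each filesystem's current state against its own 7-day floor, so it fires only when a normally-writable filesystem turns read-only; mounts that are read-only by design (whose 7-day minimum is already 1) can never trigger it. A minimal sketch of the idiom, assuming node_filesystem_readonly is the usual 0/1 gauge from node_exporter:

    # fires only on a transition: the current value must exceed the recent floor
    node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])

Likewise, aggregating the fastly healthcheck counts with "by (service)" makes each service alert independently, carrying a service label, rather than maintaining one global count across every healthcheck.
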
@@ -246,18 +246,28 @@ groups:
           alertgroup: "{{ $labels.instance }}"
   - name: juniper
     rules:
+      - alert: juniper cpu alarm
+        expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.site }}"
       - alert: juniper fan alarm
-        expr: sum_over_time(jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!="running"}[6m]) > 0
+        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
       - alert: juniper power alarm
-        expr: sum_over_time(jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!="running"}[6m]) > 0
+        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
         for: 5m
         labels:
           alertgroup: "{{ $labels.site }}"
   - name: mail
     rules:
+      - alert: exim down
+        expr: exim_up == 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
       - alert: exim queue length
         expr: exim_queue > exim_queue_limit
         for: 60m
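
Matching jnxOperatingState with the regex "running.*" rather than excluding the exact string "running" avoids a false alarm here: the JUNIPER-MIB operating-state enumeration includes both running and runningAtFullSpeed, and a fan at full speed is healthy. Dropping the sum_over_time wrapper also lets the plain "for: 5m" clause do the debouncing. A sketch, assuming the exporter exposes the enum as a jnxOperatingState label:

    # excludes "running" and "runningAtFullSpeed"; anything else (down, standby, ...) alerts
    jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
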
@@ -327,6 +337,20 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           new_oom_kills: "{{ $value }}"
+  - name: mysql
+    rules:
+      - alert: mysql down
+        expr: mysql_up == 0
+        for: 1m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: mysql connection limit
+        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
+        for: 1m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+        annotations:
+          connections_used: "{{ $value | humanizePercentage }}"
   - name: network
     rules:
       - alert: interface transmit rate
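
The mysql connection limit expression is a plain ratio of the server's high-water mark to its configured ceiling, so the arithmetic is easy to check by hand. With MySQL's default max_connections of 151, for example:

    # 121 used connections against the default ceiling of 151:
    # 121 / 151 = 0.801 > 0.8, so the alert fires and
    # humanizePercentage renders the value as roughly "80.1%"

Note that mysql_global_status_max_used_connections is a high-water mark since the last server restart, so once the threshold is crossed the alert keeps firing until the server is restarted or the limit is raised.
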
@@ -396,6 +420,23 @@ groups:
           alertgroup: overpass
         annotations:
           age: "{{ $value | humanizeDuration }}"
+  - name: passenger
+    rules:
+      - alert: passenger down
+        expr: passenger_up == 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: passenger queuing
+        expr: passenger_top_level_request_queue > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
+      - alert: passenger application queuing
+        expr: passenger_app_request_queue > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
   - name: planet
     rules:
       - alert: planet dump overdue
@@ -448,8 +489,8 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: postgresql replication delay
-        expr: pg_replication_lag_seconds > 5
-        for: 1m
+        expr: pg_replication_lag_seconds > 30
+        for: 15m
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
@@ -489,6 +530,11 @@ groups:
           alertgroup: "prometheus"
   - name: raid
     rules:
+      - alert: raid controller battery failed
+        expr: ohai_controller_info{battery_status="failed"} > 0
+        for: 5m
+        labels:
+          alertgroup: "{{ $labels.instance }}"
       - alert: raid array degraded
         expr: ohai_array_info{status="degraded"} > 0
         for: 5m
@@ -514,7 +560,7 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
         annotations:
-          new_ercrors: "{{ $value }}"
+          new_errors: "{{ $value }}"
   - name: smart
     rules:
       - alert: smart failure
@@ -529,6 +575,15 @@ groups:
           alertgroup: "{{ $labels.instance }}"
         annotations:
           percentage_used: "{{ $value | humanizePercentage }}"
+  - name: snmp
+    rules:
+      - alert: snmp pdus missing
+        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
+        for: 15m
+        labels:
+          alertgroup: snmp
+        annotations:
+          missing_pdus: "{{ $value }}"
   - name: ssl
     rules:
       - alert: ssl certificate probe failed
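
In the snmp group, snmp_scrape_pdus_returned is snmp_exporter's count of protocol data units (varbinds) returned by a walk, so subtracting the current value from its 1-day maximum detects parts of a device's OID tree silently disappearing. A worked example, assuming yesterday's peak was 120 PDUs:

    # max_over_time(snmp_scrape_pdus_returned[1d]) = 120, current scrape = 117
    # 120 - 117 = 3 > 0, so after 15m the alert fires with missing_pdus: "3"
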
@@ -568,8 +623,8 @@ groups:
         labels:
           alertgroup: "{{ $labels.instance }}"
       - alert: systemd failed chef client service
-        expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
-        for: 6h
+        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
+        for: 0m
         labels:
           alertgroup: "{{ $labels.instance }}"
   - name: tile
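
The reworked chef-client check relies on node_systemd_unit_state exposing a 0/1 series per unit per state: for a periodically triggered chef-client.service, the inactive series should be 1 between runs. If its sum over six hours is 0, the unit never returned to inactive, meaning no run completed in that window; unlike the old state="failed" check with "for: 6h", this also catches a unit that flaps between failed and activating without ever staying failed. A sketch of the underlying series, assuming the standard node_exporter systemd collector:

    # 1 while chef-client.service is inactive (between runs), 0 otherwise
    node_systemd_unit_state{state="inactive",name="chef-client.service"}
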
@@ -588,6 +643,13 @@ groups:
           alertgroup: tile
         annotations:
           miss_rate: "{{ $value | humanizePercentage }}"
+      - alert: tile render rate
+        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
+        for: 15m
+        labels:
+          alertgroup: tile
+        annotations:
+          render_rate: "{{ $value }} tiles/s"
   - name: time
     rules:
       - alert: clock not synchronising
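
Since this file is an ERB template, the rendered rules can be sanity-checked before Prometheus loads them. A minimal sketch, assuming the template renders without needing Chef node attributes (the erb utility ships with Ruby; promtool with Prometheus):

    erb alert_rules.yml.erb > alert_rules.yml
    promtool check rules alert_rules.yml
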