# DO NOT EDIT - This file is being maintained by Chef

groups:
  - name: alertmanager
    rules:
      - alert: prometheus target missing
        expr: up == 0
        for: 5m
        labels:
          alertgroup: "prometheus"
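  # "up" is the synthetic per-target series Prometheus records for every
  # scrape, so "prometheus target missing" above fires for any exporter
  # that has been unscrapeable for five minutes.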
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
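  # In the rule above, summing apache_scoreboard over all of its states
  # gives the total worker slot count, so the alert fires when more than
  # 80% of the configured slots are busy.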
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.3
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
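  # node_pressure_* counters come from the kernel's PSI (pressure stall
  # information) accounting, so the rate above is the fraction of
  # wall-clock time that tasks spent stalled waiting for CPU.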
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 5
        for: 5m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
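  # The rule above overlaps with "postgresql replication delay" further
  # down, but routes to the shared "database" alertgroup and holds for 5m
  # rather than 1m.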
  - name: filesystem
    rules:
      - alert: readonly filesystem
        expr: node_filesystem_readonly == 1
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
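  # The with/printf/query idiom in the annotations above builds a PromQL
  # selector out of the alert's own labels, evaluates it when the template
  # is expanded, and "first | value" unwraps the single resulting sample
  # for humanizing.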
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
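  # The node_hwmon_*_alarm series above mirror alarm flags raised by the
  # kernel hwmon subsystem, so the effective thresholds are those
  # programmed into the sensor chips; the annotations just report the
  # readings and limits alongside.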
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
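  # Same PSI accounting as the cpu rule, but I/O stalls are tolerated for
  # longer (60% of the time for a full hour) before this pages.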
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
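  # ipmi_exporter reports sensor state as 0 when nominal, with higher
  # values for warning/critical, so the "> 0" comparisons above catch any
  # non-nominal reading.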
  - name: mail
    rules:
      - alert: mail queue length
        expr: exim_queue > exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
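  # exim_queue_limit is itself a scraped metric rather than a literal
  # threshold, which presumably lets the acceptable queue length vary per
  # mail host.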
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
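  # In "mdadm array degraded" above, aggregating without (state) keeps the
  # instance and device labels, so active disks are compared against
  # node_md_disks_required array by array.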
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
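  # MemAvailable already accounts for reclaimable page cache, making it a
  # truer low-memory signal than MemFree; the oom rule's "for: 0m" pages
  # immediately on the first kill seen in a one-minute window.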
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
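  # node_network_speed_bytes is the negotiated link speed, so the first
  # two rules above measure utilization as a fraction of line rate, while
  # the error rules compare error counts against packet counts.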
  - name: planet
    rules:
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
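  # The 'and ignoring (job, name, path) chef_role{name="planetdump"} == 1'
  # clause in each rule above restricts the alert to hosts carrying the
  # planetdump Chef role; "ignoring" drops the labels the two series do
  # not share so the vector match can succeed.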
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 5
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
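  # pg_settings_max_connections exports the server's max_connections
  # setting, so the connection limit rule above tracks slot utilization
  # per instance and server instead of a hard-coded ceiling.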
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used >= 90
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value }}%"
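  # smart_percentage_used follows the NVMe "percentage used" endurance
  # estimate, reported on a 0-100 scale (it may exceed 100 on heavily worn
  # drives), hence the plain "%" formatting rather than humanizePercentage.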
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
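  # ssl_ocsp_response_status uses the RFC 6960 codes: 0 good, 1 revoked,
  # 2 unknown, which is why the two rules above test for 1 and 2
  # respectively.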
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
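  # The skew rule only fires when the offset is outside ±50ms and the
  # derivative shows it still moving away from zero, i.e. the time daemon
  # is not winning the offset back.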
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
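  # The status regex above covers every 5xx code except the non-standard
  # 509 ("bandwidth limit exceeded"), and 0.002 corresponds to an error
  # budget of 0.2% of all API calls.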