# DO NOT EDIT - This file is being maintained by Chef

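# Rules are organised into groups, one per subsystem or site. Each rule's
# "expr" must stay true for the "for" duration before the alert fires, the
# "alertgroup" label identifies the owning subsystem or host for downstream
# notification routing, and annotations are rendered with Prometheus's
# template language ($value, $labels) when the alert fires.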
groups:
  - name: alertmanager
    rules:
      - alert: prometheus target missing
        expr: up == 0
        for: 5m
        labels:
          alertgroup: "prometheus"
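  # The rPDU2* metrics below come from the APC PDUs at the site, polled over
  # SNMP; the PowerNet MIB reports current in tenths of an amp and
  # temperature in tenths of a degree, hence the "/ 10" scaling (a raw
  # reading of 125 is 12.5A, over the 10A per-feed threshold). Humidity is an
  # integer percentage, so "/ 100" converts it to the 0-1 ratio that
  # humanizePercentage expects.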
  - name: amsterdam
    rules:
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site current draw
        expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 25
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
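  # apache_scoreboard has one series per scoreboard slot state, so summing it
  # per instance gives the total slots in the worker pool; dividing the busy
  # workers by that total gives the fraction of the pool in use, alerting
  # when it stays above 80%.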
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
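  # node_systemd_timer_last_trigger_seconds is the Unix time the timer last
  # fired, so "time() - ..." is the age of the last chef-client run. Runs up
  # to an hour apart are tolerated, and the alert only fires once the
  # condition has held for 12 hours, riding out transient failures.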
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
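  # The node_pressure_*_waiting_seconds_total metrics are the kernel's PSI
  # counters: cumulative time at least one task was stalled on the resource.
  # rate() over 5m converts that to the fraction of recent wall-clock time
  # spent stalled, so the cpu, io and memory pressure rules fire when tasks
  # were waiting roughly 60% of the time.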
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 5
        for: 5m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: fastly
    rules:
      - alert: error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
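  # Several annotations below use the console template idiom
  #   {{ with printf "metric{instance='%s'}" $labels.instance | query }}{{ . | first | value | humanize }}{{ end }}
  # which runs an instant query when the notification is rendered and formats
  # the first sample, letting an alert report related series beyond $value.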
  - name: filesystem
    rules:
      - alert: readonly filesystem
        expr: node_filesystem_readonly == 1
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: mail
    rules:
      - alert: exim queue length
        expr: exim_queue > exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
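  # node_md_disks is broken out by a "state" label (active/failed/spare).
  # Summing the active series "without (state)" drops that label so the
  # result matches node_md_disks_required, and an array is degraded whenever
  # its active-disk count falls below the required count.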
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
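  # rate(node_network_*_bytes_total[1m]) is bytes per second and
  # node_network_speed_bytes is the link speed in bytes per second, so the
  # ratio is link utilisation: 123MB/s on a 1Gbps (125MB/s) link is 0.984,
  # which would trip the 98% threshold.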
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total[1m]) / rate(node_network_transmit_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
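  # file_stat_modif_time_seconds reports the mtime of each matched dump or
  # replication file, so "time() - ..." is its age. The trailing
  # "and ignoring (job, name, path) chef_role{name="planetdump"} == 1" clause
  # restricts each rule to hosts carrying the planetdump Chef role, with
  # "ignoring" discarding the labels that differ between the two metrics so
  # the vector match lines up.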
  - name: planet
    rules:
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 5
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used / 100 >= 0.9
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
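  # ssl_exporter encodes the OCSP response status as 0 = good, 1 = revoked
  # and 2 = unknown, so the revocation alert matches 1 and the unknown-status
  # alert matches 2.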
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
        for: 6h
        labels:
          alertgroup: "{{ $labels.instance }}"
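  # mod_tile returns 404 for tiles it cannot find or render, so the ratio of
  # 404 responses to all responses over 5m approximates the tile miss rate,
  # alerting when it exceeds 5%.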
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
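  # node_timex_sync_status is 1 while the kernel clock is synchronised, and
  # the kernel lets maxerror grow towards its 16s cap when sync is lost, so
  # the first rule catches a clock that has been free-running for a full
  # minute. The skew rule only fires when the offset exceeds 50ms and its 5m
  # derivative shows it drifting further rather than converging.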
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
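  # delayed_jobs throughput is inferred from the table's tuple statistics on
  # the database master: row inserts approximate jobs queued and row deletes
  # jobs completed, so a delete/insert rate ratio persistently below 0.9
  # means the queue is growing faster than it is being drained.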
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 15m
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"