# DO NOT EDIT - This file is being maintained by Chef

groups:
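  # Per-site PDU power and environment alerts. The APC rPDU2 MIB reports
  # current in tenths of an amp and temperature in tenths of a degree
  # Celsius, hence the divisions by 10 below.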
  - name: amsterdam
    rules:
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site current draw
        expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
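      # Compare the current request rate with the same hour a week ago, and
      # only alert when the historical baseline was busy enough (more than
      # 2 requests/s) for the ratio to be meaningful.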
      - alert: apache low request rate
        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          request_rate: "{{ $value | humanizePercentage }}"
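  # chef-client runs from a systemd timer; alert when the timer has not
  # fired for over an hour, sustained for 12 hours.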
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
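  # node_pressure_* metrics come from the kernel's pressure stall
  # information (PSI); alert when tasks spend more than 60% of their time
  # stalled waiting for CPU.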
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: dublin
    rules:
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 13
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site current draw
        expr: sum(rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10) > 17
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly healthcheck failing
        expr: count(fastly_healthcheck_status == 0) by (service) > 0
        for: 15m
        labels:
          alertgroup: fastly
      - alert: multiple fastly healthchecks failing
        expr: count(fastly_healthcheck_status == 0) by (service) > 4
        for: 5m
        labels:
          alertgroup: fastly
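  # A filesystem remounted read-only shows up as node_filesystem_readonly
  # rising above its own 7-day minimum, which catches the transition
  # without flagging filesystems that are permanently read-only.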
  - name: filesystem
    rules:
      - alert: readonly filesystem
        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
      - alert: juniper cpu alarm
        expr: jnxOperatingCPU{jnxOperatingContentsIndex="7"} > 30
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper fan alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: mail
    rules:
      - alert: exim down
        expr: exim_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: exim queue length
        expr: exim_queue > exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
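  # Software RAID state from the node exporter's md collector: an array is
  # degraded when its active disk count drops below the required count.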
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: mysql
    rules:
      - alert: mysql down
        expr: mysql_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: mysql connection limit
        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
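  # Interface saturation is measured against the negotiated link speed
  # (node_network_speed_bytes). WireGuard interfaces are excluded from the
  # generic transmit error alert and get a looser threshold with a longer
  # hold time instead.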
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 3600
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
  - name: passenger
    rules:
      - alert: passenger down
        expr: passenger_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger queuing
        expr: passenger_top_level_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger application queuing
        expr: passenger_app_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
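  # Dump and replication freshness is derived from file modification
  # times. Each expression is gated with "and ignoring (...) chef_role" so
  # the alert only fires on the host that carries the planetdump role.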
  - name: planet
    rules:
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
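  # Per-server PostgreSQL alerts. Connection usage is summed by
  # (instance, server) since a single exporter may report for more than
  # one server.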
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
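  # ohai_* metrics carry hardware RAID controller, array and disk state,
  # presumably gathered from Ohai system data.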
  - name: raid
    rules:
      - alert: raid controller battery failed
        expr: ohai_controller_info{battery_status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used >= 80
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value }}%" # smart_percentage_used is already a 0-100 value
  - name: snmp
    rules:
      - alert: snmp pdus missing
        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
        for: 15m
        labels:
          alertgroup: snmp
        annotations:
          missing_pdus: "{{ $value }}"
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
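      # ssl_exporter encodes the OCSP response status as 0 = good,
      # 1 = revoked, 2 = unknown.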
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_uptime{status="down",paused="false"} > 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
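      # chef-client.service should return to the inactive state after each
      # run; if it has never been inactive in the last six hours, runs are
      # failing or stuck.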
      - alert: systemd failed chef client service
        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
      - alert: tile render rate
        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          render_rate: "{{ $value }} tiles/s"
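  # The kernel caps node_timex_maxerror_seconds at 16 seconds while the
  # clock is unsynchronised, so requiring both conditions avoids alerting
  # on brief sync hiccups.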
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
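  # The status regex matches 500-508 and 510-599, leaving out the
  # non-standard 509 code.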
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
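      # Rows deleted from delayed_jobs approximate completed jobs and rows
      # inserted approximate newly queued ones, so a sustained ratio below
      # 0.9 means the job queue is falling behind; evaluated only on the
      # db-master role.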
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 15m
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"