# DO NOT EDIT - This file is being maintained by Chef

groups:
  - name: amsterdam
    rules:
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
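      # The rPDU2 gauges appear to be fixed-point SNMP integers (tenths of an
      # amp, hundredths of a kVA per the APC PowerNet MIB), hence the / 10
      # above and the / 100 below before the thresholds are applied.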
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          power: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
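      # apache_scoreboard appears to count every scoreboard slot by state, so
      # summing it per instance gives total capacity and the ratio above is
      # the fraction of slots occupied by busy workers.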
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
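      # Fires only after the timer has gone more than an hour without
      # triggering, continuously for 12 hours, which should ride out
      # one-off run failures.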
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
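      # Template pattern used throughout this file: printf builds an instant
      # PromQL query from the alert's labels, query runs it, and
      # "first | value" extracts the sample value so it can be humanized.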
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: discourse
    rules:
      - alert: discourse job failure rate
        expr: rate(discourse_job_failures[5m]) > 0
        for: 5m
        labels:
          alertgroup: discourse
        annotations:
          failure_rate: "{{ $value }} jobs/s"
  - name: dublin
    rules:
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          power: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly healthcheck failing
        expr: count(fastly_healthcheck_status == 0) by (service) > 0
        for: 15m
        labels:
          alertgroup: fastly
      - alert: multiple fastly healthchecks failing
        expr: count(fastly_healthcheck_status == 0) by (service) > 4
        for: 5m
        labels:
          alertgroup: fastly
  - name: filesystem
    rules:
      - alert: readonly filesystem
        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
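      # node_filesystem_readonly is 0 or 1, so comparing against its 7-day
      # minimum fires only when a filesystem that was writable at some point
      # in the last week has just gone read-only.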
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
      - alert: juniper cpu alarm
        expr: jnxOperating5MinLoadAvg{jnxOperatingContentsIndex="9"} / 200 > 0.5
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          load_average: "{{ $value | humanizePercentage }}"
      - alert: juniper fan alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: mail
    rules:
      - alert: exim down
        expr: exim_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: exim queue length
        expr: exim_queue > ignoring(job) exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
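      # ignoring(job) lets exim_queue match exim_queue_limit one-to-one even
      # though the two series are scraped under different job labels.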
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
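      # sum ... without (state) collapses the per-state disk counts while
      # keeping instance and device, so the result has the same label set as
      # node_md_disks_required for the < comparison.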
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
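      # increase() over 1m combined with for: 0m makes this fire immediately
      # on any new OOM kill rather than waiting for a sustained condition.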
  - name: mysql
    rules:
      - alert: mysql down
        expr: mysql_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: mysql connection limit
        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 3600
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
  - name: passenger
    rules:
      - alert: passenger down
        expr: passenger_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger queuing
        expr: passenger_top_level_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger application queuing
        expr: passenger_app_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: planet
    rules:
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
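      # The 'and ignoring(job, name, path) chef_role{name="planetdump"} == 1'
      # clause keeps the file-age series only on hosts holding the planetdump
      # Chef role; ignoring() drops the labels the two sides do not share.
      # The feeds below are gated the same way.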
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
      - alert: postgresql idle transactions
        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
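      # Compares all idle-in-transaction sessions against the le="300"
      # histogram bucket: if the total exceeds the count under five minutes,
      # at least one session has been idle in a transaction for longer.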
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid controller battery failed
        expr: ohai_controller_info{battery_status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid controller battery recharging
        expr: ohai_controller_info{battery_status="recharging"} > 0
        for: 4h
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used / 100 >= 0.8
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
  - name: smokeping
    rules:
      - alert: packet loss
        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
        for: 10m
        labels:
          alertgroup: smokeping
        annotations:
          loss_rate: "{{ $value | humanizePercentage }}"
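      # Loss is inferred as 1 - responses/requests, on the assumption that
      # only answered probes record a response duration sample.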
  - name: snmp
    rules:
      - alert: snmp pdus missing
        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
        for: 15m
        labels:
          alertgroup: snmp
        annotations:
          missing_pdus: "{{ $value }}"
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_paused == 0 and statuscake_up == 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: systemd failed chef client service
        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
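      # chef-client.service is expected to return to the inactive state after
      # each run; if it was never inactive at any point in six hours it is
      # presumably stuck or failing to complete.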
  - name: taginfo
    rules:
      - alert: taginfo planet age
        expr: time() - taginfo_data_from_seconds > 129600
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database age
        expr: time() - taginfo_database_update_finish_seconds > 129600
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database size
        expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
        for: 30m
        labels:
          alertgroup: taginfo
        annotations:
          size_change: "{{ $value | humanizePercentage }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
      - alert: tile render rate
        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          render_rate: "{{ $value }} tiles/s"
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
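      # The deriv() guards make this fire only when the offset is outside
      # +/-50ms and not already converging back towards zero.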
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 1h
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"