# DO NOT EDIT - This file is being maintained by Chef

groups:
  - name: amsterdam
    rules:
      - alert: uplink
        expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          status: "{{ $value }}"
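      # rPDU2PhaseStatusCurrent appears to report tenths of an amp (APC rPDU2 MIB convention), hence the /10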
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
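      # rPDU2PhaseStatusApparentPower appears to report hundredths of a kVA, so /100 yields kVA per phase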
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}kVA"
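      # the temperature gauge appears to report tenths of a degree; alert when outside the 18-26C band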
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
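      # the humidity gauge reports whole percent; /100 converts to a ratio for the thresholds and humanizePercentage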
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
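      # capacity estimate following the Apache mpm_event formula: ServerLimit * (ThreadsPerChild +
      # AsyncRequestWorkerFactor * idle workers per process); alert when non-closing connections exceed 80% of it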
      - alert: apache connection limit
        expr: (apache_connections{state="total"} - on (instance) apache_connections{state="closing"}) / on (instance) (apache_server_limit * on (instance) (apache_threads_per_child + on (instance) (apache_async_request_worker_factor * on (instance) apache_workers{state="idle"} / on(instance) apache_processes{state="all"}))) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections: "{{ $value | humanizePercentage }}"
  - name: chef
    rules:
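      # fires once the chef-client timer is more than an hour overdue, sustained for 12h before alerting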
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: discourse
    rules:
      - alert: discourse job failure rate
        expr: rate(discourse_job_failures[5m]) > 0
        for: 5m
        labels:
          alertgroup: discourse
        annotations:
          failure_rate: "{{ $value }} jobs/s"
  - name: dublin
    rules:
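      # same expressions as the amsterdam group above; see the unit-scaling comments there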
      - alert: uplink
        expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          status: "{{ $value }}"
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly frontend healthcheck warning
        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2
        for: 15m
        labels:
          alertgroup: fastly
      - alert: fastly frontend healthcheck critical
        expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter)
        for: 5m
        labels:
          alertgroup: fastly
      - alert: fastly backend healthcheck warning
        expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10
        for: 15m
        labels:
          alertgroup: fastly
      - alert: fastly backend healthcheck critical
        expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend)
        for: 5m
        labels:
          alertgroup: fastly
  - name: filesystem
    rules:
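      # comparing against the 7-day minimum means this only fires when a previously read-write
      # filesystem turns read-only, so permanently read-only mounts stay quiet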
      - alert: readonly filesystem
        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
      - alert: juniper red alarms
        expr: juniper_alarms_red_count > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          alarm_count: "{{ $value }} alarms"
      - alert: juniper yellow alarms
        expr: juniper_alarms_yellow_count > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          alarm_count: "{{ $value }} alarms"
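      # dividing the five-minute load average by 2 (assumed routing-engine core count) gives a utilisation ratio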
      - alert: juniper cpu alarm
        expr: junos_route_engine_load_average_five / 2 > 0.5
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          load_average: "{{ $value | humanizePercentage }}"
      - alert: juniper fan alarm
        expr: junos_environment_fan_up != 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: junos_environment_power_up != 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: mail
    rules:
      - alert: exim down
        expr: exim_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
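      # ignoring(job) lets the queue gauge be compared with the limit metric, which appears to be
      # exported by a different scrape job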
      - alert: exim queue length
        expr: exim_queue > ignoring(job) exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
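      # sum ... without (state) drops the state label so the active-disk count can be compared
      # directly with node_md_disks_required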
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: mysql
    rules:
      - alert: mysql down
        expr: mysql_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: mysql connection limit
        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 3600
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
  - name: passenger
    rules:
      - alert: passenger down
        expr: passenger_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger queuing
        expr: passenger_top_level_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger application queuing
        expr: passenger_app_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: planet
    rules:
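      # each rule is gated on chef_role{name="planetdump"} so it only fires on the host that generates
      # the dumps; ignoring (job, name, path) makes the file-age series match the role gauge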
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 30
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
      - alert: postgresql idle transactions
        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid controller battery failed
        expr: ohai_controller_info{battery_status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid controller battery recharging
        expr: ohai_controller_info{battery_status="recharging"} > 0
        for: 4h
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used / 100 >= 0.8
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
  - name: smokeping
    rules:
      - alert: packet loss
        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
        for: 10m
        labels:
          alertgroup: smokeping
        annotations:
          loss_rate: "{{ $value | humanizePercentage }}"
  - name: snmp
    rules:
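      # fires when a scrape returns fewer PDUs than the best scrape seen over the last day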
      - alert: snmp pdus missing
        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
        for: 15m
        labels:
          alertgroup: snmp
        annotations:
          missing_pdus: "{{ $value }}"
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_paused == 0 and statuscake_up == 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
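      # chef-client.service is excluded from the rule above; instead, alert when the unit has not been
      # inactive at any point in 6h, which suggests a run that is wedged or stuck in the failed state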
      - alert: systemd failed chef client service
        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: taginfo
    rules:
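      # 129600 seconds is 36 hours; the age checks only apply on hosts carrying the taginfo role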
      - alert: taginfo planet age
        expr: time() - taginfo_data_from_seconds > 129600 and on (instance) chef_role{name="taginfo"}
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database age
        expr: time() - taginfo_database_update_finish_seconds > 129600 and on (instance) chef_role{name="taginfo"}
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database size
        expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
        for: 30m
        labels:
          alertgroup: taginfo
        annotations:
          size_change: "{{ $value | humanizePercentage }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
      - alert: tile render rate
        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          render_rate: "{{ $value }} tiles/s"
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
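      # fires when the clock is more than 50ms out and deriv() shows the offset is not converging back towards zero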
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }} {{ . | first | value | humanizeDuration }}{{ end }}"
  - name: web
    rules:
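      # the status regex matches 500-508 and 510-599, i.e. every 5xx response except the non-standard 509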
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
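      # deletes/inserts on the delayed_jobs table approximate jobs completed versus jobs queued; a ratio
      # below 0.9 means the queue is growing; gated on the db-master role so it fires in one place only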
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 1h
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"