chef.git: cookbooks/prometheus/templates/default/alert_rules.yml.erb
# DO NOT EDIT - This file is being maintained by Chef

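# The {{ ... }} blocks below are Prometheus alert/annotation templates, expanded when
# an alert fires; annotations built with "with printf ... | query" look up a related
# series and format its current value.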
groups:
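  # Per-site groups (amsterdam, dublin) watch the routers and PDUs over SNMP.
  # The raw gauges are scaled before comparison: current and temperature are
  # reported in tenths (hence "/ 10"), humidity and apparent power in hundredths.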
  - name: amsterdam
    rules:
      - alert: uplink
        expr: ifOperStatus{site="amsterdam",ifName=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          status: "{{ $value }}"
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
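  # chef-client runs from a systemd timer; alert once the timer has not
  # triggered for over an hour, sustained for 12 hours.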
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
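  # Pressure-stall (PSI) metrics: the rate of *_waiting_seconds_total approximates
  # the fraction of time tasks were stalled on the resource (also used by the io
  # and memory groups below).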
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag > 30
        for: 15m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: discourse
    rules:
      - alert: discourse job failure rate
        expr: rate(discourse_job_failures[5m]) > 0
        for: 5m
        labels:
          alertgroup: discourse
        annotations:
          failure_rate: "{{ $value }} jobs/s"
  - name: dublin
    rules:
      - alert: uplink
        expr: ifOperStatus{site="dublin",ifName=~"ge-[01]/2/2"} != 1
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          status: "{{ $value }}"
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site power
        expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          current: "{{ $value | humanize }}kVA"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
        for: 6m
        labels:
          alertgroup: "dublin"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly healthcheck failing
        expr: count(fastly_healthcheck_status == 0) by (service) > 0
        for: 15m
        labels:
          alertgroup: fastly
      - alert: multiple fastly healthchecks failing
        expr: count(fastly_healthcheck_status == 0) by (service) > 4
        for: 5m
        labels:
          alertgroup: fastly
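  # "readonly filesystem" compares the current readonly flag with its 7-day
  # minimum, so it only fires when a filesystem newly flips to read-only.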
  - name: filesystem
    rules:
      - alert: readonly filesystem
        expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
      - alert: juniper cpu alarm
        expr: jnxOperating5MinLoadAvg{jnxOperatingContentsIndex="9"} / 200 > 0.5
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          load_average: "{{ $value | humanizePercentage }}"
      - alert: juniper fan alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!~"running.*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!~"running.*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: mail
    rules:
      - alert: exim down
        expr: exim_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: exim queue length
        expr: exim_queue > ignoring(job) exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
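  # "mdadm array degraded" sums the active disks for each array (dropping the
  # state label) and fires when the total falls below node_md_disks_required.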
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: mysql
    rules:
      - alert: mysql down
        expr: mysql_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: mysql connection limit
        expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
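  # Interface throughput is compared against node_network_speed_bytes (the
  # negotiated link speed); 0.99 means the link is effectively saturated.
  # WireGuard interfaces get their own, more tolerant transmit-error rule.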
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: wireguard interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: nominatim
    rules:
      - alert: nominatim replication delay
        expr: nominatim_replication_delay > 10800
        for: 1h
        labels:
          alertgroup: nominatim
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: overpass
    rules:
      - alert: overpass osm database age
        expr: overpass_database_age_seconds{database="osm"} > 3600
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: overpass area database age
        expr: overpass_database_age_seconds{database="area"} > 86400
        for: 1h
        labels:
          alertgroup: overpass
        annotations:
          age: "{{ $value | humanizeDuration }}"
  - name: passenger
    rules:
      - alert: passenger down
        expr: passenger_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger queuing
        expr: passenger_top_level_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: passenger application queuing
        expr: passenger_app_request_queue > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
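  # Planet dump and replication freshness is derived from file mtimes and only
  # evaluated on hosts carrying the "planetdump" chef role.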
  - name: planet
    rules:
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
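  # "postgresql idle transactions" uses the idle-seconds histogram: if the total
  # count of idle-in-transaction backends exceeds those in the le="300" bucket,
  # at least one transaction has been idle for more than five minutes.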
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag > 30
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
      - alert: postgresql idle transactions
        expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid controller battery failed
        expr: ohai_controller_info{battery_status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid controller battery recharging
        expr: ohai_controller_info{battery_status="recharging"} > 0
        for: 4h
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: rasdaemon
    rules:
      - alert: memory controller errors
        expr: increase(rasdaemon_mc_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
      - alert: pcie aer errors
        expr: increase(rasdaemon_aer_events_total[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_errors: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used / 100 >= 0.8
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanizePercentage }}"
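  # Packet loss is inferred as 1 - (responses / requests) over five minutes.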
  - name: smokeping
    rules:
      - alert: packet loss
        expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
        for: 10m
        labels:
          alertgroup: smokeping
        annotations:
          loss_rate: "{{ $value | humanizePercentage }}"
  - name: snmp
    rules:
      - alert: snmp pdus missing
        expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
        for: 15m
        labels:
          alertgroup: snmp
        annotations:
          missing_pdus: "{{ $value }}"
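  # ssl_ocsp_response_status: 0 = good, 1 = revoked, 2 = unknown.
  # Certificate expiry warns 14 days (86400 * 14 seconds) before the deadline.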
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_paused == 0 and statuscake_up == 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: systemd failed chef client service
        expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
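  # taginfo data and database are expected to refresh daily; 129600 seconds is
  # a 36-hour grace period.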
  - name: taginfo
    rules:
      - alert: taginfo planet age
        expr: time() - taginfo_data_from_seconds > 129600
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database age
        expr: time() - taginfo_database_update_finish_seconds > 129600
        for: 0m
        labels:
          alertgroup: taginfo
        annotations:
          age: "{{ $value | humanizeDuration }}"
      - alert: taginfo database size
        expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
        for: 30m
        labels:
          alertgroup: taginfo
        annotations:
          size_change: "{{ $value | humanizePercentage }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
      - alert: tile render rate
        expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          render_rate: "{{ $value }} tiles/s"
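  # "clock skew detected" fires when the offset exceeds 50ms and the derivative
  # shows it is not converging back towards zero.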
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }} {{ . | first | value | humanizeDuration }}{{ end }}"
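  # "job processing rate" compares row deletions with insertions on the
  # delayed_jobs table: a sustained ratio below 0.9 means the queue is growing
  # faster than it is drained. Evaluated only on the db-master role.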
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 1h
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"