# DO NOT EDIT - This file is being maintained by Chef

groups:
  - name: alertmanager
    rules:
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
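  # The /10 scaling below assumes the rPDU2 SNMP gauges report current in
  # tenths of an amp and temperature in tenths of a degree C; humidity is
  # divided by 100 to turn a whole-percent reading into the 0-1 ratio that
  # humanizePercentage expects.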
  - name: amsterdam
    rules:
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site current draw
        expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or max(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 25
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: min(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
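  # Worker saturation is busy workers over total scoreboard slots; the low
  # request rate rule compares the current 5m rate with the hourly average
  # at the same time one week earlier (offset 1w), and only fires when that
  # baseline carried meaningful traffic (> 2 requests/s).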
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
      - alert: apache low request rate
        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          request_rate: "{{ $value | humanizePercentage }}"
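  # Fires when the chef-client.timer systemd timer (as reported by the node
  # exporter's systemd collector) has not triggered for over an hour,
  # sustained for 12 hours.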
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
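  # The rlPhd* series come from the switches' environment MIB via the SNMP
  # exporter. The "with printf ... | query" pattern used in the annotations
  # runs an ad-hoc instant query from the alert template and renders the
  # first sample's value, letting a status alarm include the related
  # reading (for example, the speed of the fan that raised the alarm).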
  - name: cisco
    rules:
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
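  # node_pressure_*_waiting_seconds_total are the kernel's pressure stall
  # information (PSI) counters: their rate is the fraction of wall-clock
  # time in which tasks were stalled, so > 0.6 means stalled more than 60%
  # of the time.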
  - name: cpu
    rules:
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 5
        for: 5m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: fastly
    rules:
      - alert: error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
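  # Space and inode checks alert below 5% and 10% free respectively; the
  # annotations re-query the raw gauges so the notification can show
  # absolute figures (humanize1024 renders binary Ki/Mi/Gi-style suffixes).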
  - name: filesystem
    rules:
      - alert: readonly filesystem
        expr: node_filesystem_readonly == 1
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
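  # The hwmon collector exposes a boolean *_alarm flag per sensor alongside
  # the reading and its min/max/crit thresholds, so each alert can report
  # both the current value and the limit that was breached.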
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
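  # jnxOperatingState is indexed by chassis contents category; here index 2
  # is taken to be the power supplies and index 4 the fans, per the Juniper
  # chassis MIB, alerting on any component not in the "running" state.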
  - name: juniper
    rules:
      - alert: juniper fan alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!="running"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!="running"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: mail
    rules:
      - alert: exim queue length
        expr: exim_queue > exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
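  # node_md_disks is broken down by state; "sum ... without (state)" adds
  # up the active disks while dropping the state label so the result has
  # the same label set as node_md_disks_required for the comparison.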
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
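  # Interface throughput is compared against the advertised link speed
  # (node_network_speed_bytes). wg* (WireGuard) interfaces get a looser
  # error threshold (5% over 1h rather than 1% over 5m), presumably because
  # occasional transmit errors are normal on tunnel interfaces.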
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
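  # file_stat_modif_time_seconds tracks the mtime of the published dump and
  # replication files; "and ignoring (job, name, path) chef_role{...} == 1"
  # restricts each rule to hosts carrying the planetdump Chef role, with
  # ignoring() dropping the labels that differ between the two series.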
  - name: planet
    rules:
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
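  # Connection usage is summed per (instance, server) pair, the server
  # label distinguishing multiple clusters behind one exporter, so each
  # cluster is checked against its own max_connections setting.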
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 5
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used >= 90
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value }}%"
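  # ssl_exporter conventions: chain_no="0" selects the first (leaf)
  # certificate in the verified chain, and ssl_ocsp_response_status encodes
  # good/revoked/unknown as 0/1/2.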
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_uptime{status="down",paused="false"} > 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
        for: 6h
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
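  # node_timex_* expose the kernel's adjtimex state: sync_status == 0 with
  # maxerror at its 16s ceiling means NTP has lost synchronisation, and the
  # skew rule only fires while the offset exceeds 50ms and is still
  # drifting away from zero (deriv has the same sign as the offset).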
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
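  # The error-rate regex matches all 5xx status codes except 509; the job
  # rule compares rows deleted from delayed_jobs (jobs completed) with rows
  # inserted (jobs enqueued) on the db-master role, and fires when
  # completion falls below 90% of intake.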
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 15m
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"