# cookbooks/prometheus/templates/default/alert_rules.yml.erb
# DO NOT EDIT - This file is being maintained by Chef

groups:
  - name: amsterdam
    rules:
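      # The rPDU2 SNMP gauges are scaled integers: current and temperature
      # are reported in tenths of an amp / tenths of a degree Celsius (per
      # the APC PowerNet MIB) and humidity in whole percent, so the
      # expressions below divide by 10 or 100 accordingly.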
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site current draw
        expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 25
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
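      # Compares the current request rate against the same hour one week
      # earlier; the second clause stops the alert firing on hosts whose
      # baseline traffic (under ~2 requests/second) is too low to compare.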
      - alert: apache low request rate
        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          request_rate: "{{ $value | humanizePercentage }}"
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
  - name: cisco
    rules:
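      # These SNMP status metrics carry their state as a label, so any
      # series whose state label is not normal/ok indicates an alarm. The
      # annotations use the "with printf ... | query" template idiom to
      # look up a related series (fan speed, temperature) for the unit
      # that is alerting.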
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: cpu
    rules:
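      # node_pressure_*_waiting_seconds_total are the kernel PSI counters;
      # their rate approximates the fraction of time tasks were stalled on
      # the resource, so > 0.6 means stalled over 60% of the time. The io
      # and memory groups below apply the same idea.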
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 5
        for: 5m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly healthcheck failing
        expr: fastly_healthcheck_status == 0
        for: 5m
        labels:
          alertgroup: fastly
  - name: filesystem
    rules:
      - alert: readonly filesystem
        expr: node_filesystem_readonly == 1
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
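      # jnxOperatingState is indexed by the JUNIPER-MIB contents table;
      # judging by the alert names, contents index 4 selects the fan trays
      # and index 2 the power supplies.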
      - alert: juniper fan alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!="running"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!="running"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: mail
    rules:
      - alert: exim queue length
        expr: exim_queue > exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
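      # node_md_disks is split by disk state, so summing "without (state)"
      # yields a per-device active-disk count that can be compared directly
      # against node_md_disks_required.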
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
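      # Wireguard (wg*) tunnel interfaces are noisier than physical ones,
      # so they get their own copy of this rule below with a looser
      # threshold and a longer hold time.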
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: planet
    rules:
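      # Each expression joins against chef_role so the alert only fires on
      # hosts carrying the planetdump role; "ignoring (job, name, path)"
      # drops the labels that differ between the two metrics.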
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 5
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: prometheus
    rules:
      - alert: prometheus configuration error
        expr: prometheus_config_last_reload_successful == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: raid
    rules:
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used >= 90
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value }}%"
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
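      # ssl_ocsp_response_status encodes the OCSP certificate status:
      # 0 = good, 1 = revoked, 2 = unknown.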
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_uptime{status="down",paused="false"} > 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
        for: 6h
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
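      # Fires only when the offset is large and deriv() shows it is not
      # already converging back towards zero.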
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }}{{ . | first | value | humanizeDuration }}{{ end }}"
  - name: web
    rules:
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
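      # Uses the delete/insert tuple rates on the delayed_jobs table as a
      # proxy for whether background jobs are being processed as fast as
      # they are queued, restricted to the host with the db-master role.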
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 15m
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"