# DO NOT EDIT - This file is being maintained by Chef

groups:
  - name: alertmanager
    rules:
      - alert: prometheus target missing
        expr: up == 0
        for: 10m
        labels:
          alertgroup: "prometheus"
  - name: amsterdam
    rules:
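      # rPDU2 phase current is reported in tenths of an amp, so the division
      # by 10 converts to amps: alert above 10A on a single phase, or above
      # 13A drawn across the site as a whole.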
      - alert: pdu current draw
        expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 10
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site current draw
        expr: sum(rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10) > 13
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          current: "{{ $value | humanize }}A"
      - alert: site temperature
        expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 25
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          temperature: "{{ $value | humanize }}C"
      - alert: site humidity
        expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.65
        for: 5m
        labels:
          alertgroup: "amsterdam"
        annotations:
          humidity: "{{ $value | humanizePercentage }}"
  - name: apache
    rules:
      - alert: apache down
        expr: apache_up == 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: apache workers busy
        expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          busy_workers: "{{ $value | humanizePercentage }}"
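      # Compares the current request rate against the same hour one week ago
      # and alerts when it drops below 25% of that baseline; the second
      # condition skips hosts whose baseline is under 2 requests/second,
      # where the ratio would be too noisy to be meaningful.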
      - alert: apache low request rate
        expr: rate(apache_accesses_total[5m]) / rate(apache_accesses_total[1h] offset 1w) < 0.25 and rate(apache_accesses_total[1h] offset 1w) > 2
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          request_rate: "{{ $value | humanizePercentage }}"
  - name: chef
    rules:
      - alert: chef client not running
        expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
        for: 12h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          down_time: "{{ $value | humanizeDuration }}"
  - name: cisco
    rules:
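      # The annotations below use the "query" template function to look up
      # the matching sensor reading when the alert fires; "first | value"
      # extracts the sample value from the returned vector.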
      - alert: cisco fan alarm
        expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: cisco temperature alarm
        expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
        annotations:
          temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: cisco main power alarm
        expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: cisco redundant power alarm
        expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: cpu
    rules:
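      # node_pressure_* metrics come from the kernel's PSI accounting; the
      # rate of the waiting counter is the fraction of wall-clock time in
      # which tasks were stalled on the resource.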
      - alert: cpu pressure
        expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.6
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: database
    rules:
      - alert: postgres replication delay
        expr: pg_replication_lag_seconds > 5
        for: 5m
        labels:
          alertgroup: database
        annotations:
          delay: "{{ $value | humanizeDuration }}"
  - name: fastly
    rules:
      - alert: fastly error rate
        expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
        for: 15m
        labels:
          alertgroup: fastly
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: fastly healthcheck failing
        expr: fastly_healthcheck_status == 0
        for: 5m
        labels:
          alertgroup: fastly
  - name: filesystem
    rules:
      - alert: readonly filesystem
        expr: node_filesystem_readonly == 1
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: filesystem low on space
        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
          total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
      - alert: filesystem low on inodes
        expr: node_filesystem_files_free / node_filesystem_files < 0.1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_free: "{{ $value | humanizePercentage }}"
          free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
          total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  - name: hwmon
    rules:
      - alert: hwmon fan alarm
        expr: node_hwmon_fan_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
          fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: hwmon temperature alarm
        expr: node_hwmon_temp_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
          temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: hwmon voltage alarm
        expr: node_hwmon_in_alarm == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
          in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  - name: io
    rules:
      - alert: io pressure
        expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
  - name: ipmi
    rules:
      - alert: ipmi fan alarm
        expr: ipmi_fan_speed_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
      - alert: ipmi temperature alarm
        expr: ipmi_temperature_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
      - alert: ipmi voltage alarm
        expr: ipmi_voltage_state > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
      - alert: ipmi power alarm
        expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: juniper
    rules:
      - alert: juniper fan alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="4",jnxOperatingState!="running"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
      - alert: juniper power alarm
        expr: jnxOperatingState{jnxOperatingContentsIndex="2",jnxOperatingState!="running"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.site }}"
  - name: mail
    rules:
      - alert: exim queue length
        expr: exim_queue > exim_queue_limit
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
      - alert: mailman queue length
        expr: mailman_queue_length > 200
        for: 60m
        labels:
          alertgroup: mail
        annotations:
          queue_length: "{{ $value }}"
  - name: mdadm
    rules:
      - alert: mdadm array inactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
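      # Aggregating "without (state)" sums the per-state disk counts while
      # keeping the instance and device labels, so the result matches the
      # labels of the corresponding node_md_disks_required series.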
      - alert: mdadm array degraded
        expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
      - alert: mdadm disk failed
        expr: node_md_disks{state="failed"} > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
          spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  - name: memory
    rules:
      - alert: low memory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
        for: 15m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          memory_free: "{{ $value | humanizePercentage }}"
      - alert: memory pressure
        expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          pressure: "{{ $value | humanizePercentage }}"
      - alert: oom kill detected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_oom_kills: "{{ $value }}"
  - name: network
    rules:
      - alert: interface transmit rate
        expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface receive rate
        expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.98
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          bandwidth_used: "{{ $value | humanizePercentage }}"
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
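      # WireGuard (wg*) interfaces get a separate rule with a looser
      # threshold and a longer hold time, since transient transmit errors
      # are more common on tunnel interfaces.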
      - alert: interface transmit errors
        expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
        for: 1h
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: interface receive errors
        expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
      - alert: conntrack entries
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          entries_used: "{{ $value | humanizePercentage }}"
  - name: planet
    rules:
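      # The chef_role{name="planetdump"} == 1 conjunction restricts these
      # alerts to hosts carrying the planetdump Chef role; "ignoring (job,
      # name, path)" drops the labels that would otherwise stop the two
      # sides of the "and" from matching.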
      - alert: planet dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 24h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: notes dump overdue
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 6h
        labels:
          alertgroup: planet
        annotations:
          overdue_by: "{{ $value | humanizeDuration }}"
      - alert: daily replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 3h
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: hourly replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 30m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: minutely replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
      - alert: changeset replication feed delayed
        expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
        for: 5m
        labels:
          alertgroup: planet
        annotations:
          delayed_by: "{{ $value | humanizeDuration }}"
  - name: postgresql
    rules:
      - alert: postgresql down
        expr: pg_up == 0
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: postgresql replication delay
        expr: pg_replication_lag_seconds > 5
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: postgresql connection limit
        expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
        for: 1m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          connections_used: "{{ $value | humanizePercentage }}"
      - alert: postgresql deadlocks
        expr: increase(pg_stat_database_deadlocks[1m]) > 5
        for: 0m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          new_deadlocks: "{{ $value }}"
      - alert: postgresql slow queries
        expr: pg_slow_queries > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          queries: "{{ $value }}"
  - name: raid
    rules:
      - alert: raid array degraded
        expr: ohai_array_info{status="degraded"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: raid disk failed
        expr: ohai_disk_info{status="failed"} > 0
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: smart
    rules:
      - alert: smart failure
        expr: smart_health_status == 0
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
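      # smart_percentage_used appears to be on a 0-100 scale (the threshold
      # below treats it that way), hence the plain percent formatting in the
      # annotation rather than humanizePercentage.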
      - alert: smart ssd wearout approaching
        expr: smart_percentage_used >= 90
        for: 60m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          percentage_used: "{{ $value | humanize }}%"
  - name: ssl
    rules:
      - alert: ssl certificate probe failed
        expr: ssl_probe_success == 0
        for: 60m
        labels:
          alertgroup: ssl
      - alert: ssl certificate expiry
        expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
        for: 0m
        labels:
          alertgroup: ssl
        annotations:
          expires_in: "{{ $value | humanizeDuration }}"
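      # ssl_ocsp_response_status follows the RFC 6960 CertStatus values:
      # 0 = good, 1 = revoked, 2 = unknown.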
      - alert: ssl certificate revoked
        expr: ssl_ocsp_response_status == 1
        for: 0m
        labels:
          alertgroup: ssl
      - alert: ocsp status unknown
        expr: ssl_ocsp_response_status == 2
        for: 0m
        labels:
          alertgroup: ssl
  - name: statuscake
    rules:
      - alert: statuscake uptime check failing
        expr: statuscake_uptime{status="down",paused="false"} > 0
        for: 10m
        labels:
          alertgroup: statuscake
  - name: systemd
    rules:
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
      - alert: systemd failed service
        expr: node_systemd_unit_state{state="failed",name="chef-client.service"} == 1
        for: 6h
        labels:
          alertgroup: "{{ $labels.instance }}"
  - name: tile
    rules:
      - alert: renderd replication delay
        expr: renderd_replication_delay > 120
        for: 15m
        labels:
          alertgroup: tile
        annotations:
          delay: "{{ $value | humanizeDuration }}"
      - alert: missed tile rate
        expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
        for: 5m
        labels:
          alertgroup: tile
        annotations:
          miss_rate: "{{ $value | humanizePercentage }}"
  - name: time
    rules:
      - alert: clock not synchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
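      # Alert when the clock offset exceeds 50ms in either direction and the
      # five-minute trend shows it is not converging back towards zero.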
      - alert: clock skew detected
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 5m
        labels:
          alertgroup: "{{ $labels.instance }}"
        annotations:
          skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }} {{ . | first | value | humanizeDuration }}{{ end }}"
  - name: web
    rules:
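      # The status regex (50[0-8]|5[1-9][0-9]) matches every 5xx code except
      # the non-standard 509.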
      - alert: web error rate
        expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002
        for: 5m
        labels:
          alertgroup: web
        annotations:
          error_rate: "{{ $value | humanizePercentage }}"
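      # Uses the ratio of row deletions to insertions on the delayed_jobs
      # table as a rough proxy for whether the job queue is keeping up, and
      # only evaluates it on the host holding the db-master Chef role.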
      - alert: job processing rate
        expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[5m]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[5m]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
        for: 15m
        labels:
          alertgroup: web
        annotations:
          job_processing_rate: "{{ $value | humanizePercentage }}"