# DO NOT EDIT - This file is being maintained by Chef
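# Rules follow the standard Prometheus layout: each "- alert:" entry carries an
# expr, typically a for/keep_firing_for duration, labels (e.g. alertgroup) and
# annotations (the templated values below).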
  expr: junos_interface_up{site="amsterdam",name=~"ge-[01]/2/2"} != 1
  alertgroup: "amsterdam"
  status: "{{ $value }}"
- alert: equinix uplink
  expr: junos_interface_up{site="amsterdam",name=~"xe-[01]/2/0"} != 1
  alertgroup: "amsterdam"
  status: "{{ $value }}"
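# rPDU2PhaseStatusCurrent is reported in tenths of an amp, hence the / 10; the
# apparent power expression below appears to use a similar fixed scaling (/ 100 to kVA)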
- alert: pdu current draw
  expr: rPDU2PhaseStatusCurrent{site="amsterdam",rPDU2PhaseStatusIndex="1"} / 10 > 28
  alertgroup: "amsterdam"
  current: "{{ $value | humanize }}A"
  expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="amsterdam",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 3.5
  alertgroup: "amsterdam"
  current: "{{ $value | humanize }}kVA"
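# temperature is reported in tenths of a degree C (hence / 10); humidity appears to
# be reported in percent, with / 100 converting to the 0-1 fraction humanizePercentage expects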
- alert: site temperature
  expr: min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 < 15 or min(rPDU2SensorTempHumidityStatusTempC{site="amsterdam"}) / 10 > 32
  alertgroup: "amsterdam"
  temperature: "{{ $value | humanize }}C"
- alert: site humidity
  expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 < 0.08 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="amsterdam"}) / 100 > 0.8
  alertgroup: "amsterdam"
  humidity: "{{ $value | humanizePercentage }}"
  alertgroup: "{{ $labels.instance }}"
- alert: apache workers busy
  expr: sum(apache_workers{state="busy"}) by (instance) / sum(apache_scoreboard) by (instance) > 0.8
  alertgroup: "{{ $labels.instance }}"
  busy_workers: "{{ $value | humanizePercentage }}"
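# mirrors Apache's event MPM connection ceiling: ServerLimit * (ThreadsPerChild +
# AsyncRequestWorkerFactor * idle workers per process), alerting at 80% of that limit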
- alert: apache connection limit
  expr: (apache_connections{state="total"} - on (instance) apache_connections{state="closing"}) / on (instance) (apache_server_limit * on (instance) (apache_threads_per_child + on (instance) (apache_async_request_worker_factor * on (instance) apache_workers{state="idle"} / on(instance) apache_processes{state="all"}))) > 0.8
  alertgroup: "{{ $labels.instance }}"
  connections: "{{ $value | humanizePercentage }}"
- alert: chef client not running
  expr: time() - node_systemd_timer_last_trigger_seconds{name="chef-client.timer"} > 3600
  alertgroup: "{{ $labels.instance }}"
  down_time: "{{ $value | humanizeDuration }}"
- alert: cisco fan alarm
  expr: rlPhdUnitEnvParamFan1Status{rlPhdUnitEnvParamFan1Status!="normal"} > 0 or rlPhdUnitEnvParamFan2Status{rlPhdUnitEnvParamFan2Status!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
  fan_rpm: "{{ with printf \"rlPhdUnitEnvParamFan1Speed{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: cisco temperature alarm
  expr: rlPhdUnitEnvParamTempSensorStatus{rlPhdUnitEnvParamTempSensorStatus!="ok"} > 0
  alertgroup: "{{ $labels.site }}"
  temp_celsius: "{{ with printf \"rlPhdUnitEnvParamTempSensorValue{site='%s',instance='%s',rlPhdUnitEnvParamStackUnit='%s'}\" $labels.site $labels.instance $labels.rlPhdUnitEnvParamStackUnit | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: cisco main power alarm
  expr: rlPhdUnitEnvParamMainPSStatus{rlPhdUnitEnvParamMainPSStatus!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
- alert: cisco redundant power alarm
  expr: rlPhdUnitEnvParamRedundantPSStatus{rlPhdUnitEnvParamRedundantPSStatus!="normal"} > 0
  alertgroup: "{{ $labels.site }}"
- alert: cpu pressure
  expr: rate(node_pressure_cpu_waiting_seconds_total[5m]) > 0.75
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: active rails queries
  expr: sum(pg_stat_activity_count{datname="openstreetmap",usename="rails",state="active"}) by (instance) > 50 and on (instance) chef_role{name="db-master"}
  queries: "{{ $value }}"
- alert: active cgimap queries
  expr: sum(pg_stat_activity_count{datname="openstreetmap",usename="cgimap",state="active"}) by (instance) > 30 and on (instance) chef_role{name="db-master"}
  queries: "{{ $value }}"
- alert: discourse job failure rate
  expr: rate(discourse_job_failures[5m]) > 0
  alertgroup: discourse
  failure_rate: "{{ $value }} jobs/s"
  expr: junos_interface_up{site="dublin",name=~"ge-[01]/2/2"} != 1
  status: "{{ $value }}"
- alert: equinix uplink
  expr: junos_interface_up{site="dublin",name=~"xe-[01]/2/0"} != 1
  status: "{{ $value }}"
- alert: pdu current draw
  expr: rPDU2PhaseStatusCurrent{site="dublin",rPDU2PhaseStatusIndex="1"} / 10 > 28
  current: "{{ $value | humanize }}A"
  expr: sum(avg_over_time(rPDU2PhaseStatusApparentPower{site="dublin",rPDU2PhaseStatusIndex="1"}[1h]) / 100) > 4
  current: "{{ $value | humanize }}kVA"
- alert: site temperature
  expr: min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 < 18 or min(rPDU2SensorTempHumidityStatusTempC{site="dublin"}) / 10 > 26
  temperature: "{{ $value | humanize }}C"
- alert: site humidity
  expr: max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 < 0.25 or max(rPDU2SensorTempHumidityStatusRelativeHumidity{site="dublin"}) / 100 > 0.65
  humidity: "{{ $value | humanizePercentage }}"
- alert: fastly error rate
  expr: sum(rate(fastly_rt_status_group_total{status_group="5xx"}[5m])) by (service_name, datacenter) / sum(rate(fastly_rt_status_group_total[5m])) by (service_name, datacenter) > 0.005
  keep_firing_for: 450s
  error_rate: "{{ $value | humanizePercentage }}"
- alert: fastly frontend healthcheck warning
  expr: count(fastly_healthcheck_status == 0) by (service, datacenter) > 2
  keep_firing_for: 450s
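# critical fires when every healthcheck for a service/datacenter (or service/backend)
# pair is failing, i.e. the count of failing checks equals the total count of checks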
- alert: fastly frontend healthcheck critical
  expr: count(fastly_healthcheck_status == 0) by (service, datacenter) == count(fastly_healthcheck_status) by (service, datacenter)
  keep_firing_for: 150s
- alert: fastly backend healthcheck warning
  expr: count(fastly_healthcheck_status == 0) by (service, backend) > 10
  keep_firing_for: 450s
- alert: fastly backend healthcheck critical
  expr: count(fastly_healthcheck_status == 0) by (service, backend) == count(fastly_healthcheck_status) by (service, backend)
  keep_firing_for: 150s
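# node_filesystem_readonly rising above its 7-day minimum means a filesystem that
# was writable at some point in the last week is now mounted read-only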
- alert: readonly filesystem
  expr: node_filesystem_readonly > min_over_time(node_filesystem_readonly[7d])
  alertgroup: "{{ $labels.instance }}"
- alert: filesystem low on space
  expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.05
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  percentage_free: "{{ $value | humanizePercentage }}"
  free_bytes: "{{ with printf \"node_filesystem_avail_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
  total_bytes: "{{ with printf \"node_filesystem_size_bytes{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value | humanize1024 }}bytes{{end}}"
- alert: filesystem low on inodes
  expr: node_filesystem_files_free / node_filesystem_files < 0.1
  alertgroup: "{{ $labels.instance }}"
  percentage_free: "{{ $value | humanizePercentage }}"
  free_inodes: "{{ with printf \"node_filesystem_files_free{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
  total_inodes: "{{ with printf \"node_filesystem_files{instance='%s',mountpoint='%s'}\" $labels.instance $labels.mountpoint | query }}{{ . | first | value }}{{end}}"
- alert: hwmon fan alarm
  expr: node_hwmon_fan_alarm == 1
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  fan_rpm: "{{ with printf \"node_hwmon_fan_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
  fan_min_rpm: "{{ with printf \"node_hwmon_fan_min_rpm{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: hwmon temperature alarm
  expr: node_hwmon_temp_alarm == 1
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  temp_celsius: "{{ with printf \"node_hwmon_temp_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
  temp_max_celsius: "{{ with printf \"node_hwmon_temp_max_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
  temp_crit_celsius: "{{ with printf \"node_hwmon_temp_crit_celsius{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: hwmon voltage alarm
  expr: node_hwmon_in_alarm == 1
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  in_volts: "{{ with printf \"node_hwmon_in_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  in_min_volts: "{{ with printf \"node_hwmon_in_min_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  in_max_volts: "{{ with printf \"node_hwmon_in_max_volts{instance='%s',chip='%s',sensor='%s'}\" $labels.instance $labels.chip $labels.sensor | query }}{{ . | first | value | humanize }}V{{end}}"
  expr: rate(node_pressure_io_waiting_seconds_total[5m]) > 0.6
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: ipmi fan alarm
  expr: ipmi_fan_speed_state > 0
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  fan_speed_rpm: "{{ with printf \"ipmi_fan_speed_rpm{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}rpm{{end}}"
- alert: ipmi temperature alarm
  expr: ipmi_temperature_state > 0
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  temperature_celsius: "{{ with printf \"ipmi_temperature_celsius{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}C{{end}}"
- alert: ipmi voltage alarm
  expr: ipmi_voltage_state > 0
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  voltage_volts: "{{ with printf \"ipmi_voltage_volts{instance='%s',id='%s'}\" $labels.instance $labels.id | query }}{{ . | first | value | humanize }}V{{end}}"
- alert: ipmi power alarm
  expr: ipmi_power_state > 0 or ipmi_sensor_state{type=~"Power .*"} > 0
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
- alert: juniper red alarms
  expr: juniper_alarms_red_count > 0
  keep_firing_for: 150s
  alertgroup: "{{ $labels.site }}"
  alarm_count: "{{ $value }} alarms"
- alert: juniper yellow alarms
  expr: juniper_alarms_yellow_count > 0
  keep_firing_for: 150s
  alertgroup: "{{ $labels.site }}"
  alarm_count: "{{ $value }} alarms"
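# routing engine five-minute load average divided by 2 (presumably a two-core RE)
# and treated as a utilisation fraction, alerting above 50%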
- alert: juniper cpu alarm
  expr: junos_route_engine_load_average_five / 2 > 0.5
  keep_firing_for: 150s
  alertgroup: "{{ $labels.site }}"
  load_average: "{{ $value | humanizePercentage }}"
- alert: juniper fan alarm
  expr: junos_environment_fan_up != 1
  keep_firing_for: 150s
  alertgroup: "{{ $labels.site }}"
- alert: juniper power alarm
  expr: junos_environment_power_up != 1
  keep_firing_for: 150s
  alertgroup: "{{ $labels.site }}"
- alert: juniper laser receive power
  expr: junos_interface_diagnostics_laser_rx_dbm < -12 and on (site, instance, name) junos_interface_admin_up == 1
  keep_firing_for: 150s
  alertgroup: "{{ $labels.site }}"
  power: "{{ $value }} dBm"
- alert: juniper laser transmit power
  expr: junos_interface_diagnostics_laser_output_dbm < -8 and on (site, instance, name) junos_interface_admin_up == 1
  keep_firing_for: 150s
  alertgroup: "{{ $labels.site }}"
  power: "{{ $value }} dBm"
- alert: load average
  expr: sum(node_load5) by (instance) / count(node_cpu_frequency_max_hertz) by (instance) > 2
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  load: "{{ $value | humanizePercentage }}"
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
- alert: exim queue length
  expr: exim_queue > ignoring(job) exim_queue_limit
  queue_length: "{{ $value }}"
- alert: mailman queue length
  expr: mailman_queue_length > 200
  queue_length: "{{ $value }}"
- alert: mdadm array inactive
  expr: node_md_state{state="inactive"} > 0
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm array degraded
  expr: sum (node_md_disks{state="active"}) without (state) < node_md_disks_required
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
- alert: mdadm disk failed
  expr: node_md_disks{state="failed"} > 0
  alertgroup: "{{ $labels.instance }}"
  required: "{{ with printf \"node_md_disks_required{instance='%s',device='%s'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  active: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='active'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  failed: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='failed'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  spare: "{{ with printf \"node_md_disks{instance='%s',device='%s',state='spare'}\" $labels.instance $labels.device | query }}{{ . | first | value | humanize }} disks{{end}}"
  expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.1
  keep_firing_for: 450s
  alertgroup: "{{ $labels.instance }}"
  memory_free: "{{ $value | humanizePercentage }}"
- alert: memory pressure
  expr: rate(node_pressure_memory_waiting_seconds_total[5m]) > 0.6
  alertgroup: "{{ $labels.instance }}"
  pressure: "{{ $value | humanizePercentage }}"
- alert: oom kill detected
  expr: increase(node_vmstat_oom_kill[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_oom_kills: "{{ $value }}"
  alertgroup: "{{ $labels.instance }}"
- alert: mysql connection limit
  expr: mysql_global_status_max_used_connections / mysql_global_variables_max_connections > 0.8
  alertgroup: "{{ $labels.instance }}"
  connections_used: "{{ $value | humanizePercentage }}"
- alert: mysql connection errors
  expr: increase(mysql_global_status_connection_errors_total[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  error_count: "{{ $value }}"
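# label_replace copies the chef-reported interface name into a "master" label so the
# active-link count can be joined against interfaces configured as 802.3ad bonds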
- alert: interface redundancy lost
  expr: node_bonding_active < 2 and on (instance, master) label_replace(chef_network_interface{bond_mode="802.3ad"}, "master", "$1", "name", "(.*)")
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  link_count: "{{ $value }}"
- alert: interface transmit rate
  expr: rate(node_network_transmit_bytes_total[1m]) / node_network_speed_bytes > 0.99
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface receive rate
  expr: rate(node_network_receive_bytes_total[1m]) / node_network_speed_bytes > 0.99
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  bandwidth_used: "{{ $value | humanizePercentage }}"
- alert: interface transmit errors
  expr: rate(node_network_transmit_errs_total{device!~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device!~"wg.*"}[1m]) > 0.01
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: wireguard interface transmit errors
  expr: rate(node_network_transmit_errs_total{device=~"wg.*"}[1m]) / rate(node_network_transmit_packets_total{device=~"wg.*"}[1m]) > 0.05
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: interface receive errors
  expr: rate(node_network_receive_errs_total[1m]) / rate(node_network_receive_packets_total[1m]) > 0.01
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  error_rate: "{{ $value | humanizePercentage }}"
- alert: conntrack entries
  expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  entries_used: "{{ $value | humanizePercentage }}"
- alert: nominatim replication delay
  expr: nominatim_replication_delay > 10800
  alertgroup: nominatim
  delay: "{{ $value | humanizeDuration }}"
- alert: nominatim connections
  expr: sum(nginx_connections_writing and on (instance) chef_role{name="nominatim"}) > 2500
  keep_firing_for: 450s
  alertgroup: nominatim
- alert: overpass osm database age
  expr: overpass_database_age_seconds{database="osm"} > 3600
  age: "{{ $value | humanizeDuration }}"
- alert: overpass area database age
  expr: overpass_database_age_seconds{database="area"} > 86400
  age: "{{ $value | humanizeDuration }}"
- alert: passenger down
  expr: passenger_up == 0
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
- alert: passenger queuing
  expr: passenger_top_level_request_queue > 0
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
- alert: passenger application queuing
  expr: passenger_app_request_queue > 0
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
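# "ignoring (job, name, path)" drops the labels that differ between the two series so
# file age can be matched one-to-one against chef_role{name="planetdump"}, restricting
# these alerts to the host that actually produces the dumps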
- alert: planet dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/(pbf|planet)/.*"} > 7 * 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  overdue_by: "{{ $value | humanizeDuration }}"
- alert: notes dump overdue
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/notes/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  overdue_by: "{{ $value | humanizeDuration }}"
- alert: daily replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/day/.*"} > 86400 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: hourly replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/hour/.*"} > 3600 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: minutely replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/minute/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  keep_firing_for: 150s
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: changeset replication feed delayed
  expr: time() - file_stat_modif_time_seconds{path=~"/store/planet/replication/changesets/.*"} > 60 and ignoring (job, name, path) chef_role{name="planetdump"} == 1
  keep_firing_for: 150s
  delayed_by: "{{ $value | humanizeDuration }}"
- alert: postgresql down
  alertgroup: "{{ $labels.instance }}"
- alert: postgresql replication delay
  expr: pg_replication_lag_seconds > 30
  alertgroup: "{{ $labels.instance }}"
  delay: "{{ $value | humanizeDuration }}"
- alert: postgresql connection limit
  expr: sum (pg_stat_activity_count) by (instance, server) / sum (pg_settings_max_connections) by (instance, server) > 0.8
  alertgroup: "{{ $labels.instance }}"
  connections_used: "{{ $value | humanizePercentage }}"
- alert: postgresql deadlocks
  expr: increase(pg_stat_database_deadlocks{datname!="nominatim"}[1m]) > 5
  alertgroup: "{{ $labels.instance }}"
  new_deadlocks: "{{ $value }}"
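# fires when the total idle-in-transaction backends exceed those in the le="300"
# histogram bucket, i.e. some backend has sat idle in a transaction for over 300s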
- alert: postgresql idle transactions
  expr: sum(pg_process_idle_seconds_count{state="idle in transaction"}) by (instance, server) > sum(pg_process_idle_seconds_bucket{state="idle in transaction",le="300"}) by (instance, server)
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
  queries: "{{ $value }}"
- alert: prometheus configuration error
  expr: prometheus_config_last_reload_successful == 0
  alertgroup: "prometheus"
- alert: prometheus target missing
  alertgroup: "prometheus"
- alert: node exporter text file scrape error
  expr: node_textfile_scrape_error > 0
  alertgroup: "prometheus"
- alert: raid controller battery failed
  expr: ohai_controller_info{battery_status="failed"} > 0
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
- alert: raid controller battery recharging
  expr: ohai_controller_info{battery_status="recharging"} > 0
  alertgroup: "{{ $labels.instance }}"
- alert: raid array degraded
  expr: ohai_array_info{status="degraded"} > 0
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
- alert: raid disk failed
  expr: ohai_disk_info{status="failed"} > 0
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
- alert: memory controller errors
  expr: increase(rasdaemon_mc_events_total[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_errors: "{{ $value }}"
- alert: pcie aer errors
  expr: increase(rasdaemon_aer_events_total[1m]) > 0
  alertgroup: "{{ $labels.instance }}"
  new_errors: "{{ $value }}"
- alert: dnssec validation failures
  expr: rate(resolved_dnssec_verdicts_total{result="bogus"}[1m]) > 1
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
- alert: smart failure
  expr: smart_health_status == 0
  alertgroup: "{{ $labels.instance }}"
- alert: smart ssd wearout approaching
  expr: smart_percentage_used / 100 >= 0.8
  alertgroup: "{{ $labels.instance }}"
  percentage_used: "{{ $value | humanizePercentage }}"
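  # packet loss estimated as 1 minus the ratio of responses received to requests sent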
  expr: 1 - (rate(smokeping_response_duration_seconds_count[5m]) / rate(smokeping_requests_total[5m])) > 0.02
  alertgroup: smokeping
  loss_rate: "{{ $value | humanizePercentage }}"
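# comparing against the one-day maximum catches SNMP walks that have started
# returning fewer PDUs than they did earlier, i.e. a partially failing device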
- alert: snmp pdus missing
  expr: max_over_time(snmp_scrape_pdus_returned[1d]) - snmp_scrape_pdus_returned > 0
  missing_pdus: "{{ $value }}"
- alert: ssl certificate probe failed
  expr: ssl_probe_success == 0
- alert: ssl certificate expiry
  expr: ssl_verified_cert_not_after{chain_no="0"} - time() < 86400 * 14
  expires_in: "{{ $value | humanizeDuration }}"
- alert: ssl certificate revoked
  expr: ssl_ocsp_response_status == 1
- alert: ocsp status unknown
  # ssl_ocsp_response_status: 0 = good, 1 = revoked, 2 = unknown
  expr: ssl_ocsp_response_status == 2
- alert: statuscake uptime check failing
  expr: statuscake_paused == 0 and statuscake_up == 0
  alertgroup: statuscake
- alert: systemd failed service
  expr: node_systemd_unit_state{state="failed",name!="chef-client.service"} == 1
  keep_firing_for: 150s
  alertgroup: "{{ $labels.instance }}"
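# chef-client is presumably a timer-triggered oneshot unit: if it has not been in the
# "inactive" state at any point in six hours it has been stuck or failing throughout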
- alert: systemd failed chef client service
  expr: sum_over_time(node_systemd_unit_state{state="inactive",name="chef-client.service"}[6h]) == 0
  alertgroup: "{{ $labels.instance }}"
- alert: taginfo planet age
  expr: time() - taginfo_data_from_seconds > 129600 and on (instance) chef_role{name="taginfo"}
  age: "{{ $value | humanizeDuration }}"
- alert: taginfo database age
  expr: time() - taginfo_database_update_finish_seconds > 129600 and on (instance) chef_role{name="taginfo"}
  age: "{{ $value | humanizeDuration }}"
- alert: taginfo database size
  expr: abs(delta(taginfo_database_size_bytes[30m])) / taginfo_database_size_bytes > 0.1
  size_change: "{{ $value | humanizePercentage }}"
- alert: renderd replication delay
  expr: renderd_replication_delay > 120
  delay: "{{ $value | humanizeDuration }}"
- alert: missed tile rate
  expr: sum(rate(modtile_http_response_total{code="404"}[5m])) by (instance) / sum(rate(modtile_http_response_total[5m])) by (instance) > 0.05
  miss_rate: "{{ $value | humanizePercentage }}"
- alert: tile render rate
  expr: sum(rate(renderd_zoom_metatiles_total[5m])) by (instance) == 0
  render_rate: "{{ $value }} tiles/s"
- alert: clock not synchronising
  expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
  alertgroup: "{{ $labels.instance }}"
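# fires when the clock is more than 50ms off and the offset is still moving away from
# zero (deriv has the same sign as the offset), i.e. NTP is not correcting it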
- alert: clock skew detected
  expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
  alertgroup: "{{ $labels.instance }}"
  skew: "{{ with printf \"node_timex_offset_seconds{instance='%s'}\" $labels.instance | query }} {{ . | humanizeDuration }}{{ end }}"
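# the status regex matches 500-508 and 510-599 (apparently skipping 509); both a
# relative threshold (0.2% of calls) and an absolute one (0.05 errors/s) must be exceeded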
- alert: web error rate
  expr: sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) / sum(rate(api_call_count_total[5m])) by (instance) > 0.002 and sum(rate(api_call_count_total{status=~"50[0-8]|5[1-9][0-9]"}[5m])) by (instance) > 0.05
  error_rate: "{{ $value | humanizePercentage }}"
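# deleted rows approximate completed jobs and inserted rows new jobs; a ratio below
# 0.9 over an hour means the delayed_jobs queue is falling behind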
- alert: job processing rate
  expr: rate(pg_stat_user_tables_n_tup_del{datname="openstreetmap",relname="delayed_jobs"}[1h]) / rate(pg_stat_user_tables_n_tup_ins{datname="openstreetmap",relname="delayed_jobs"}[1h]) < 0.9 and ignoring(job, name, datname, relname, schemaname, server) chef_role{name="db-master"} == 1
  job_processing_rate: "{{ $value | humanizePercentage }}"
- alert: aws s3 replication lag
  expr: aws_s3_replication_latency_maximum > 7200
  s3_object_replication_lag: "{{ $value | humanizeDuration }}"
- alert: aws s3 replication failures
  expr: aws_s3_operations_failed_replication_sum > 0
  s3_object_replication_failures: "{{ $value }} objects"
- alert: aws s3 replication pending
  expr: aws_s3_operations_pending_replication_maximum > 1000
  s3_object_replication_pending: "{{ $value }} objects"