#!/bin/bash

# DO NOT EDIT - This file is being maintained by Chef

# Exit on error
set -e

# Get the name of the file and the expected pattern
file="$1"
pattern="^osm-([0-9]{4})-([0-9]{2})-([0-9]{2})\.dmp$"

# Give up now if the file isn't a database dump
[[ $file =~ $pattern ]] || exit 0

# Save the year and date from the file name
year="${BASH_REMATCH[1]}"
date="${year:2:2}${BASH_REMATCH[2]}${BASH_REMATCH[3]}"
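# e.g. osm-2024-01-15.dmp gives year=2024 and date=240115 (YYMMDD); the date
# is used in the names of the dump files generated below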

# Check the lock
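# If the lock file exists but the PID it records is no longer running, ps
# prints only its header line, so the stale lock is removed and we carry on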
if [ -f /tmp/planetdump.lock ]; then
    if [ "$(ps -p $(cat /tmp/planetdump.lock) | wc -l)" -gt 1 ]; then
        echo "Error: Another planetdump is running"
        exit 1
    else
        rm /tmp/planetdump.lock
    fi
fi

# Redirect this shell's output to a file. This is so that it
# can be emailed later, since this script is run from incron
# and incron doesn't yet support MAILTO like cron does. The
# command below appears to work in bash as well as dash.
logfile="/tmp/planetdump.log.$$"
exec > "${logfile}" 2>&1

# Create lock file
echo $$ > /tmp/planetdump.lock

# Define cleanup function
function cleanup {
    # Remove the lock file
    rm /tmp/planetdump.lock

    # Send an email with the output, since incron doesn't yet
    # support doing this in the incrontab
    if [[ -s "$logfile" ]]
    then
        mailx -s "Planet dump output: ${file}" zerebubuth@gmail.com < "${logfile}"
    fi

    # Remove the log file
    rm -f "${logfile}"
}

# Run the cleanup function (remove the lock, mail any output, delete
# the log file) when the script exits
trap cleanup EXIT

# Change to working directory
cd /store/planetdump
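# The dump runs from this directory: the per-table extracts removed below are
# left here, and the generated dump files are written here before being moved
# into place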

# Remove any per-table extracts left over from a previous run
rm -rf users
rm -rf changesets changeset_tags changeset_comments
rm -rf nodes node_tags
rm -rf ways way_tags way_nodes
rm -rf relations relation_tags relation_members

# Run the dump
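# A single pass over the backup (-f) produces all outputs: changesets (-C),
# changeset discussions (-D), current and full-history XML (-x/-X, compressed
# with pbzip2 via -c), and current and full-history PBF (-p/-P)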
time nice -n 19 /opt/planet-dump-ng/planet-dump-ng \
     --max-concurrency=4 \
     -c "pbzip2 -c" -f "/store/backup/${file}" --dense-nodes=1 \
     -C "changesets-${date}.osm.bz2" \
     -D "discussions-${date}.osm.bz2" \
     -x "planet-${date}.osm.bz2" -X "history-${date}.osm.bz2" \
     -p "planet-${date}.osm.pbf" -P "history-${date}.osm.pbf"

# Function to install a dump in place
function install_dump {
  type="$1"
  format="$2"
  dir="$3"
  year="$4"
  name="${type}-${date}.osm.${format}"
  latest="${type}-latest.osm.${format}"

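  # Checksum the dump, move it into the (optional) per-year directory, then
  # point the "latest" symlink at it and rewrite the checksum file so it
  # refers to the symlink name; when no year is given, ${year:-.} keeps the
  # symlink target relative to the directory itself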
  md5sum "${name}" > "${name}.md5"
  mkdir -p "${dir}/${year}"
  mv "${name}" "${name}.md5" "${dir}/${year}"
  ln -sf "${year:-.}/${name}" "${dir}/${latest}"
  rm -f "${dir}/${latest}.md5"
  sed -e "s/${name}/${latest}/" "${dir}/${year}/${name}.md5" > "${dir}/${latest}.md5"
}

# Move dumps into place
install_dump "changesets" "bz2" "<%= node[:planet][:dump][:xml_directory] %>" "${year}"
install_dump "discussions" "bz2" "<%= node[:planet][:dump][:xml_directory] %>" "${year}"
install_dump "planet" "bz2" "<%= node[:planet][:dump][:xml_directory] %>" "${year}"
install_dump "history" "bz2" "<%= node[:planet][:dump][:xml_history_directory] %>" "${year}"
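# The PBF dumps are not archived by year: they go straight into the top-level
# directory and are pruned after 90 days by the find at the end of this script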
install_dump "planet" "pbf" "<%= node[:planet][:dump][:pbf_directory] %>"
install_dump "history" "pbf" "<%= node[:planet][:dump][:pbf_history_directory] %>"

# Remove pbf dumps older than 90 days
find "<%= node[:planet][:dump][:pbf_directory] %>" \
     "<%= node[:planet][:dump][:pbf_history_directory] %>" \
     -maxdepth 1 -mindepth 1 -type f -mtime +90 \
     \( -iname 'planet-*.pbf' -o -iname 'history-*.pbf' \
        -o -iname 'planet-*.pbf.md5' -o -iname 'history-*.pbf.md5' \) \
     -delete