#!/bin/bash
-ulimit -m 4194304 -v 4194304
-set -e
-
-if [ -f /tmp/planetdump.lock ]; then
- if [ "$(ps -p `cat /tmp/planetdump.lock` | wc -l)" -gt 1 ]; then
- # process is still running
- echo Error: Another planetdump is running
- exit 1
- else
- # process not running, but lock file not deleted?
- rm /tmp/planetdump.lock
- fi
-fi
-
-#Create Lock
-echo $$ > /tmp/planetdump.lock
+# DO NOT EDIT - This file is being maintained by Chef
-cur_date=`date +%y%m%d`
-cur_year=`date +%Y`
-cur_planet=planet-${cur_date}.osm
-cur_changeset=changesets-${cur_date}.osm
-planet_dir=/store/planet/
-
-export CONNECTION_PARAMS='dbname=openstreetmap host=localhost user=planetdump password=<%= @password %>'
-export PATH='/usr/local/bin:/usr/bin:/bin:/usr/bin/X11'
+# Exit on error
+set -e
+# Get the name of the file and the expected pattern
+# (capture groups: 1 = year, 2 = month, 3 = day; consumed via
+# BASH_REMATCH once the file name has been matched below)
+file="$1"
+pattern="^osm-([0-9]{4})-([0-9]{2})-([0-9]{2})\.dmp$"
-if [ \! -d ${planet_dir}/planet/ ]
- then
- echo ${planet_dir}planet/ does not exist
- exit 1
- fi
-if [ \! -d ${planet_dir}/planet/${cur_year}/ ]; then mkdir ${planet_dir}/planet/${cur_year}/; fi
-cd ${planet_dir}/planet/${cur_year}/
+# Give up now if the file isn't a database dump
+# (exit 0, not 1: this script is triggered for any file event, so a
+# non-matching name is expected and is not an error)
+[[ $file =~ $pattern ]] || exit 0
-/opt/planetdump/planet06_pg | pbzip2 -p6 -7c > .${cur_planet}.bz2.new
-/opt/planetdump/planet06_pg --changesets | pbzip2 -p6 -9c > .${cur_changeset}.bz2.new
+# Save the year and date from the file name
+# year is the four-digit YYYY; date is the short YYMMDD form
+# (last two digits of the year + month + day) used in output names
+year="${BASH_REMATCH[1]}"
+date="${year:2:2}${BASH_REMATCH[2]}${BASH_REMATCH[3]}"
-planet_size=$(du -sb .${cur_planet}.bz2.new | awk '{ print $1 }')
-changeset_size=$(du -sb .${cur_changeset}.bz2.new | awk '{ print $1 }')
-if ((planet_size<28000000000)); then
- echo Planet .${cur_planet}.bz2.new too small
- exit 1
-fi
-if ((changeset_size<600000000)); then
- echo Changeset .${cur_changeset}.bz2.new too small
- exit 1
+# Check the lock
+if [ -f /tmp/planetdump.lock ]; then
+  # ps -p <pid> prints a header line plus one line per live process,
+  # so more than one output line means the recorded PID still runs.
+  # $(...) replaces the deprecated backticks and is quoted (SC2046).
+  if [ "$(ps -p "$(cat /tmp/planetdump.lock)" | wc -l)" -gt 1 ]; then
+    echo "Error: Another planetdump is running"
+    exit 1
+  else
+    # Stale lock left behind by a dead process - remove it (-f so a
+    # concurrent removal cannot abort the script under set -e)
+    rm -f /tmp/planetdump.lock
+  fi
fi
-mv .${cur_planet}.bz2.new ${cur_planet}.bz2
-mv .${cur_changeset}.bz2.new ${cur_changeset}.bz2
-md5sum ${cur_planet}.bz2 > ${cur_planet}.bz2.md5
-md5sum ${cur_changeset}.bz2 > ${cur_changeset}.bz2.md5
-
-#link planet latest to the new file
-cd ${planet_dir}/planet/
-
-ln -fs ${cur_year}/${cur_planet}.bz2 planet-latest.osm.bz2
-ln -fs ${cur_year}/${cur_changeset}.bz2 changesets-latest.osm.bz2
+# Redirect this shell's output to a file. This is so that it
+# can be emailed later, since this script is run from incron
+# and incron doesn't yet support MAILTO like cron does. The
+# command below appears to work in bash as well as dash.
+# ($$ keeps the name unique per process; the file is mailed and
+# removed by the cleanup function defined further down)
+logfile="/tmp/planetdump.log.$$"
+exec > "${logfile}" 2>&1
-# mangle md5 files for 'latest' ones
-rm -f planet-latest.osm.bz2.md5
-rm -f changesets-latest.osm.bz2.md5
-
-sed -e "s/${cur_planet}.bz2/planet-latest.osm.bz2/" ${cur_year}/${cur_planet}.bz2.md5 > planet-latest.osm.bz2.md5
-sed -e "s/${cur_changeset}.bz2/changesets-latest.osm.bz2/" ${cur_year}/${cur_changeset}.bz2.md5 > changesets-latest.osm.bz2.md5
-
-rm /tmp/planetdump.lock
+# Create lock file
+echo $$ > /tmp/planetdump.lock
+# Define cleanup function
+function cleanup {
+  # Remove the lock file (-f so a missing file cannot fail here and,
+  # under set -e, skip the mail/log steps below)
+  rm -f /tmp/planetdump.lock
+
+  # Send an email with the output, since incron doesn't yet
+  # support doing this in the incrontab
+  if [[ -s "$logfile" ]]
+  then
+    mailx -s "Planet dump output: ${file}" zerebubuth@gmail.com < "${logfile}"
+  fi
+
+  # Remove the log file
+  rm -f "${logfile}"
+}
+
+# Remove lock on exit
+trap cleanup EXIT
+
+# Change to working directory
+# (under set -e a failed cd aborts before any of the rm -rf below)
+cd /store/planetdump
+
+# Cleanup: remove per-table intermediate directories left behind by a
+# previous run (presumably created by planet-dump-ng - TODO confirm)
+rm -rf users
+rm -rf changesets changeset_tags changeset_comments
+rm -rf nodes node_tags
+rm -rf ways way_tags way_nodes
+rm -rf relations relation_tags relation_members
+
+# Run the dump at low priority, reading the triggering database dump
+# and producing all six outputs in one pass (bz2 outputs compressed
+# via pbzip2). 'time' writes the duration to the redirected output so
+# it ends up in the status email.
+time nice -n 19 /opt/planet-dump-ng/planet-dump-ng \
+  --max-concurrency=4 \
+  -c "pbzip2 -c" -f "/store/backup/${file}" --dense-nodes=1 \
+  -C "changesets-${date}.osm.bz2" \
+  -D "discussions-${date}.osm.bz2" \
+  -x "planet-${date}.osm.bz2" -X "history-${date}.osm.bz2" \
+  -p "planet-${date}.osm.pbf" -P "history-${date}.osm.pbf"
+
+# Function to install a dump in place: checksum it, move it into the
+# (optional) per-year subdirectory of the target directory, and point
+# the -latest symlink plus a rewritten .md5 file at it.
+#   $1 - dump type (changesets/discussions/planet/history)
+#   $2 - format extension (bz2/pbf)
+#   $3 - target directory
+#   $4 - year subdirectory (optional; the pbf calls omit it)
+function install_dump {
+  # local keeps these from clobbering the caller's variables -
+  # notably $year, which callers that omit $4 would otherwise blank
+  local type="$1"
+  local format="$2"
+  local dir="$3"
+  local year="$4"
+  local name="${type}-${date}.osm.${format}"
+  local latest="${type}-latest.osm.${format}"
+
+  md5sum "${name}" > "${name}.md5"
+  mkdir -p "${dir}/${year}"
+  mv "${name}" "${name}.md5" "${dir}/${year}"
+  # ${year:-.} makes the link target resolve inside ${dir} itself
+  # when no year subdirectory is in use
+  ln -sf "${year:-.}/${name}" "${dir}/${latest}"
+  rm -f "${dir}/${latest}.md5"
+  # Rewrite the file name inside the checksum file to match the link
+  sed -e "s/${name}/${latest}/" "${dir}/${year}/${name}.md5" > "${dir}/${latest}.md5"
+}
+
+# Move dumps into place. XML dumps are filed under a per-year
+# subdirectory; the pbf calls omit the year argument, so those land
+# in the target directory itself (and are pruned below)
+install_dump "changesets" "bz2" "<%= node[:planet][:dump][:xml_directory] %>" "${year}"
+install_dump "discussions" "bz2" "<%= node[:planet][:dump][:xml_directory] %>" "${year}"
+install_dump "planet" "bz2" "<%= node[:planet][:dump][:xml_directory] %>" "${year}"
+install_dump "history" "bz2" "<%= node[:planet][:dump][:xml_history_directory] %>" "${year}"
+install_dump "planet" "pbf" "<%= node[:planet][:dump][:pbf_directory] %>"
+install_dump "history" "pbf" "<%= node[:planet][:dump][:pbf_history_directory] %>"
+
+# Remove pbf dumps older than 90 days (top level only; -type f means
+# the -latest symlinks are left alone)
+find "<%= node[:planet][:dump][:pbf_directory] %>" "<%= node[:planet][:dump][:pbf_history_directory] %>" -maxdepth 1 -mindepth 1 -type f -mtime +90 \( -iname 'planet-*.pbf' -o -iname 'history-*.pbf' -o -iname 'planet-*.pbf.md5' -o -iname 'history-*.pbf.md5' \) -delete