#!/bin/bash
+# DO NOT EDIT - This file is being maintained by Chef
+
# Exit on error
set -e
# Get the name of the file and define the expected pattern
file="$1"
-pattern="^osm-([0-9]{4}-[0-9]{2}-[0-9]{2})\.dmp$"
+pattern="^osm-([0-9]{4})-([0-9]{2})-([0-9]{2})\.dmp$"
# Give up now if the file isn't a database dump
[[ $file =~ $pattern ]] || exit 0
-# Save the date from the file name
-date="${BASH_REMATCH[1]}"
+# Save the year and date from the file name
+year="${BASH_REMATCH[1]}"
+date="${year:2:2}${BASH_REMATCH[2]}${BASH_REMATCH[3]}"
# Check the lock
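+# The lock file holds the PID of the dump that created it; "ps -p"
+# prints a header line plus one line per live process, so more than
+# one line of output means that dump is still running.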
if [ -f /tmp/planetdump.lock ]; then
if [ "$(ps -p `cat /tmp/planetdump.lock` | wc -l)" -gt 1 ]; then
- echo "Error: Another planetdump is running"
- exit 1
+ echo "Error: Another planetdump is running"
+ exit 1
else
- rm /tmp/planetdump.lock
+ rm /tmp/planetdump.lock
fi
fi
-# Create Lock
+# Redirect this shell's output to a file. This is so that it
+# can be emailed later, since this script is run from incron
+# and incron doesn't yet support MAILTO like cron does. The
+# command below appears to work in bash as well as dash.
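+# Each run writes to its own log, named after this shell's PID ($$).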
+logfile="/tmp/planetdump.log.$$"
+exec > "${logfile}" 2>&1
+
+# Create lock file
echo $$ > /tmp/planetdump.lock
+# Define cleanup function
+function cleanup {
+ # Remove the lock file
+ rm /tmp/planetdump.lock
+
+ # Send an email with the output, since incron doesn't yet
+ # support doing this in the incrontab
+  if [[ -s "$logfile" ]]; then
+ mailx -s "Planet dump output: ${file}" zerebubuth@gmail.com < "${logfile}"
+ fi
+
+ # Remove the log file
+ rm -f "${logfile}"
+}
+
+# Run cleanup on exit; the EXIT trap fires both on normal completion
+# and when set -e aborts the script on an error
+trap cleanup EXIT
+
# Change to working directory
cd /store/planetdump
# Remove any table data left over from a previous run
rm -rf users
-rm -rf changesets changeset_tags
+rm -rf changesets changeset_tags changeset_comments
rm -rf nodes node_tags
rm -rf ways way_tags way_nodes
rm -rf relations relation_tags relation_members
# Run the dump
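+# -C writes the changeset dump, -D the changeset discussions, -x/-X
+# the current and full-history XML planets, and -p/-P their PBF
+# equivalents, as the output file names below suggest.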
-time nice -n 19 /store/planet-dump-ng/planet-dump-ng \
- -c "pbzip2 -c" -f "${file}" --dense-nodes=1 \
+time nice -n 19 /opt/planet-dump-ng/planet-dump-ng \
+ --max-concurrency=4 \
+ -c "pbzip2 -c" -f "/store/backup/${file}" --dense-nodes=1 \
-C "changesets-${date}.osm.bz2" \
+ -D "discussions-${date}.osm.bz2" \
-x "planet-${date}.osm.bz2" -X "history-${date}.osm.bz2" \
-p "planet-${date}.osm.pbf" -P "history-${date}.osm.pbf"
-# Move XML dumps into place
-for file in "changesets-${date}.osm.bz2" "planet-${date}.osm.bz2" "history-${date}.osm.bz2"
-do
- md5sum "#{file}" > "#{file}.md5"
- mv "${file}" "${file}.md5" "/store/planet/planet"
-done
-
-# Move PBF dumps into place
-for file in "planet-${date}.osm.pbf" "history-${date}.osm.pbf"
-do
- md5sum "#{file}" > "#{file}.md5"
- mv "${file}" "${file}.md5" "/store/planet/pbf"
-done
-
-# Release lock
-rm /tmp/planetdump.lock
+# Function to install a dump in place
+function install_dump {
+ type="$1"
+ format="$2"
+ dir="$3"
+ year="$4"
+ name="${type}-${date}.osm.${format}"
+ latest="${type}-latest.osm.${format}"
+
+ md5sum "${name}" > "${name}.md5"
+ mkdir -p "${dir}/${year}"
+ mv "${name}" "${name}.md5" "${dir}/${year}"
+ ln -sf "${year:-.}/${name}" "${dir}/${latest}"
+ rm -f "${dir}/${latest}.md5"
+ sed -e "s/${name}/${latest}/" "${dir}/${year}/${name}.md5" > "${dir}/${latest}.md5"
+}
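+# For example, with date=130912 and year=2013, installing a "planet"
+# "bz2" dump into e.g. /store/planet/planet creates
+# /store/planet/planet/2013/planet-130912.osm.bz2 plus its .md5, and
+# repoints planet-latest.osm.bz2 (and its .md5) at the dated file.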
+
+# Move dumps into place
+install_dump "changesets" "bz2" "<%= node[:planet][:dump][:xml_directory] %>" "${year}"
+install_dump "discussions" "bz2" "<%= node[:planet][:dump][:xml_directory] %>" "${year}"
+install_dump "planet" "bz2" "<%= node[:planet][:dump][:xml_directory] %>" "${year}"
+install_dump "history" "bz2" "<%= node[:planet][:dump][:xml_history_directory] %>" "${year}"
+install_dump "planet" "pbf" "<%= node[:planet][:dump][:pbf_directory] %>"
+install_dump "history" "pbf" "<%= node[:planet][:dump][:pbf_history_directory] %>"
+
+# Remove PBF dumps older than 90 days
+find "<%= node[:planet][:dump][:pbf_directory] %>" \
+     "<%= node[:planet][:dump][:pbf_history_directory] %>" \
+     -maxdepth 1 -mindepth 1 -type f -mtime +90 \
+     \( -iname 'planet-*.pbf' -o -iname 'history-*.pbf' \
+        -o -iname 'planet-*.pbf.md5' -o -iname 'history-*.pbf.md5' \) \
+     -delete