website/reverse.php
website/search.php
website/status.php
+ website/403.html
+ website/509.html
+ website/crossdomain.xml
+ website/favicon.ico
+ website/last_update.php
+ website/nominatim.xml
+ website/robots.txt
+ website/taginfo.json
utils/blocks.php
utils/country_languages.php
utils/imports.php
protected $aExcludePlaceIDs = array();
protected $bDeDupe = true;
- protected $bReverseInPlan = false;
+ protected $bReverseInPlan = true;
protected $iLimit = 20;
protected $iFinalLimit = 10;
// TODO: filter out pointless search terms (name tokens of two letters or less);
// they might be right, but they are just too darned expensive to run
if (sizeof($aSearch['aName'])) $aTerms[] = "name_vector @> ARRAY[".join($aSearch['aName'], ",")."]";
- if (sizeof($aSearch['aNameNonSearch'])) $aTerms[] = "array_cat(name_vector,ARRAY[]::integer[]) @> ARRAY[".join($aSearch['aNameNonSearch'], ",")."]";
+ //if (sizeof($aSearch['aNameNonSearch'])) $aTerms[] = "array_cat(name_vector,ARRAY[]::integer[]) @> ARRAY[".join($aSearch['aNameNonSearch'], ",")."]";
if (sizeof($aSearch['aAddress']) && $aSearch['aName'] != $aSearch['aAddress']) {
// For infrequent name terms disable index usage for address
if (CONST_Search_NameOnlySearchFrequencyThreshold
&& sizeof($aSearch['aName']) == 1
&& $aWordFrequencyScores[$aSearch['aName'][reset($aSearch['aName'])]] < CONST_Search_NameOnlySearchFrequencyThreshold
) {
- $aTerms[] = "array_cat(nameaddress_vector,ARRAY[]::integer[]) @> ARRAY[".join(array_merge($aSearch['aAddress'], $aSearch['aAddressNonSearch']), ",")."]";
+ //$aTerms[] = "array_cat(nameaddress_vector,ARRAY[]::integer[]) @> ARRAY[".join(array_merge($aSearch['aAddress'], $aSearch['aAddressNonSearch']), ",")."]";
+ $aTerms[] = "array_cat(nameaddress_vector,ARRAY[]::integer[]) @> ARRAY[".join($aSearch['aAddress'],",")."]";
} else {
$aTerms[] = "nameaddress_vector @> ARRAY[".join($aSearch['aAddress'], ",")."]";
- if (sizeof($aSearch['aAddressNonSearch'])) {
+ /*if (sizeof($aSearch['aAddressNonSearch'])) {
$aTerms[] = "array_cat(nameaddress_vector,ARRAY[]::integer[]) @> ARRAY[".join($aSearch['aAddressNonSearch'], ",")."]";
- }
+ }*/
}
}
if ($aSearch['sCountryCode']) $aTerms[] = "country_code = '".pg_escape_string($aSearch['sCountryCode'])."'";
if ($this->bNameDetails) $sSQL .= " null as names,";
$sSQL .= " ST_X(point) as lon, ST_Y(point) as lat from (select *, ST_LineInterpolatePoint(linegeo, (housenumber-startnumber::float)/(endnumber-startnumber)::float) as point from ";
$sSQL .= " (select *, ";
- $sSQL .= " CASE WHEN interpolationtype='odd' THEN floor((".$fInterpolFraction."*(endnumber-startnumber)+startnumber)/2)::int*2+1";
+ $sSQL .= " GREATEST(startnumber, LEAST(endnumber, CASE WHEN interpolationtype='odd' THEN floor((".$fInterpolFraction."*(endnumber-startnumber)+startnumber)/2)::int*2+1";
$sSQL .= " WHEN interpolationtype='even' THEN ((".$fInterpolFraction."*(endnumber-startnumber)+startnumber)/2)::int*2";
$sSQL .= " WHEN interpolationtype='all' THEN (".$fInterpolFraction."*(endnumber-startnumber)+startnumber)::int";
- $sSQL .= " END as housenumber";
+ $sSQL .= " END)) as housenumber";
$sSQL .= " from location_property_tiger where place_id = ".$iPlaceID.") as blub1) as blub2";
} elseif ($bIsInterpolation) {
$sSQL = "select place_id, partition, 'W' as osm_type, osm_id, 'place' as class, 'house' as type, null admin_level, housenumber, null as street, null as isin, postcode,";
if ($this->bNameDetails) $sSQL .= " null as names,";
$sSQL .= " ST_X(point) as lon, ST_Y(point) as lat from (select *, ST_LineInterpolatePoint(linegeo, (housenumber-startnumber::float)/(endnumber-startnumber)::float) as point from ";
$sSQL .= " (select *, ";
- $sSQL .= " CASE WHEN interpolationtype='odd' THEN floor((".$fInterpolFraction."*(endnumber-startnumber)+startnumber)/2)::int*2+1";
+ $sSQL .= " GREATEST(startnumber, LEAST(endnumber, CASE WHEN interpolationtype='odd' THEN floor((".$fInterpolFraction."*(endnumber-startnumber)+startnumber)/2)::int*2+1";
$sSQL .= " WHEN interpolationtype='even' THEN ((".$fInterpolFraction."*(endnumber-startnumber)+startnumber)/2)::int*2";
$sSQL .= " WHEN interpolationtype='all' THEN (".$fInterpolFraction."*(endnumber-startnumber)+startnumber)::int";
- $sSQL .= " END as housenumber";
+ $sSQL .= " END)) as housenumber";
$sSQL .= " from location_property_osmline where place_id = ".$iPlaceID.") as blub1) as blub2";
// test case: interpolationtype=odd, startnumber=1000, endnumber=1006, fInterpolFraction=1
// would yield housenumber=1007, which makes ST_LineInterpolatePoint fail.
// In practice this should not happen: a search point that close to the endnumber
// resolves to the endnumber house directly from placex (in ReverseGeocode.php line 220).
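// With the GREATEST(startnumber, LEAST(endnumber, ...)) clamp added above, the
// same inputs now yield housenumber=1006: the computed number is forced back
// into [startnumber, endnumber] before ST_LineInterpolatePoint runs.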
//
preg_match_all('/(-?[0-9.]+) (-?[0-9.]+)/', $aMatch[1], $aPolyPoints, PREG_SET_ORDER);
//
- } elseif (preg_match('#MULTIPOLYGON\\(\\(\\(([- 0-9.,]+)#', $geometry_as_text, $aMatch)) {
+/* } elseif (preg_match('#MULTIPOLYGON\\(\\(\\(([- 0-9.,]+)#', $geometry_as_text, $aMatch)) {
//
preg_match_all('/(-?[0-9.]+) (-?[0-9.]+)/', $aMatch[1], $aPolyPoints, PREG_SET_ORDER);
- //
+ */
} elseif (preg_match('#POINT\\((-?[0-9.]+) (-?[0-9.]+)\\)#', $geometry_as_text, $aMatch)) {
//
$aPolyPoints = createPointsAroundCenter($aMatch[1], $aMatch[2], $fRadius);
--- /dev/null
+#!/usr/bin/python3
+#
+# Plugin to monitor response times of requests made to the API
+#
+# Uses the query log.
+#
+# Parameters:
+#
+# config (required)
+# autoconf (optional - used by munin-config)
+#
+
+import re
+import os
+import sys
+from datetime import datetime, timedelta
+
+CONFIG="""graph_title Total Nominatim response time
+graph_vlabel Time to response
+graph_category nominatim
+graph_period minute
+graph_args --base 1000
+
+avgs.label Average search time
+avgs.draw LINE
+avgs.type GAUGE
+avgs.min 0
+avgs.info Moving 5 minute average time to perform search
+
+avgr.label Average reverse time
+avgr.draw LINE
+avgr.type GAUGE
+avgr.min 0
+avgr.info Moving 5 minute average time to perform reverse
+
+max.label Slowest time to response (1/100)
+max.draw LINE
+max.type GAUGE
+max.min 0
+max.info Slowest query in last 5 minutes (unit: 100s)"""
+
+ENTRY_REGEX = re.compile(r'\[[^]]+\] (?P<dur>[0-9.]+) (?P<numres>\d+) (?P<type>[a-z]+) ')
+TIME_REGEX = re.compile(r'\[(?P<t_year>\d\d\d\d)-(?P<t_month>\d\d)-(?P<t_day>\d\d) (?P<t_hour>\d\d):(?P<t_min>\d\d):(?P<t_sec>\d\d)[0-9.]*\] ')
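+# Both regexes assume query-log lines of the form (hypothetical example):
+#   [2015-07-31 14:02:11.123] 0.453 10 search <remainder of the entry>
+# i.e. a bracketed timestamp followed by the duration in seconds, the result
+# count and the API call type.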
+
+
+class LogFile:
+ """ A query log file, unpacked. """
+
+ def __init__(self, filename):
+ self.fd = open(filename, encoding='utf-8', errors='replace')
+ self.len = os.path.getsize(filename)
+
+ def __del__(self):
+ self.fd.close()
+
+ def seek_next(self, abstime):
+ self.fd.seek(abstime)
+ self.fd.readline()
+ l = self.fd.readline()
+ e = TIME_REGEX.match(l)
+ if e is None:
+ return None
+ e = e.groupdict()
+ return datetime(int(e['t_year']), int(e['t_month']), int(e['t_day']),
+ int(e['t_hour']), int(e['t_min']), int(e['t_sec']))
+
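+    # seek_to_date() brackets the target timestamp between two file offsets
+    # and then narrows the bracket by interpolation: it estimates the log's
+    # bytes-per-second rate between the bounds, jumps to the predicted offset
+    # and widens the bracket exponentially whenever the estimate overshoots.
+    # It stops once the target is matched to within about a second or the
+    # bracket shrinks below 500 bytes.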
+ def seek_to_date(self, target):
+ # start position for binary search
+ fromseek = 0
+ fromdate = self.seek_next(0)
+ if fromdate > target:
+ return True
+ # end position for binary search
+ toseek = -100
+ while -toseek < self.len:
+ todate = self.seek_next(self.len + toseek)
+ if todate is not None:
+ break
+ toseek -= 100
+ if todate is None or todate < target:
+ return False
+ toseek = self.len + toseek
+
+
+ while True:
+ bps = (toseek - fromseek) / (todate - fromdate).total_seconds()
+ newseek = fromseek + int((target - fromdate).total_seconds() * bps)
+ newdate = self.seek_next(newseek)
+ if newdate is None:
+                return False
+ error = abs((target - newdate).total_seconds())
+ if error < 1:
+ return True
+ if newdate > target:
+ toseek = newseek
+ todate = newdate
+ oldfromseek = fromseek
+ fromseek = toseek - error * bps
+ while True:
+ if fromseek <= oldfromseek:
+ fromseek = oldfromseek
+ fromdate = self.seek_next(fromseek)
+ break
+ fromdate = self.seek_next(fromseek)
+ if fromdate < target:
+                        break
+                    bps *= 2
+ fromseek -= error * bps
+ else:
+ fromseek = newseek
+ fromdate = newdate
+ oldtoseek = toseek
+ toseek = fromseek + error * bps
+ while True:
+ if toseek > oldtoseek:
+ toseek = oldtoseek
+ todate = self.seek_next(toseek)
+ break
+ todate = self.seek_next(toseek)
+ if todate > target:
+ break
+                    bps *= 2
+ toseek += error * bps
+ if toseek - fromseek < 500:
+ return True
+
+
+ def loglines(self):
+ for l in self.fd:
+ e = ENTRY_REGEX.match(l)
+ if e is not None:
+ yield e.groupdict()
+
+
+if __name__ == '__main__':
+
+ if len(sys.argv) > 1 and sys.argv[1] == 'config':
+ print(CONFIG)
+ sys.exit(0)
+
+ sumrev = 0
+ numrev = 0
+ sumsearch = 0
+ numsearch = 0
+ maxres = 0
+ if 'NOMINATIM_QUERYLOG' in os.environ:
+ lf = LogFile(os.environ['NOMINATIM_QUERYLOG'])
+ if lf.seek_to_date(datetime.now() - timedelta(minutes=5)):
+ for l in lf.loglines():
+ dur = float(l['dur'])
+ if l['type'] == 'reverse':
+ numrev += 1
+ sumrev += dur
+ elif l['type'] == 'search':
+ numsearch += 1
+ sumsearch += dur
+ if dur > maxres:
+ maxres = dur
+
+
+ print('avgs.value', 0 if numsearch == 0 else sumsearch/numsearch)
+ print('avgr.value', 0 if numrev == 0 else sumrev/numrev)
+ print('max.value', maxres/100.0)
--- /dev/null
+#!/usr/bin/python3
+#
+# Plugin to monitor the types of requests made to the API
+#
+# Uses the query log.
+#
+# Parameters:
+#
+# config (required)
+# autoconf (optional - used by munin-config)
+#
+
+import re
+import os
+import sys
+from datetime import datetime, timedelta
+
+CONFIG="""graph_title Requests by API call
+graph_args --base 1000 -l 0
+graph_vlabel requests per minute
+graph_category nominatim
+z1.label reverse
+z1.draw AREA
+z1.type GAUGE
+z2.label search (successful)
+z2.draw STACK
+z2.type GAUGE
+z3.label search (no result)
+z3.draw STACK
+z3.type GAUGE
+z4.label details
+z4.draw STACK
+z4.type GAUGE"""
+
+ENTRY_REGEX = re.compile(r'\[[^]]+\] (?P<dur>[0-9.]+) (?P<numres>\d+) (?P<type>[a-z]+) ')
+TIME_REGEX = re.compile(r'\[(?P<t_year>\d\d\d\d)-(?P<t_month>\d\d)-(?P<t_day>\d\d) (?P<t_hour>\d\d):(?P<t_min>\d\d):(?P<t_sec>\d\d)[0-9.]*\] ')
+
+
+class LogFile:
+ """ A query log file, unpacked. """
+
+ def __init__(self, filename):
+ self.fd = open(filename, encoding='utf-8', errors='replace')
+ self.len = os.path.getsize(filename)
+
+ def __del__(self):
+ self.fd.close()
+
+ def seek_next(self, abstime):
+ self.fd.seek(abstime)
+ self.fd.readline()
+ l = self.fd.readline()
+ e = TIME_REGEX.match(l)
+ if e is None:
+ return None
+ e = e.groupdict()
+ return datetime(int(e['t_year']), int(e['t_month']), int(e['t_day']),
+ int(e['t_hour']), int(e['t_min']), int(e['t_sec']))
+
+ def seek_to_date(self, target):
+ # start position for binary search
+ fromseek = 0
+ fromdate = self.seek_next(0)
+ if fromdate > target:
+ return True
+ # end position for binary search
+ toseek = -100
+ while -toseek < self.len:
+ todate = self.seek_next(self.len + toseek)
+ if todate is not None:
+ break
+ toseek -= 100
+ if todate is None or todate < target:
+ return False
+ toseek = self.len + toseek
+
+
+ while True:
+ bps = (toseek - fromseek) / (todate - fromdate).total_seconds()
+ newseek = fromseek + int((target - fromdate).total_seconds() * bps)
+ newdate = self.seek_next(newseek)
+ if newdate is None:
+                return False
+ error = abs((target - newdate).total_seconds())
+ if error < 1:
+ return True
+ if newdate > target:
+ toseek = newseek
+ todate = newdate
+ oldfromseek = fromseek
+ fromseek = toseek - error * bps
+ while True:
+ if fromseek <= oldfromseek:
+ fromseek = oldfromseek
+ fromdate = self.seek_next(fromseek)
+ break
+ fromdate = self.seek_next(fromseek)
+ if fromdate < target:
+                        break
+                    bps *= 2
+ fromseek -= error * bps
+ else:
+ fromseek = newseek
+ fromdate = newdate
+ oldtoseek = toseek
+ toseek = fromseek + error * bps
+ while True:
+ if toseek > oldtoseek:
+ toseek = oldtoseek
+ todate = self.seek_next(toseek)
+ break
+ todate = self.seek_next(toseek)
+ if todate > target:
+ break
+                    bps *= 2
+ toseek += error * bps
+ if toseek - fromseek < 500:
+ return True
+
+
+ def loglines(self):
+ for l in self.fd:
+ e = ENTRY_REGEX.match(l)
+ if e is not None:
+ yield e.groupdict()
+
+
+if __name__ == '__main__':
+
+ if len(sys.argv) > 1 and sys.argv[1] == 'config':
+ print(CONFIG)
+ sys.exit(0)
+
+ reverse = 0
+ searchy = 0
+ searchn = 0
+ details = 0
+ if 'NOMINATIM_QUERYLOG' in os.environ:
+ lf = LogFile(os.environ['NOMINATIM_QUERYLOG'])
+ if lf.seek_to_date(datetime.now() - timedelta(minutes=5)):
+ for l in lf.loglines():
+ if l['type'] == 'reverse':
+ reverse += 1
+ elif l['type'] == 'search':
+ if l['numres'] == '0':
+ searchn += 1
+ else:
+ searchy += 1
+ else:
+ details += 1
+
+
+ print('z1.value', reverse/5)
+ print('z2.value', searchy/5)
+ print('z3.value', searchn/5)
+ print('z4.value', details/5)
--- /dev/null
+#!/bin/sh
+#
+# Plugin to monitor the number of IPs in special pools
+#
+# Parameters:
+#
+# config (required)
+# autoconf (optional - used by munin-config)
+#
+
+if [ "$1" = "config" ]; then
+
+ echo 'graph_title Restricted IPs'
+ echo 'graph_args -l 0'
+ echo 'graph_vlabel number of IPs'
+ echo 'graph_category nominatim'
+ echo 'bulk.label bulk'
+ echo 'bulk.draw AREA'
+ echo 'bulk.type GAUGE'
+ echo 'block.label blocked'
+ echo 'block.draw STACK'
+ echo 'block.type GAUGE'
+ exit 0
+fi
+
+BASEDIR="$(dirname "$(readlink -f "$0")")"
+
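+# count the entries per pool type ("bulk", "block") in the map file and print
+# them in munin's "<field>.value <count>" format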
+cut -f 2 -d ' ' "$BASEDIR/../../bin/settings/ip_blocks.map" | sort | uniq -c | sed 's:[[:space:]]*\([0-9]\+\) \(.*\):\2.value \1:'
CASE WHEN class = 'place' and type = 'postcode' THEN hstore('name', postcode) ELSE name END as name,
CASE WHEN extratags ? 'place' THEN 'place' ELSE class END as class,
CASE WHEN extratags ? 'place' THEN extratags->'place' ELSE type END as type,
- admin_level, fromarea, isaddress,
+ admin_level, fromarea, isaddress and linked_place_id is NULL as isaddress,
CASE WHEN address_place_id = for_place_id AND rank_address = 0 THEN 100 WHEN rank_address = 11 THEN 5 ELSE rank_address END as rank_address,
distance,calculated_country_code,postcode
from place_addressline join placex on (address_place_id = placex.place_id)
GRANT SELECT ON location_property_tiger_import TO "{www-user}";
-DROP TABLE IF EXISTS location_property_tiger;
-ALTER TABLE location_property_tiger_import RENAME TO location_property_tiger;
+--DROP TABLE IF EXISTS location_property_tiger;
+--ALTER TABLE location_property_tiger_import RENAME TO location_property_tiger;
-ALTER INDEX idx_location_property_tiger_parent_place_id_imp RENAME TO idx_location_property_tiger_housenumber_parent_place_id;
-ALTER INDEX idx_location_property_tiger_place_id_imp RENAME TO idx_location_property_tiger_place_id;
+--ALTER INDEX idx_location_property_tiger_parent_place_id_imp RENAME TO idx_location_property_tiger_housenumber_parent_place_id;
+--ALTER INDEX idx_location_property_tiger_place_id_imp RENAME TO idx_location_property_tiger_place_id;
DROP FUNCTION tiger_line_import (linegeo geometry, in_startnumber integer, in_endnumber integer, interpolationtype text, in_street text, in_isin text, in_postcode text);
--- /dev/null
+#!/usr/bin/python
+#
+# Search logs for high-bandwidth users and create a list of suspicious IPs.
+# There are three states: bulk, block, ban. The first are bulk requesters
+# that need throttling, the second bulk requesters that have overdone it
+# and the last manually banned IPs.
+#
+# The list can then be used in apache using rewrite rules to
+# direct bulk users to smaller thread pools or block them. A
+# typical apache config that uses php-fpm pools would look
+# like this:
+#
+# Alias /nominatim-www/ "/var/www/nominatim/"
+# Alias /nominatim-bulk/ "/var/www/nominatim/"
+# <Directory "/var/www/nominatim/">
+# Options MultiViews FollowSymLinks
+# AddType text/html .php
+# </Directory>
+#
+# <Location /nominatim-www>
+# AddHandler fcgi:/var/run/php5-fpm-www.sock .php
+# </Location>
+# <Location /nominatim-bulk>
+# AddHandler fcgi:/var/run/php5-fpm-bulk.sock .php
+# </Location>
+#
+# Redirect 509 /nominatim-block/
+# ErrorDocument 509 "Bandwidth limit exceeded."
+# Redirect 403 /nominatim-ban/
+# ErrorDocument 403 "Access blocked."
+#
+# RewriteEngine On
+# RewriteMap bulklist txt:/home/wherever/ip-block.map
+# RewriteRule ^/(.*) /nominatim-${bulklist:%{REMOTE_ADDR}|www}/$1 [PT]
+#
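+# The generated map file is a plain-text RewriteMap; a hypothetical example
+# of its contents:
+#
+#   192.0.2.17 bulk
+#   198.51.100.3 block
+#   203.0.113.9 ban
+#
+# IPs not listed fall through to the "www" default of the RewriteRule above.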
+
+import os
+import psycopg2
+import datetime
+
+BASEDIR = os.path.normpath(os.path.join(os.path.realpath(__file__), '../..'))
+
+#
+# DEFAULT SETTINGS
+#
+# Copy into settings/ip_blocks.conf and adapt as required.
+#
+BLOCKEDFILE= BASEDIR + '/settings/ip_blocks.map'
+LOGFILE= BASEDIR + '/log/restricted_ip.log'
+
+# space-separated list of IPs that are never banned
+WHITELIST = ''
+# space-separated list of IPs manually blocked
+BLACKLIST = ''
+# user-agents that should be blocked from bulk mode
+# (matched with startswith)
+UA_BLOCKLIST = ()
+
+# time before an automatically blocked IP is allowed back
+BLOCKCOOLOFF_PERIOD='1 hour'
+# quiet time before an IP is released from the bulk pool
+BULKCOOLOFF_PERIOD='15 min'
+
+BULKLONG_LIMIT=8000
+BULKSHORT_LIMIT=2000
+BLOCK_UPPER=19000
+BLOCK_LOWER=4000
+BLOCK_LOADFAC=380
+BULK_LOADFAC=160
+BULK_LOWER=1500
+MAX_BULK_IPS=85
+
+#
+# END OF DEFAULT SETTINGS
+#
+
+try:
+ execfile(os.path.expanduser(BASEDIR + "/settings/ip_blocks.conf"))
+except IOError:
+ pass
+
+# read the previous blocklist
+WHITELIST = set(WHITELIST.split()) if WHITELIST else set()
+prevblocks = []
+prevbulks = []
+BLACKLIST = set(BLACKLIST.split()) if BLACKLIST else set()
+newblocks = set()
+newbulks = set()
+
+try:
+ fd = open(BLOCKEDFILE)
+ for line in fd:
+ ip, typ = line.strip().split(' ')
+ if ip not in BLACKLIST:
+ if typ == 'block':
+ prevblocks.append(ip)
+ elif typ == 'bulk':
+ prevbulks.append(ip)
+ fd.close()
+except IOError:
+ pass #ignore non-existing file
+
+# determine current load
+fd = open("/proc/loadavg")
+avgload = int(float(fd.readline().split()[2]))
+fd.close()
+# DB load
+conn = psycopg2.connect('dbname=nominatim')
+cur = conn.cursor()
+cur.execute("select count(*)/60 from new_query_log where starttime > now() - interval '1min'")
+dbload = int(cur.fetchone()[0])
+
+BLOCK_LIMIT = max(BLOCK_LOWER, BLOCK_UPPER - BLOCK_LOADFAC * (dbload - 75))
+BULKLONG_LIMIT = max(BULK_LOWER, BULKLONG_LIMIT - BULK_LOADFAC * (avgload - 14))
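+# Worked example for the scaling above (hypothetical load figures): with
+# dbload=100, BLOCK_LIMIT = max(4000, 19000 - 380*(100-75)) = 9500, i.e. the
+# busier the database, the earlier heavy users get blocked.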
+if len(prevbulks) > MAX_BULK_IPS:
+ BLOCK_LIMIT = max(3600, BLOCK_LOWER - (len(prevbulks) - MAX_BULK_IPS)*10)
+# if the bulk pool is still empty, clients will be faster, avoid having
+# them blocked in this case
+if len(prevbulks) < 10:
+ BLOCK_LIMIT = 2*BLOCK_UPPER
+
+
+# get the new block candidates
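+# Cost model: a request scores one point plus 1.5 points per second of
+# processing time; the 10-minute counts are scaled up (x3) before being
+# merged, so short bursts are caught without waiting for the hourly window.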
+cur.execute("""
+ SELECT ipaddress, max(count), max(ua) FROM
+ ((SELECT * FROM
+ (SELECT ipaddress, sum(case when endtime is null then 1 else 1+1.5*date_part('epoch',endtime-starttime) end) as count, substring(max(useragent) from 1 for 30) as ua FROM new_query_log
+ WHERE starttime > now() - interval '1 hour' GROUP BY ipaddress) as i
+ WHERE count > %s)
+ UNION
+ (SELECT ipaddress, count * 3, ua FROM
+ (SELECT ipaddress, sum(case when endtime is null then 1 else 1+1.5*date_part('epoch',endtime-starttime) end) as count, substring(max(useragent) from 1 for 30) as ua FROM new_query_log
+ WHERE starttime > now() - interval '10 min' GROUP BY ipaddress) as i
+ WHERE count > %s)) as o
+ GROUP BY ipaddress
+""", (BULKLONG_LIMIT, BULKSHORT_LIMIT))
+
+bulkips = {}
+emergencyblocks = []
+useragentblocks = []
+
+for c in cur:
+ if c[0] not in WHITELIST and c[0] not in BLACKLIST:
+ # check for user agents that receive an immediate block
+ missing_agent = not c[2]
+ if not missing_agent:
+ for ua in UA_BLOCKLIST:
+ if c[2].startswith(ua):
+ missing_agent = True
+ break
+ if (missing_agent or c[1] > BLOCK_UPPER) and c[0] not in prevblocks:
+ newblocks.add(c[0])
+ if missing_agent:
+ useragentblocks.append(c[0])
+ else:
+ emergencyblocks.append(c[0])
+ else:
+ bulkips[c[0]] = c[1]
+
+# IPs from the block list that no longer exceed the limits
+deblockcandidates = set()
+# IPs from the bulk list that no longer exceed the limits
+debulkcandidates = set()
+# new IPs to go into the block list
+newlyblocked = []
+
+
+for ip in prevblocks:
+ if ip in bulkips:
+ newblocks.add(ip)
+ del bulkips[ip]
+ else:
+ deblockcandidates.add(ip)
+
+for ip in prevbulks:
+ if ip not in newblocks:
+ if ip in bulkips:
+ if bulkips[ip] > BLOCK_LIMIT:
+ newblocks.add(ip)
+ newlyblocked.append(ip)
+ else:
+ newbulks.add(ip)
+ del bulkips[ip]
+ else:
+ debulkcandidates.add(ip)
+
+# cross-check deblock candidates
+if deblockcandidates:
+ cur.execute("""
+ SELECT DISTINCT ipaddress FROM new_query_log
+ WHERE ipaddress IN ('%s') AND starttime > now() - interval '%s'
+ """ % ("','".join(deblockcandidates), BLOCKCOOLOFF_PERIOD))
+
+ for c in cur:
+ newblocks.add(c[0])
+ deblockcandidates.remove(c[0])
+# deblocked IPs go back to the bulk pool to catch the ones that simply
+# ignored the HTTP error and just continue to hammer the API.
+# Those that behave and stopped will be debulked a minute later.
+for ip in deblockcandidates:
+ newbulks.add(ip)
+
+# cross-check debulk candidates
+if debulkcandidates:
+ cur.execute("""
+ SELECT DISTINCT ipaddress FROM new_query_log
+ WHERE ipaddress IN ('%s') AND starttime > now() - interval '%s'
+ AND starttime > date_trunc('day', now())
+ """ % ("','".join(debulkcandidates), BULKCOOLOFF_PERIOD))
+
+ for c in cur:
+ newbulks.add(c[0])
+ debulkcandidates.remove(c[0])
+
+for ip in bulkips.iterkeys():
+ newbulks.add(ip)
+
+# write out the new list
+fd = open(BLOCKEDFILE, 'w')
+for ip in newblocks:
+ fd.write(ip + " block\n")
+for ip in newbulks:
+ fd.write(ip + " bulk\n")
+for ip in BLACKLIST:
+ fd.write(ip + " ban\n")
+fd.close()
+
+# write out the log
+logstr = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') + ' %s %s\n'
+fd = open(LOGFILE, 'a')
+if deblockcandidates:
+ fd.write(logstr % ('unblocked:', ', '.join(deblockcandidates)))
+if debulkcandidates:
+ fd.write(logstr % (' debulked:', ', '.join(debulkcandidates)))
+if bulkips:
+ fd.write(logstr % ('new bulks:', ', '.join(bulkips.keys())))
+if emergencyblocks:
+ fd.write(logstr % ('dir.block:', ', '.join(emergencyblocks)))
+if useragentblocks:
+ fd.write(logstr % (' ua block:', ', '.join(useragentblocks)))
+if newlyblocked:
+ fd.write(logstr % ('new block:', ', '.join(newlyblocked)))
+fd.close()
--- /dev/null
+#!/usr/bin/python3
+#
+# Search apache logs for high-bandwidth users and create a list of suspicious IPs.
+# There are three states: bulk, block, ban. The first are bulk requesters
+# that need throttling, the second bulk requesters that have overdone it
+# and the last manually banned IPs.
+#
+
+import re
+import os
+import sys
+import subprocess
+from datetime import datetime, timedelta
+from collections import defaultdict
+
+#
+# DEFAULT SETTINGS
+#
+# Copy into settings/ip_blocks.conf and adapt as required.
+#
+BASEDIR = os.path.normpath(os.path.join(os.path.realpath(__file__), '../..'))
+BLOCKEDFILE= BASEDIR + '/settings/ip_blocks.map'
+LOGFILE= BASEDIR + '/log/restricted_ip.log'
+
+# space-separated list of IPs that are never banned
+WHITELIST = ''
+# space-separated list of IPs manually blocked
+BLACKLIST = ''
+# user-agents that should be blocked from bulk mode
+# (matched with startswith)
+UA_BLOCKLIST = ()
+
+# time before an automatically blocked IP is allowed back
+BLOCKCOOLOFF_DELTA=timedelta(hours=1)
+# quiet time before an IP is released from the bulk pool
+BULKCOOLOFF_DELTA=timedelta(minutes=15)
+
+BULKLONG_LIMIT=8000
+BULKSHORT_LIMIT=2000
+BLOCK_UPPER=19000
+BLOCK_LOWER=4000
+BLOCK_LOADFAC=380
+BULK_LOADFAC=160
+BULK_LOWER=1500
+MAX_BULK_IPS=85
+
+#
+# END OF DEFAULT SETTINGS
+#
+
+try:
+ with open(BASEDIR + "/settings/ip_blocks.conf") as f:
+ code = compile(f.read(), BASEDIR + "/settings/ip_blocks.conf", 'exec')
+ exec(code)
+except IOError:
+ pass
+
+BLOCK_LIMIT = BLOCK_LOWER
+
+time_regex = r'(?P<t_day>\d\d)/(?P<t_month>[A-Za-z]+)/(?P<t_year>\d\d\d\d):(?P<t_hour>\d\d):(?P<t_min>\d\d):(?P<t_sec>\d\d) [+-]\d\d\d\d'
+
+format_pat = re.compile(r'(?P<ip>[a-f\d\.:]+) - - \[' + time_regex + r'] "(?P<query>.*?)" (?P<return>\d+) (?P<bytes>\d+) "(?P<referer>.*?)" "(?P<ua>.*?)"')
+time_pat = re.compile(r'[a-f\d:\.]+ - - \[' + time_regex + r'\] ')
+
+logtime_pat = "%d/%b/%Y:%H:%M:%S %z"
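+# format_pat parses the Apache combined log format; a hypothetical example of
+# a matching line:
+#   192.0.2.17 - - [31/Jul/2015:14:02:11 +0000] "GET /search?q=london HTTP/1.1" 200 512 "-" "MyApp/1.0"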
+
+MONTHS = { 'Jan' : 1, 'Feb' : 2, 'Mar' : 3, 'Apr' : 4, 'May' : 5, 'Jun' : 6,
+ 'Jul' : 7, 'Aug' : 8, 'Sep' : 9, 'Oct' : 10, 'Nov' : 11, 'Dec' : 12 }
+
+class LogEntry:
+ def __init__(self, logline):
+ e = format_pat.match(logline)
+ if e is None:
+ raise ValueError("Invalid log line:", logline)
+ e = e.groupdict()
+ self.ip = e['ip']
+ self.date = datetime(int(e['t_year']), MONTHS[e['t_month']], int(e['t_day']),
+ int(e['t_hour']), int(e['t_min']), int(e['t_sec']))
+ qp = e['query'].split(' ', 2)
+ if len(qp) < 2:
+ self.request = None
+ self.query = None
+ else:
+ self.query = qp[1]
+ if qp[0] == 'OPTIONS':
+ self.request = None
+ else:
+ if '/search' in qp[1]:
+ self.request = 'S'
+ elif '/reverse' in qp[1]:
+ self.request = 'R'
+ elif '/details' in qp[1]:
+ self.request = 'D'
+ else:
+ self.request = None
+ self.query = e['query']
+ self.retcode = int(e['return'])
+ self.referer = e['referer'] if e['referer'] != '-' else None
+ self.ua = e['ua'] if e['ua'] != '-' else None
+
+    @staticmethod
+    def get_log_time(logline):
+ e = format_pat.match(logline)
+ if e is None:
+ return None
+ e = e.groupdict()
+ #return datetime.strptime(e['time'], logtime_pat).replace(tzinfo=None)
+ return datetime(int(e['t_year']), MONTHS[e['t_month']], int(e['t_day']),
+ int(e['t_hour']), int(e['t_min']), int(e['t_sec']))
+
+
+class LogFile:
+ """ An apache log file, unpacked. """
+
+ def __init__(self, filename):
+ self.fd = open(filename)
+ self.len = os.path.getsize(filename)
+
+ def __del__(self):
+ self.fd.close()
+
+ def seek_next(self, abstime):
+ self.fd.seek(abstime)
+ self.fd.readline()
+ l = self.fd.readline()
+ return LogEntry.get_log_time(l) if l is not None else None
+
+ def seek_to_date(self, target):
+ # start position for binary search
+ fromseek = 0
+ fromdate = self.seek_next(0)
+ if fromdate > target:
+ return True
+ # end position for binary search
+ toseek = -100
+ while -toseek < self.len:
+ todate = self.seek_next(self.len + toseek)
+ if todate is not None:
+ break
+ toseek -= 100
+ if todate is None or todate < target:
+ return False
+ toseek = self.len + toseek
+
+
+ while True:
+ bps = (toseek - fromseek) / (todate - fromdate).total_seconds()
+ newseek = fromseek + int((target - fromdate).total_seconds() * bps)
+ newdate = self.seek_next(newseek)
+ if newdate is None:
+                return False
+ error = abs((target - newdate).total_seconds())
+ if error < 1:
+ return True
+ if newdate > target:
+ toseek = newseek
+ todate = newdate
+ oldfromseek = fromseek
+ fromseek = toseek - error * bps
+ while True:
+ if fromseek <= oldfromseek:
+ fromseek = oldfromseek
+ fromdate = self.seek_next(fromseek)
+ break
+ fromdate = self.seek_next(fromseek)
+ if fromdate < target:
+                        break
+                    bps *= 2
+ fromseek -= error * bps
+ else:
+ fromseek = newseek
+ fromdate = newdate
+ oldtoseek = toseek
+ toseek = fromseek + error * bps
+ while True:
+ if toseek > oldtoseek:
+ toseek = oldtoseek
+ todate = self.seek_next(toseek)
+ break
+ todate = self.seek_next(toseek)
+ if todate > target:
+ break
+                    bps *= 2
+ toseek += error * bps
+ if toseek - fromseek < 500:
+ return True
+
+
+ def loglines(self):
+ for l in self.fd:
+ try:
+ yield LogEntry(l)
+ except ValueError:
+ pass # ignore invalid lines
+
+class BlockList:
+
+ def __init__(self):
+ self.whitelist = set(WHITELIST.split()) if WHITELIST else set()
+ self.blacklist = set(BLACKLIST.split()) if BLACKLIST else set()
+ self.prevblocks = set()
+ self.prevbulks = set()
+
+ try:
+ fd = open(BLOCKEDFILE)
+ for line in fd:
+ ip, typ = line.strip().split(' ')
+ if ip not in self.blacklist:
+ if typ == 'block':
+ self.prevblocks.add(ip)
+ elif typ == 'bulk':
+ self.prevbulks.add(ip)
+ fd.close()
+ except IOError:
+ pass #ignore non-existing file
+
+
+class IPstats:
+
+ def __init__(self):
+ self.short_total = 0
+ self.short_api = 0
+ self.long_total = 0
+ self.long_api = 0
+ self.bad_ua = False
+
+ def add_long(self, logentry):
+ self.long_total += 1
+ if logentry.request is not None:
+ self.long_api += 1
+ if not self.bad_ua:
+ if logentry.ua is None:
+ self.bad_ua = True
+
+ def add_short(self, logentry):
+ self.short_total += 1
+ if logentry.request is not None:
+ self.short_api += 1
+ self.add_long(logentry)
+
+ def new_state(self, was_blocked, was_bulked):
+ if was_blocked:
+ # deblock only if the IP has been really quiet
+ # (properly catches the ones that simply ignore the HTTP error)
+ return None if self.long_total < 20 else 'block'
+ if self.long_api > BLOCK_UPPER or self.short_api > BLOCK_UPPER / 3:
+ # client totally overdoing it
+ return 'block'
+ if was_bulked:
+ if self.short_total < 20:
+ # client has stopped, debulk
+ return None
+ if self.long_api > BLOCK_LIMIT or self.short_api > BLOCK_LIMIT / 3:
+ # client is still hammering us, block
+ return 'emblock'
+ return 'bulk'
+
+ if self.long_api > BULKLONG_LIMIT or self.short_api > BULKSHORT_LIMIT:
+ #if self.bad_ua:
+ # return 'uablock' # bad useragent
+ return 'bulk'
+
+ return None
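+    # Hypothetical walk-through: an unlisted IP with long_api=9000 exceeds
+    # BULKLONG_LIMIT (8000 by default) and becomes 'bulk'; if, while bulked,
+    # it still exceeds BLOCK_LIMIT on the next run, it escalates to 'emblock',
+    # which is written out as a regular 'block'.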
+
+
+
+if __name__ == '__main__':
+ if len(sys.argv) < 2:
+        print("Usage: %s logfile [startdate]" % sys.argv[0])
+ sys.exit(-1)
+
+ if len(sys.argv) == 2:
+ dt = datetime.now() - BLOCKCOOLOFF_DELTA
+ else:
+ dt = datetime.strptime(sys.argv[2], "%Y-%m-%d %H:%M:%S")
+
+ if os.path.getsize(sys.argv[1]) < 2*1030*1024:
+ sys.exit(0) # not enough data
+
+ lf = LogFile(sys.argv[1])
+ if not lf.seek_to_date(dt):
+ sys.exit(0)
+
+ bl = BlockList()
+
+ shortstart = dt + BLOCKCOOLOFF_DELTA - BULKCOOLOFF_DELTA
+ notlogged = bl.whitelist | bl.blacklist
+
+ stats = defaultdict(IPstats)
+
+ for l in lf.loglines():
+ if l.ip not in notlogged:
+ stats[l.ip].add_long(l)
+ if l.date > shortstart:
+ break
+
+ total200 = 0
+ for l in lf.loglines():
+ if l.ip not in notlogged:
+ stats[l.ip].add_short(l)
+ if l.request is not None and l.retcode == 200:
+ total200 += 1
+
+ # adapt limits according to CPU and DB load
+ fd = open("/proc/loadavg")
+ cpuload = int(float(fd.readline().split()[2]))
+ fd.close()
+ # check the number of excess connections to apache
+    dbcons = int(subprocess.check_output(r"netstat -s | grep 'connections established' | sed 's:^\s*::;s: .*::'", shell=True))
+ fpms = int(subprocess.check_output('ps -Af | grep php-fpm | wc -l', shell=True))
+ dbload = max(0, dbcons - fpms)
+
+ numbulks = len(bl.prevbulks)
+ BLOCK_LIMIT = max(BLOCK_LIMIT, BLOCK_UPPER - BLOCK_LOADFAC * dbload)
+ BULKLONG_LIMIT = max(BULK_LOWER, BULKLONG_LIMIT - BULK_LOADFAC * cpuload)
+ if numbulks > MAX_BULK_IPS:
+ BLOCK_LIMIT = max(3600, BLOCK_LOWER - (numbulks - MAX_BULK_IPS)*10)
+ # if the bulk pool is still empty, clients will be faster, avoid having
+ # them blocked in this case
+ if numbulks < 10:
+ BLOCK_UPPER *= 2
+ BLOCK_LIMIT = BLOCK_UPPER
+
+
+ # collecting statistics
+ unblocked = []
+ debulked = []
+ bulked = []
+ blocked = []
+ uablocked = []
+ emblocked = []
+ # write out new state file
+ fd = open(BLOCKEDFILE, 'w')
+ for k,v in stats.items():
+ wasblocked = k in bl.prevblocks
+ wasbulked = k in bl.prevbulks
+ state = v.new_state(wasblocked, wasbulked)
+ if state is not None:
+ if state == 'uablock':
+ uablocked.append(k)
+ state = 'block'
+ elif state == 'emblock':
+ emblocked.append(k)
+ state = 'block'
+ elif state == 'block':
+ if not wasblocked:
+ blocked.append(k)
+ elif state == 'bulk':
+ if not wasbulked:
+ bulked.append(k)
+ fd.write("%s %s\n" % (k, state))
+ else:
+ if wasblocked:
+ unblocked.append(k)
+ elif wasbulked:
+ debulked.append(k)
+ for i in bl.blacklist:
+ fd.write("%s ban\n" % i)
+ fd.close()
+
+ # TODO write logs (need to collect some statistics)
+ logstr = datetime.now().strftime('%Y-%m-%d %H:%M') + ' %s %s\n'
+ fd = open(LOGFILE, 'a')
+ if unblocked:
+ fd.write(logstr % ('unblocked:', ', '.join(unblocked)))
+ if debulked:
+ fd.write(logstr % (' debulked:', ', '.join(debulked)))
+ if bulked:
+ fd.write(logstr % ('new bulks:', ', '.join(bulked)))
+ if emblocked:
+ fd.write(logstr % ('dir.block:', ', '.join(emblocked)))
+ if uablocked:
+ fd.write(logstr % (' ua block:', ', '.join(uablocked)))
+ if blocked:
+ fd.write(logstr % ('new block:', ', '.join(blocked)))
+ fd.close()
--- /dev/null
+#!/bin/bash -e
+#
+# Rotate query logs.
+
+dbname=nominatim
+
+basedir=`dirname $0`
+logfile=`date "+$basedir/../log/query-%F.log.gz"`
+
+# dump the old logfile
+pg_dump -a -F p -t backup_query_log $dbname | gzip -9 > $logfile
+
+# remove the old logs
+psql -q -d $dbname -c 'DROP TABLE backup_query_log'
+
+# rotate
+psql -q -1 -d $dbname -c 'ALTER TABLE new_query_log RENAME TO backup_query_log;CREATE TABLE new_query_log as (select * from backup_query_log limit 0);GRANT SELECT, INSERT, UPDATE ON new_query_log TO "www-data"'
+psql -q -d $dbname -c 'ALTER INDEX idx_new_query_log_starttime RENAME TO idx_backup_query_log_starttime'
+psql -q -d $dbname -c 'CREATE INDEX idx_new_query_log_starttime ON new_query_log USING BTREE (starttime)'
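+
+# A hypothetical crontab entry to run this rotation daily, five minutes past
+# midnight (the path is an assumption, adjust to your install):
+#   5 0 * * * /srv/nominatim/utils/cron_logrotate.sh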
+
--- /dev/null
+#!/bin/bash
+#
+# Vacuum all tables with indices on integer arrays.
+# Aggressive vacuuming seems to help against index bloat.
+#
+
+psql -q -d nominatim -c 'VACUUM ANALYSE search_name'
+psql -q -d nominatim -c 'VACUUM ANALYSE search_name_country'
+#psql -q -d nominatim -c 'VACUUM ANALYSE planet_osm_ways'
+
+for i in `seq 0 246`; do
+ psql -q -d nominatim -c "VACUUM ANALYSE search_name_${i}"
+done
+
// Index file
$sThisIndexCmd = $sCMDIndex;
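+        // determine indexing parallelism: default to two instances, drop to
+        // one when the load average is already high (>= 24)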
+ if (!isset($aResult['index-instances'])) {
+ if (getLoadAverage() < 24)
+ $iIndexInstances = 2;
+ else
+ $iIndexInstances = 1;
+ } else
+ $iIndexInstances = $aResult['index-instances'];
+
+ $sThisIndexCmd = $sCMDIndex.' -t '.$iIndexInstances;
$fCMDStartTime = time();
if (!$aResult['no-index']) {
--- /dev/null
+<html>
+<head>
+<title>Access blocked</title>
+</head>
+<body>
+<h1>Access blocked</h1>
+
+<p>You have been blocked because you have violated the
+<a href="http://wiki.openstreetmap.org/wiki/Nominatim_usage_policy">usage policy</a>
+of OSM's Nominatim geocoding service. Please be aware that OSM's resources are
+limited and shared between many users. The usage policy is there to ensure that
+the service remains usable for everybody.</p>
+
+<p>Please review the terms and make sure that your
+software adheres to them. In particular, verify that you have set a
+<b>valid</b> referrer or a user agent that identifies your application, and
+that you are not overusing the service with massive bulk requests.</p>
+
+<p>If you feel that this block is unjustified, or it remains in place after you
+have adapted your usage, you may contact the Nominatim system administrator at
+nominatim@openstreetmap.org to have the block lifted.</p>
+</body>
+</html>
--- /dev/null
+<html>
+<head>
+<title>Bandwidth limit exceeded</title>
+</head>
+<body>
+<h1>Bandwidth limit exceeded</h1>
+
+<p>You have been temporarily blocked because you have been overusing OSM's
+geocoding service or because you have not provided sufficient identification
+of your application. This block will be lifted automatically after a while.
+Please take the time to adapt your scripts to reduce the number of requests
+and make sure that you send a valid User-Agent or Referer header.</p>
+
+<p>For more information, consult the <a href="http://wiki.openstreetmap.org/wiki/Nominatim_usage_policy">usage policy</a> for the OSM Nominatim server.</p>
+</body>
+</html>
--- /dev/null
+<?xml version="1.0"?>
+ <!DOCTYPE cross-domain-policy SYSTEM "http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">
+ <cross-domain-policy>
+ <allow-access-from domain="*" />
+ </cross-domain-policy>
--- /dev/null
+<?php
+ @define('CONST_ConnectionBucket_PageType', 'Status');
+
+ require_once(dirname(dirname(__FILE__)).'/lib/init-website.php');
+
+ function statusError($sMsg)
+ {
+ header("HTTP/1.0 500 Internal Server Error");
+ echo "ERROR: ".$sMsg;
+ exit;
+ }
+
+ $oDB =& DB::connect(CONST_Database_DSN, false);
+ if (!$oDB || PEAR::isError($oDB))
+ {
+ statusError("No database");
+ }
+
+ $sLastUpdate = $oDB->getOne("select * from import_status");
+ if (PEAR::isError($sLastUpdate))
+ {
+ statusError("Update status unknown.");
+ }
+ echo $sLastUpdate;
+ exit;
+
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/"
+ xmlns:moz="http://www.mozilla.org/2006/browser/search/">
+ <ShortName>Nominatim</ShortName>
+ <LongName>Nominatim OSM Search</LongName>
+ <Description>Search for a place in OpenStreetMap Nominatim</Description>
+ <InputEncoding>UTF-8</InputEncoding>
+ <OutputEncoding>UTF-8</OutputEncoding>
+ <Url type="text/html" method="get" template="http://nominatim.openstreetmap.org/search/?q={searchTerms}" />
+ <Query role="example" searchTerms="Reigate" />
+ <Developer>Brian Quinion</Developer>
+ <AdultContent>false</AdultContent>
+  <Attribution>Data &#169; OpenStreetMap contributors, Some Rights Reserved. ODbL, http://www.osm.org/copyright.</Attribution>
+</OpenSearchDescription>
+
$aPlace = [];
}
+logEnd($oDB, $hLog, sizeof($aPlace)?1:0);
if (CONST_Debug) {
var_dump($aPlace);
--- /dev/null
+User-agent: ia_archiver
+Allow: /
+
+User-agent: *
+Disallow: /search.php
+Disallow: /search
+Disallow: /details.php
+Disallow: /details
+Disallow: /reverse.php
+Disallow: /reverse
+Disallow: /hierarchy
+Disallow: /hierarchy.php
+Disallow: /lookup
+Disallow: /lookup.php
--- /dev/null
+{
+ "data_format": 1,
+ "data_url": "http://nominatim.openstreetmap.org/taginfo.json",
+ "project": {
+ "name": "Nominatim",
+ "description": "OSM search engine.",
+ "project_url": "http://nominatim.openstreetmap.org",
+ "doc_url": "http://wiki.osm.org/wiki/Nominatim",
+ "contact_name": "Sarah Hoffmann",
+ "contact_email": "lonvia@denofr.de"
+ },
+ "tags": [
+ { "key" : "ref", "description": "Searchable name of the place."},
+ { "key" : "int_ref", "description": "Searchable name of the place."},
+ { "key" : "nat_ref", "description": "Searchable name of the place."},
+ { "key" : "reg_ref", "description": "Searchable name of the place."},
+ { "key" : "loc_ref", "description": "Searchable name of the place."},
+ { "key" : "old_ref", "description": "Searchable name of the place."},
+ { "key" : "iata", "description": "Searchable name of the place."},
+ { "key" : "icao", "description": "Searchable name of the place."},
+ { "key" : "pcode:1", "description": "Searchable name of the place."},
+ { "key" : "pcode:2", "description": "Searchable name of the place."},
+ { "key" : "pcode:3", "description": "Searchable name of the place."},
+ { "key" : "name", "description": "Searchable name of the place."},
+ { "key" : "int_name", "description": "Searchable name of the place."},
+ { "key" : "nat_name", "description": "Searchable name of the place."},
+ { "key" : "reg_name", "description": "Searchable name of the place."},
+ { "key" : "loc_name", "description": "Searchable name of the place."},
+ { "key" : "old_name", "description": "Searchable name of the place."},
+ { "key" : "alt_name", "description": "Searchable name of the place."},
+ { "key" : "official_name", "description": "Searchable name of the place."},
+ { "key" : "place_name", "description": "Searchable name of the place."},
+ { "key" : "short_name", "description": "Searchable name of the place."},
+ { "key" : "addr:housename", "description": "Searchable name of the place."},
+ { "key" : "operator", "description": "Searchable name for amenities and shops." },
+ { "key" : "brand", "description": "Searchable name of POI places."},
+ { "key" : "bridge:name", "description" : "Searchable name for bridges."},
+ { "key" : "tunnel:name", "description" : "Searchable name for tunnels."},
+ { "key" : "emergency", "description": "POI in the search database." },
+ { "key" : "tourism", "description": "POI in the search database." },
+ { "key" : "historic", "description": "POI in the search database." },
+ { "key" : "military", "description": "POI in the search database." },
+ { "key" : "natural", "description": "POI in the search database." },
+ { "key" : "man_made", "description": "POI in the search database." },
+ { "key" : "mountain_pass", "description": "POI in the search database." },
+ { "key" : "highway", "description": "POI or street in the search database (not added are: 'no', 'turning_circle', 'traffic_signals', 'mini_roundabout', 'noexit', 'crossing')." },
+ { "key" : "aerialway", "description": "POI in the search database (unless value is 'no')." },
+ { "key" : "aeroway", "description": "POI in the search database (unless value is 'no')." },
+ { "key" : "amenity", "description": "POI in the search database (unless value is 'no')." },
+ { "key" : "boundary", "description": "Area in the search database (used to compute addresses of other places)." },
+ { "key" : "bridge", "description": "POI in the search database (unless value is 'no')." },
+ { "key" : "craft", "description": "POI in the search database (unless value is 'no')." },
+ { "key" : "leisure", "description": "POI in the search database (unless value is 'no')." },
+ { "key" : "office", "description": "POI in the search database (unless value is 'no')." },
+ { "key" : "railway", "description": "Geographic feature in the search database (unless value is 'no')." },
+ { "key" : "landuse", "description": "Geographic feature in the search database (unless value is 'no')." },
+ { "key" : "shop", "description": "POI in the search database (unless value is 'no')." },
+ { "key" : "tunnel", "description": "POI in the search database (unless value is 'no')." },
+ { "key" : "waterway", "description": "Geographic feature in the search database (unless value is 'riverbank')."},
+    { "key" : "place", "description": "Settlement in the search database (used to compute addresses of other places)." },
+ { "key" : "postal_code", "description": "Postcode in search database (used to compute postcodes of places around)." },
+ { "key" : "postcode", "description": "Postcode in search database (used to compute postcodes of places around)." },
+ { "key" : "addr:postcode", "description": "Postcode in search database (used to compute postcodes of places around)." },
+ { "key" : "tiger:zip_left", "description": "Postcode in search database (used to compute postcodes of places around)." },
+ { "key" : "tiger:zip_right", "description": "Postcode in search database (used to compute postcodes of places around)." },
+ { "key" : "addr:street", "description": "Used to determine the street of a house or POI. Note that a street with the same name must exist for the tag to be effective."},
+ { "key" : "addr:place", "description": "Used to determine the settlement of a house or POI with a street-less address. Note that a place with the same name must exist for the tag to be effective."},
+ { "key" : "country_code", "description": "Used to determine the country a place is in."},
+ { "key" : "ISO3166-1", "description": "Used to determine the country a place is in."},
+ { "key" : "is_in:country_code", "description": "Used to determine the country a place is in."},
+ { "key" : "addr:country", "description": "Used to determine the country a place is in."},
+ { "key" : "addr:country_code", "description": "Used to determine the country a place is in."},
+ { "key" : "addr:housenumber", "description": "House number of the place (no ranges)."},
+ { "key" : "addr:conscriptionnumber", "description": "House number of the place (Eastern European system)."},
+ { "key" : "addr:streetnumber", "description": "House number of the place (Eastern European system)."},
+ { "key" : "addr:interpolation", "description": "Way along which house numbers are interpolated."} ,
+ { "key" : "tiger:county", "description": "Used to determine the address in the US (needs a place with the same name and a county suffix)."},
+ { "key" : "is_in", "description": "Used to determine the address of a place. Note that a place with the same name must exist for this to work."},
+ { "key" : "addr:suburb", "description": "Used to determine the address of a place. Note that a place with the same name must exist for this to work."},
+ { "key" : "addr:city", "description": "Used to determine the address of a place. Note that a place with the same name must exist for this to work."},
+ { "key" : "addr:state_code", "description": "Used to determine the address of a place. Note that a place with the same name must exist for this to work."},
+ { "key" : "addr:state", "description": "Used to determine the address of a place. Note that a place with the same name must exist for this to work."},
+ { "key" : "admin_level", "description": "Determines the hierarchy for administrative boundaries."},
+ { "key" : "wikipedia", "description": "Linking to the right wikipedia article helps to guess the importance of a place, which determines how far up in the search results it appears."}
+ ]
+}