X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/0a26ca7104940447ea98a1edf0e7c443da09d37f..5469d02d03a8cc7a9e8aec8302668c50eb19b50c:/nominatim/nominatim.py?ds=sidebyside

diff --git a/nominatim/nominatim.py b/nominatim/nominatim.py
old mode 100644
new mode 100755
index 61907060..0db0777d
--- a/nominatim/nominatim.py
+++ b/nominatim/nominatim.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 #-----------------------------------------------------------------------------
 # nominatim - [description]
 #-----------------------------------------------------------------------------
@@ -30,98 +30,188 @@ import getpass
 from datetime import datetime
 import psycopg2
 from psycopg2.extras import wait_select
-import threading
-from queue import Queue
+import select

 log = logging.getLogger()

 def make_connection(options, asynchronous=False):
-    return psycopg2.connect(dbname=options.dbname, user=options.user,
-                            password=options.password, host=options.host,
-                            port=options.port, async_=asynchronous)
+    params = {'dbname' : options.dbname,
+              'user' : options.user,
+              'password' : options.password,
+              'host' : options.host,
+              'port' : options.port,
+              'async' : asynchronous}

-class IndexingThread(threading.Thread):
+    return psycopg2.connect(**params)

-    def __init__(self, queue, barrier, options):
-        super().__init__()
-        self.conn = make_connection(options)
-        self.conn.autocommit = True
-        self.cursor = self.conn.cursor()
-        self.perform("SET lc_messages TO 'C'")
-        self.perform(InterpolationRunner.prepare())
-        self.perform(RankRunner.prepare())
-        self.queue = queue
-        self.barrier = barrier
+class RankRunner(object):
+    """ Returns SQL commands for indexing one rank within the placex table.
+    """

-    def run(self):
-        sql = None
-        while True:
-            item = self.queue.get()
-            if item is None:
-                break
-            elif isinstance(item, str):
-                sql = item
-                self.barrier.wait()
-            else:
-                self.perform(sql, (item,))
+    def __init__(self, rank):
+        self.rank = rank

-    def perform(self, sql, args=None):
+    def name(self):
+        return "rank {}".format(self.rank)
+
+    def sql_index_sectors(self):
+        return """SELECT geometry_sector, count(*) FROM placex
+                  WHERE rank_search = {} and indexed_status > 0
+                  GROUP BY geometry_sector
+                  ORDER BY geometry_sector""".format(self.rank)
+
+    def sql_nosector_places(self):
+        return """SELECT place_id FROM placex
+                  WHERE indexed_status > 0 and rank_search = {}
+                  ORDER BY geometry_sector""".format(self.rank)
+
+    def sql_sector_places(self):
+        return """SELECT place_id FROM placex
+                  WHERE indexed_status > 0 and rank_search = {}
+                  and geometry_sector = %s""".format(self.rank)
+
+    def sql_index_place(self):
+        return "UPDATE placex SET indexed_status = 0 WHERE place_id = %s"
+
+
+class InterpolationRunner(object):
+    """ Returns SQL commands for indexing the address interpolation table
+        location_property_osmline.
+    """
+
+    def name(self):
+        return "interpolation lines (location_property_osmline)"
+
+    def sql_index_sectors(self):
+        return """SELECT geometry_sector, count(*) FROM location_property_osmline
+                  WHERE indexed_status > 0
+                  GROUP BY geometry_sector
+                  ORDER BY geometry_sector"""
+
+    def sql_nosector_places(self):
+        return """SELECT place_id FROM location_property_osmline
+                  WHERE indexed_status > 0
+                  ORDER BY geometry_sector"""
+
+    def sql_sector_places(self):
+        return """SELECT place_id FROM location_property_osmline
+                  WHERE indexed_status > 0 and geometry_sector = %s
+                  ORDER BY geometry_sector"""
+
+    def sql_index_place(self):
+        return """UPDATE location_property_osmline
+                  SET indexed_status = 0 WHERE place_id = %s"""
+
+
+class DBConnection(object):
+    """ A single non-blocking database connection.
+    """
+
+    def __init__(self, options):
+        self.current_query = None
+        self.current_params = None
+
+        self.conn = None
+        self.connect()
+
+    def connect(self):
+        if self.conn is not None:
+            self.cursor.close()
+            self.conn.close()
+
+        self.conn = make_connection(options, asynchronous=True)
+        self.wait()
+
+        self.cursor = self.conn.cursor()
+
+    def wait(self):
+        """ Block until any pending operation is done.
+        """
         while True:
             try:
-                self.cursor.execute(sql, args)
+                wait_select(self.conn)
+                self.current_query = None
                 return
             except psycopg2.extensions.TransactionRollbackError as e:
-                if e.pgcode is None:
-                    raise RuntimeError("Postgres exception has no error code")
                 if e.pgcode == '40P01':
-                    log.info("Deadlock detected, retry.")
+                    log.info("Deadlock detected (params = {}), retry."
+                             .format(self.current_params))
+                    self.cursor.execute(self.current_query, self.current_params)
                 else:
                     raise
+            except psycopg2.errors.DeadlockDetected:
+                self.cursor.execute(self.current_query, self.current_params)

+    def perform(self, sql, args=None):
+        """ Send SQL query to the server. Returns immediately without
+            blocking.
+        """
+        self.current_query = sql
+        self.current_params = args
+        self.cursor.execute(sql, args)
+
+    def fileno(self):
+        """ File descriptor to wait for. (Makes this class select()able.)
+        """
+        return self.conn.fileno()
+
+    def is_done(self):
+        """ Check if the connection is available for a new query.
+
+            Also checks if the previous query has run into a deadlock.
+            If so, then the previous query is repeated.
+        """
+        if self.current_query is None:
+            return True
+
+        try:
+            if self.conn.poll() == psycopg2.extensions.POLL_OK:
+                self.current_query = None
+                return True
+        except psycopg2.extensions.TransactionRollbackError as e:
+            if e.pgcode == '40P01':
+                log.info("Deadlock detected (params = {}), retry.".format(self.current_params))
+                self.cursor.execute(self.current_query, self.current_params)
+            else:
+                raise
+        except psycopg2.errors.DeadlockDetected:
+            self.cursor.execute(self.current_query, self.current_params)
+
+        return False

 class Indexer(object):
+    """ Main indexing routine.
+    """

     def __init__(self, options):
-        self.options = options
+        self.minrank = max(0, options.minrank)
+        self.maxrank = min(30, options.maxrank)
         self.conn = make_connection(options)
-
-        self.threads = []
-        self.queue = Queue(maxsize=1000)
-        self.barrier = threading.Barrier(options.threads + 1)
-        for i in range(options.threads):
-            t = IndexingThread(self.queue, self.barrier, options)
-            self.threads.append(t)
-            t.start()
+        self.threads = [DBConnection(options) for i in range(options.threads)]

     def run(self):
-        log.info("Starting indexing rank ({} to {}) using {} threads".format(
-                self.options.minrank, self.options.maxrank,
-                self.options.threads))
+        """ Run indexing over the entire database.
+        """
+        log.warning("Starting indexing rank ({} to {}) using {} threads".format(
+                self.minrank, self.maxrank, len(self.threads)))

-        for rank in range(self.options.minrank, 30):
+        for rank in range(self.minrank, self.maxrank):
             self.index(RankRunner(rank))

-        if self.options.maxrank >= 30:
+        if self.maxrank == 30:
             self.index(InterpolationRunner())
-            self.index(RankRunner(30))

-        self.queue_all(None)
-        for t in self.threads:
-            t.join()
-
-    def queue_all(self, item):
-        for t in self.threads:
-            self.queue.put(item)
+        self.index(RankRunner(self.maxrank))

     def index(self, obj):
-        log.info("Starting {}".format(obj.name()))
-
-        self.queue_all(obj.sql_index_place())
-        self.barrier.wait()
+        """ Index a single rank or table. `obj` describes the SQL to use
+            for indexing.
+        """
+        log.warning("Starting {}".format(obj.name()))

-        cur = self.conn.cursor(name="main")
+        cur = self.conn.cursor(name='main')
         cur.execute(obj.sql_index_sectors())

         total_tuples = 0
@@ -131,28 +221,47 @@ class Indexer(object):

         cur.scroll(0, mode='absolute')

+        next_thread = self.find_free_thread()
         done_tuples = 0
         rank_start_time = datetime.now()
+
+        sector_sql = obj.sql_sector_places()
+        index_sql = obj.sql_index_place()
+        min_grouped_tuples = total_tuples - len(self.threads) * 1000
+
+        next_info = 100 if log.isEnabledFor(logging.INFO) else total_tuples + 1
+
         for r in cur:
             sector = r[0]

             # Should we do the remaining ones together?
-            do_all = total_tuples - done_tuples < len(self.threads) * 1000
+            do_all = done_tuples > min_grouped_tuples

             pcur = self.conn.cursor(name='places')

             if do_all:
                 pcur.execute(obj.sql_nosector_places())
             else:
-                pcur.execute(obj.sql_sector_places(), (sector, ))
+                pcur.execute(sector_sql, (sector, ))

             for place in pcur:
                 place_id = place[0]
                 log.debug("Processing place {}".format(place_id))
+                thread = next(next_thread)

-                self.queue.put(place_id)
+                thread.perform(index_sql, (place_id,))
                 done_tuples += 1

+                if done_tuples >= next_info:
+                    now = datetime.now()
+                    done_time = (now - rank_start_time).total_seconds()
+                    tuples_per_sec = done_tuples / done_time
+                    log.info("Done {} in {} @ {:.3f} per second - {} ETA (seconds): {:.2f}"
+                             .format(done_tuples, int(done_time),
+                                     tuples_per_sec, obj.name(),
+                                     (total_tuples - done_tuples)/tuples_per_sec))
+                    next_info += int(tuples_per_sec)
+
             pcur.close()

             if do_all:
@@ -160,80 +269,42 @@ class Indexer(object):

         cur.close()

-        self.queue_all("")
-        self.barrier.wait()
+        for t in self.threads:
+            t.wait()

         rank_end_time = datetime.now()
         diff_seconds = (rank_end_time-rank_start_time).total_seconds()

-        log.info("Done {} in {} @ {} per second - FINISHED {}\n".format(
-            done_tuples, int(diff_seconds),
+        log.warning("Done {}/{} in {} @ {:.3f} per second - FINISHED {}\n".format(
+            done_tuples, total_tuples, int(diff_seconds),
             done_tuples/diff_seconds, obj.name()))

+    def find_free_thread(self):
+        """ Generator that returns the next connection that is free for
+            sending a query.
+        """
+        ready = self.threads
+        command_stat = 0

-class RankRunner(object):
-
-    def __init__(self, rank):
-        self.rank = rank
-
-    def name(self):
-        return "rank {}".format(self.rank)
-
-    @classmethod
-    def prepare(cls):
-        return """PREPARE rnk_index AS
-               UPDATE placex
-               SET indexed_status = 0 WHERE place_id = $1"""
-
-    def sql_index_sectors(self):
-        return """SELECT geometry_sector, count(*) FROM placex
-                  WHERE rank_search = {} and indexed_status > 0
-                  GROUP BY geometry_sector
-                  ORDER BY geometry_sector""".format(self.rank)
-
-    def sql_nosector_places(self):
-        return """SELECT place_id FROM placex
-                  WHERE indexed_status > 0 and rank_search = {}
-                  ORDER BY geometry_sector""".format(self.rank)
-
-    def sql_sector_places(self):
-        return """SELECT place_id FROM placex
-                  WHERE indexed_status > 0 and geometry_sector = %s
-                  ORDER BY geometry_sector"""
-
-    def sql_index_place(self):
-        return "EXECUTE rnk_index(%s)"
-
-
-class InterpolationRunner(object):
-
-    def name(self):
-        return "interpolation lines (location_property_osmline)"
-
-    @classmethod
-    def prepare(cls):
-        return """PREPARE ipl_index AS
-               UPDATE location_property_osmline
-               SET indexed_status = 0 WHERE place_id = $1"""
-
-    def sql_index_sectors(self):
-        return """SELECT geometry_sector, count(*) FROM location_property_osmline
-                  WHERE indexed_status > 0
-                  GROUP BY geometry_sector
-                  ORDER BY geometry_sector"""
-
-    def sql_nosector_places(self):
-        return """SELECT place_id FROM location_property_osmline
-                  WHERE indexed_status > 0
-                  ORDER BY geometry_sector"""
-
-    def sql_sector_places(self):
-        return """SELECT place_id FROM location_property_osmline
-                  WHERE indexed_status > 0 and geometry_sector = %s
-                  ORDER BY geometry_sector"""

+        while True:
+            for thread in ready:
+                if thread.is_done():
+                    command_stat += 1
+                    yield thread
+
+            # refresh the connections occasionally to avoid potential
+            # memory leaks in PostgreSQL.
+            if command_stat > 100000:
+                for t in self.threads:
+                    while not t.is_done():
+                        wait_select(t.conn)
+                    t.connect()
+                command_stat = 0
+                ready = self.threads
+            else:
+                ready, _, _ = select.select(self.threads, [], [])

-    def sql_index_place(self):
-        return "EXECUTE ipl_index(%s)"
+        assert False, "Unreachable code"


 def nominatim_arg_parser():
@@ -242,7 +313,7 @@ def nominatim_arg_parser():
     def h(s):
         return re.sub("\s\s+" , " ", s)

-    p = ArgumentParser(description=__doc__,
+    p = ArgumentParser(description="Indexing tool for Nominatim.",
                        formatter_class=RawDescriptionHelpFormatter)

     p.add_argument('-d', '--database',