fix errors reported by pylint
diff --git a/nominatim/nominatim.py b/nominatim/nominatim.py
old mode 100644
new mode 100755
index 6190706..8cac583
--- a/nominatim/nominatim.py
+++ b/nominatim/nominatim.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 #-----------------------------------------------------------------------------
 # nominatim - [description]
 #-----------------------------------------------------------------------------
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 #-----------------------------------------------------------------------------
-
-from argparse import ArgumentParser, RawDescriptionHelpFormatter, ArgumentTypeError
+# pylint: disable=C0111
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
 import logging
 import sys
-import re
 import getpass
-from datetime import datetime
-import psycopg2
-from psycopg2.extras import wait_select
-import threading
-from queue import Queue
-
-log = logging.getLogger()
-
-def make_connection(options, asynchronous=False):
-    return psycopg2.connect(dbname=options.dbname, user=options.user,
-                            password=options.password, host=options.host,
-                            port=options.port, async_=asynchronous)
-
-class IndexingThread(threading.Thread):
-
-    def __init__(self, queue, barrier, options):
-        super().__init__()
-        self.conn = make_connection(options)
-        self.conn.autocommit = True
-
-        self.cursor = self.conn.cursor()
-        self.perform("SET lc_messages TO 'C'")
-        self.perform(InterpolationRunner.prepare())
-        self.perform(RankRunner.prepare())
-        self.queue = queue
-        self.barrier = barrier
-
-    def run(self):
-        sql = None
-        while True:
-            item = self.queue.get()
-            if item is None:
-                break
-            elif isinstance(item, str):
-                sql = item
-                self.barrier.wait()
-            else:
-                self.perform(sql, (item,))
+import select
 
-    def perform(self, sql, args=None):
-        while True:
-            try:
-                self.cursor.execute(sql, args)
-                return
-            except psycopg2.extensions.TransactionRollbackError as e:
-                if e.pgcode is None:
-                    raise RuntimeError("Postgres exception has no error code")
-                if e.pgcode == '40P01':
-                    log.info("Deadlock detected, retry.")
-                else:
-                    raise
-
-
-
-class Indexer(object):
-
-    def __init__(self, options):
-        self.options = options
-        self.conn = make_connection(options)
-
-        self.threads = []
-        self.queue = Queue(maxsize=1000)
-        self.barrier = threading.Barrier(options.threads + 1)
-        for i in range(options.threads):
-            t = IndexingThread(self.queue, self.barrier, options)
-            self.threads.append(t)
-            t.start()
-
-    def run(self):
-        log.info("Starting indexing rank ({} to {}) using {} threads".format(
-                 self.options.minrank, self.options.maxrank,
-                 self.options.threads))
-
-        for rank in range(self.options.minrank, 30):
-            self.index(RankRunner(rank))
+from indexer.progress import ProgressLogger # pylint: disable=E0401
+from indexer.db import DBConnection, make_connection # pylint: disable=E0401
 
-        if self.options.maxrank >= 30:
-            self.index(InterpolationRunner())
-            self.index(RankRunner(30))
+LOG = logging.getLogger()
 
-        self.queue_all(None)
-        for t in self.threads:
-            t.join()
+class RankRunner:
+    """ Returns SQL commands for indexing one rank within the placex table.
+    """
 
-    def queue_all(self, item):
-        for t in self.threads:
-            self.queue.put(item)
+    def __init__(self, rank):
+        self.rank = rank
 
-    def index(self, obj):
-        log.info("Starting {}".format(obj.name()))
+    def name(self):
+        return "rank {}".format(self.rank)
 
-        self.queue_all(obj.sql_index_place())
-        self.barrier.wait()
+    def sql_count_objects(self):
+        return """SELECT count(*) FROM placex
+                  WHERE rank_address = {} and indexed_status > 0
+               """.format(self.rank)
 
-        cur = self.conn.cursor(name="main")
-        cur.execute(obj.sql_index_sectors())
+    def sql_get_objects(self):
+        return """SELECT place_id FROM placex
+                  WHERE indexed_status > 0 and rank_address = {}
+                  ORDER BY geometry_sector""".format(self.rank)
 
-        total_tuples = 0
-        for r in cur:
-            total_tuples += r[1]
-        log.debug("Total number of rows; {}".format(total_tuples))
+    @staticmethod
+    def sql_index_place(ids):
+        return "UPDATE placex SET indexed_status = 0 WHERE place_id IN ({})"\
+               .format(','.join((str(i) for i in ids)))
 
-        cur.scroll(0, mode='absolute')
 
-        done_tuples = 0
-        rank_start_time = datetime.now()
-        for r in cur:
-            sector = r[0]
+class InterpolationRunner:
+    """ Returns SQL commands for indexing the address interpolation table
+        location_property_osmline.
+    """
 
-            # Should we do the remaining ones together?
-            do_all = total_tuples - done_tuples < len(self.threads) * 1000
+    @staticmethod
+    def name():
+        return "interpolation lines (location_property_osmline)"
 
-            pcur = self.conn.cursor(name='places')
+    @staticmethod
+    def sql_count_objects():
+        return """SELECT count(*) FROM location_property_osmline
+                  WHERE indexed_status > 0"""
 
-            if do_all:
-                pcur.execute(obj.sql_nosector_places())
-            else:
-                pcur.execute(obj.sql_sector_places(), (sector, ))
+    @staticmethod
+    def sql_get_objects():
+        return """SELECT place_id FROM location_property_osmline
+                  WHERE indexed_status > 0
+                  ORDER BY geometry_sector"""
 
-            for place in pcur:
-                place_id = place[0]
-                log.debug("Processing place {}".format(place_id))
+    @staticmethod
+    def sql_index_place(ids):
+        return """UPDATE location_property_osmline
+                  SET indexed_status = 0 WHERE place_id IN ({})"""\
+               .format(','.join((str(i) for i in ids)))
 
-                self.queue.put(place_id)
-                done_tuples += 1
+class BoundaryRunner:
+    """ Returns SQL commands for indexing the administrative boundaries
+        of a certain rank.
+    """
 
-            pcur.close()
+    def __init__(self, rank):
+        self.rank = rank
 
-            if do_all:
-                break
+    def name(self):
+        return "boundaries rank {}".format(self.rank)
 
-        cur.close()
+    def sql_count_objects(self):
+        return """SELECT count(*) FROM placex
+                  WHERE indexed_status > 0
+                    AND rank_search = {}
+                    AND class = 'boundary' and type = 'administrative'""".format(self.rank)
+
+    def sql_get_objects(self):
+        return """SELECT place_id FROM placex
+                  WHERE indexed_status > 0 and rank_search = {}
+                        and class = 'boundary' and type = 'administrative'
+                  ORDER BY partition, admin_level""".format(self.rank)
 
-        self.queue_all("")
-        self.barrier.wait()
+    @staticmethod
+    def sql_index_place(ids):
+        return "UPDATE placex SET indexed_status = 0 WHERE place_id IN ({})"\
+               .format(','.join((str(i) for i in ids)))
 
-        rank_end_time = datetime.now()
-        diff_seconds = (rank_end_time-rank_start_time).total_seconds()
+class Indexer:
+    """ Main indexing routine.
+    """
 
-        log.info("Done {} in {} @ {} per second - FINISHED {}\n".format(
-                 done_tuples, int(diff_seconds),
-                 done_tuples/diff_seconds, obj.name()))
+    def __init__(self, opts):
+        self.minrank = max(1, opts.minrank)
+        self.maxrank = min(30, opts.maxrank)
+        self.conn = make_connection(opts)
+        self.threads = [DBConnection(opts) for _ in range(opts.threads)]
 
+    def index_boundaries(self):
+        LOG.warning("Starting indexing boundaries using %s threads",
+                    len(self.threads))
 
-class RankRunner(object):
+        for rank in range(max(self.minrank, 5), min(self.maxrank, 26)):
+            self.index(BoundaryRunner(rank))
 
-    def __init__(self, rank):
-        self.rank = rank
+    def index_by_rank(self):
+        """ Run classic indexing by rank.
+        """
+        LOG.warning("Starting indexing rank (%i to %i) using %i threads",
+                    self.minrank, self.maxrank, len(self.threads))
 
-    def name(self):
-        return "rank {}".format(self.rank)
+        for rank in range(max(1, self.minrank), self.maxrank):
+            self.index(RankRunner(rank))
 
-    @classmethod
-    def prepare(cls):
-        return """PREPARE rnk_index AS
-                  UPDATE placex
-                  SET indexed_status = 0 WHERE place_id = $1"""
+        if self.maxrank == 30:
+            self.index(RankRunner(0))
+            self.index(InterpolationRunner(), 20)
+            self.index(RankRunner(self.maxrank), 20)
+        else:
+            self.index(RankRunner(self.maxrank))
 
-    def sql_index_sectors(self):
-        return """SELECT geometry_sector, count(*) FROM placex
-                  WHERE rank_search = {} and indexed_status > 0
-                  GROUP BY geometry_sector
-                  ORDER BY geometry_sector""".format(self.rank)
+    def index(self, obj, batch=1):
+        """ Index a single rank or table. `obj` describes the SQL to use
+            for indexing. `batch` describes the number of objects that
+            should be processed with a single SQL statement.
+        """
+        LOG.warning("Starting %s (using batch size %s)", obj.name(), batch)
 
-    def sql_nosector_places(self):
-        return """SELECT place_id FROM placex
-                  WHERE indexed_status > 0 and rank_search = {}
-                  ORDER BY geometry_sector""".format(self.rank)
+        cur = self.conn.cursor()
+        cur.execute(obj.sql_count_objects())
 
-    def sql_sector_places(self):
-        return """SELECT place_id FROM placex
-                  WHERE indexed_status > 0 and geometry_sector = %s
-                  ORDER BY geometry_sector"""
+        total_tuples = cur.fetchone()[0]
+        LOG.debug("Total number of rows: %i", total_tuples)
 
-    def sql_index_place(self):
-        return "EXECUTE rnk_index(%s)"
+        cur.close()
 
+        progress = ProgressLogger(obj.name(), total_tuples)
 
-class InterpolationRunner(object):
+        if total_tuples > 0:
+            cur = self.conn.cursor(name='places')
+            cur.execute(obj.sql_get_objects())
 
-    def name(self):
-        return "interpolation lines (location_property_osmline)"
+            next_thread = self.find_free_thread()
+            while True:
+                places = [p[0] for p in cur.fetchmany(batch)]
+                if not places:
+                    break
 
-    @classmethod
-    def prepare(cls):
-        return """PREPARE ipl_index AS
-                  UPDATE location_property_osmline
-                  SET indexed_status = 0 WHERE place_id = $1"""
+                LOG.debug("Processing places: %s", str(places))
+                thread = next(next_thread)
 
-    def sql_index_sectors(self):
-        return """SELECT geometry_sector, count(*) FROM location_property_osmline
-                  WHERE indexed_status > 0
-                  GROUP BY geometry_sector
-                  ORDER BY geometry_sector"""
+                thread.perform(obj.sql_index_place(places))
+                progress.add(len(places))
 
-    def sql_nosector_places(self):
-        return """SELECT place_id FROM location_property_osmline
-                  WHERE indexed_status > 0
-                  ORDER BY geometry_sector"""
+            cur.close()
 
-    def sql_sector_places(self):
-        return """SELECT place_id FROM location_property_osmline
-                  WHERE indexed_status > 0 and geometry_sector = %s
-                  ORDER BY geometry_sector"""
+            for thread in self.threads:
+                thread.wait()
+
+        progress.done()
+
+    def find_free_thread(self):
+        """ Generator that returns the next connection that is free for
+            sending a query.
+        """
+        ready = self.threads
+        command_stat = 0
+
+        while True:
+            for thread in ready:
+                if thread.is_done():
+                    command_stat += 1
+                    yield thread
+
+            # refresh the connections occasionally to avoid potential
+            # memory leaks in PostgreSQL.
+            if command_stat > 100000:
+                for thread in self.threads:
+                    while not thread.is_done():
+                        thread.wait()
+                    thread.connect()
+                command_stat = 0
+                ready = self.threads
+            else:
+                ready, _, _ = select.select(self.threads, [], [])
 
-    def sql_index_place(self):
-        return "EXECUTE ipl_index(%s)"
+        assert False, "Unreachable code"
 
 
 def nominatim_arg_parser():
     """ Setup the command-line parser for the tool.
     """
-    def h(s):
-        return re.sub("\s\s+" , " ", s)
-
-    p = ArgumentParser(description=__doc__,
-                       formatter_class=RawDescriptionHelpFormatter)
-
-    p.add_argument('-d', '--database',
-                   dest='dbname', action='store', default='nominatim',
-                   help='Name of the PostgreSQL database to connect to.')
-    p.add_argument('-U', '--username',
-                   dest='user', action='store',
-                   help='PostgreSQL user name.')
-    p.add_argument('-W', '--password',
-                   dest='password_prompt', action='store_true',
-                   help='Force password prompt.')
-    p.add_argument('-H', '--host',
-                   dest='host', action='store',
-                   help='PostgreSQL server hostname or socket location.')
-    p.add_argument('-P', '--port',
-                   dest='port', action='store',
-                   help='PostgreSQL server port')
-    p.add_argument('-r', '--minrank',
-                   dest='minrank', type=int, metavar='RANK', default=0,
-                   help='Minimum/starting rank.')
-    p.add_argument('-R', '--maxrank',
-                   dest='maxrank', type=int, metavar='RANK', default=30,
-                   help='Maximum/finishing rank.')
-    p.add_argument('-t', '--threads',
-                   dest='threads', type=int, metavar='NUM', default=1,
-                   help='Number of threads to create for indexing.')
-    p.add_argument('-v', '--verbose',
-                   dest='loglevel', action='count', default=0,
-                   help='Increase verbosity')
-
-    return p
+    parser = ArgumentParser(description="Indexing tool for Nominatim.",
+                            formatter_class=RawDescriptionHelpFormatter)
+
+    parser.add_argument('-d', '--database',
+                        dest='dbname', action='store', default='nominatim',
+                        help='Name of the PostgreSQL database to connect to.')
+    parser.add_argument('-U', '--username',
+                        dest='user', action='store',
+                        help='PostgreSQL user name.')
+    parser.add_argument('-W', '--password',
+                        dest='password_prompt', action='store_true',
+                        help='Force password prompt.')
+    parser.add_argument('-H', '--host',
+                        dest='host', action='store',
+                        help='PostgreSQL server hostname or socket location.')
+    parser.add_argument('-P', '--port',
+                        dest='port', action='store',
+                        help='PostgreSQL server port.')
+    parser.add_argument('-b', '--boundary-only',
+                        dest='boundary_only', action='store_true',
+                        help='Only index administrative boundaries (ignores min/maxrank).')
+    parser.add_argument('-r', '--minrank',
+                        dest='minrank', type=int, metavar='RANK', default=0,
+                        help='Minimum/starting rank.')
+    parser.add_argument('-R', '--maxrank',
+                        dest='maxrank', type=int, metavar='RANK', default=30,
+                        help='Maximum/finishing rank.')
+    parser.add_argument('-t', '--threads',
+                        dest='threads', type=int, metavar='NUM', default=1,
+                        help='Number of threads to create for indexing.')
+    parser.add_argument('-v', '--verbose',
+                        dest='loglevel', action='count', default=0,
+                        help='Increase verbosity.')
+
+    return parser
 
 if __name__ == '__main__':
     logging.basicConfig(stream=sys.stderr, format='%(levelname)s: %(message)s')
 
-    options = nominatim_arg_parser().parse_args(sys.argv[1:])
+    OPTIONS = nominatim_arg_parser().parse_args(sys.argv[1:])
 
-    log.setLevel(max(3 - options.loglevel, 0) * 10)
+    LOG.setLevel(max(3 - OPTIONS.loglevel, 0) * 10)
 
-    options.password = None
-    if options.password_prompt:
-        password = getpass.getpass("Database password: ")
-        options.password = password
+    OPTIONS.password = None
+    if OPTIONS.password_prompt:
+        PASSWORD = getpass.getpass("Database password: ")
+        OPTIONS.password = PASSWORD
 
-    Indexer(options).run()
+    if OPTIONS.boundary_only:
+        Indexer(OPTIONS).index_boundaries()
+    else:
+        Indexer(OPTIONS).index_by_rank()
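
The three runner classes in the new code (RankRunner, InterpolationRunner, BoundaryRunner) share an implicit protocol that Indexer.index() relies on: name(), sql_count_objects(), sql_get_objects() and sql_index_place(ids). As a hedged illustration of that protocol, a further runner could look like the sketch below; PoiRunner and its class-based filter are purely hypothetical and not part of this commit.

class PoiRunner:
    """ Hypothetical runner that would re-index all pending places of one class. """

    def __init__(self, osm_class):
        self.osm_class = osm_class

    def name(self):
        # Only used for log and progress messages.
        return "places of class {}".format(self.osm_class)

    def sql_count_objects(self):
        # Must return a query yielding a single row count, used to size the progress log.
        return """SELECT count(*) FROM placex
                  WHERE indexed_status > 0 AND class = '{}'""".format(self.osm_class)

    def sql_get_objects(self):
        # Must return a query yielding the place_ids to process, in processing order.
        return """SELECT place_id FROM placex
                  WHERE indexed_status > 0 AND class = '{}'
                  ORDER BY geometry_sector""".format(self.osm_class)

    @staticmethod
    def sql_index_place(ids):
        # Receives one batch of place_ids and must return the UPDATE that marks
        # them as processed.
        return "UPDATE placex SET indexed_status = 0 WHERE place_id IN ({})"\
               .format(','.join(str(i) for i in ids))

Such a runner would be consumed exactly like the built-in ones, e.g. Indexer(OPTIONS).index(PoiRunner('amenity'), batch=20).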
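The heavy lifting has moved into indexer/db.py, which is not part of this diff. From the way nominatim.py uses it, DBConnection must offer perform(), wait(), is_done(), connect() and, because instances are passed directly to select.select(), a fileno() method. The following is a minimal sketch of such a wrapper around psycopg2's asynchronous mode, written only to document the assumed interface; it is not the actual indexer/db.py (in particular, the real module would also need the deadlock retry for pgcode 40P01 that the removed IndexingThread.perform() used to do).

import psycopg2
from psycopg2.extras import wait_select

def make_connection(options, asynchronous=False):
    # `options` is the argparse namespace built by nominatim_arg_parser().
    return psycopg2.connect(dbname=options.dbname, user=options.user,
                            password=options.password, host=options.host,
                            port=options.port, async_=asynchronous)

class DBConnection:
    """ A single non-blocking database connection. """

    def __init__(self, options):
        self.options = options
        self.conn = None
        self.cursor = None
        self.connect()

    def connect(self):
        # (Re)open the connection; called on startup and when the indexer
        # recycles connections after many statements.
        if self.conn is not None:
            self.cursor.close()
            self.conn.close()
        self.conn = make_connection(self.options, asynchronous=True)
        self.wait()                      # wait for connection setup to finish
        self.cursor = self.conn.cursor()

    def wait(self):
        # Block until the current query (or connection setup) has finished.
        wait_select(self.conn)

    def perform(self, sql, args=None):
        # Send a query without waiting for the result.
        self.cursor.execute(sql, args)

    def fileno(self):
        # Expose the socket descriptor so select.select() can watch the object.
        return self.conn.fileno()

    def is_done(self):
        # True when no query is running on this connection any more.
        return self.conn.poll() == psycopg2.extensions.POLL_OK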
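Similarly, ProgressLogger comes from indexer/progress.py. Its use above only requires a constructor taking a job name and a total row count plus add() and done(); a hypothetical minimal version, roughly reproducing the summary line that the removed Indexer.index() used to log, could look like this.

import logging
from datetime import datetime

LOG = logging.getLogger()

class ProgressLogger:
    """ Tracks how many rows of a named indexing job have been processed. """

    def __init__(self, name, total):
        self.name = name
        self.total = total
        self.done_places = 0
        self.start = datetime.now()

    def add(self, num):
        # Called after every batch with the number of rows just submitted.
        self.done_places += num

    def done(self):
        # Called once at the end of the job; log a summary similar to the
        # old "Done ... per second - FINISHED ..." message.
        seconds = (datetime.now() - self.start).total_seconds() or 1
        LOG.warning("Done %d in %d s @ %.1f per second - FINISHED %s",
                    self.done_places, seconds,
                    self.done_places / seconds, self.name)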