fetch place info asynchronously
[nominatim.git] / nominatim / indexer / indexer.py
index 3a39a151dd4f1f1c845ee15ae1c1591a75cd0809..d685e83a1546097366faa41fe8bc580ad5b4e660 100644
--- a/nominatim/indexer/indexer.py
+++ b/nominatim/indexer/indexer.py
@@ -3,6 +3,9 @@ Main work horse for indexing (computing addresses) the database.
 """
 import logging
 import select
 """
 import logging
 import select
+import time
+
+import psycopg2.extras
 
 from nominatim.indexer.progress import ProgressLogger
 from nominatim.indexer import runners
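
The two new imports serve the changes further down: time feeds the
wait-time instrumentation and psycopg2.extras provides hstore support.
Registering the hstore adapter makes hstore columns come back as Python
dicts; on an asynchronous connection the adapter cannot look up the type
OIDs itself, which is why the indexing code below fetches them manually
and passes them in. A minimal sketch of that pattern (the DSN is
illustrative; it assumes a database with the hstore extension installed):

    import psycopg2
    import psycopg2.extras

    conn = psycopg2.connect("dbname=nominatim")   # illustrative DSN
    with conn.cursor() as cur:
        # Look up the hstore type OIDs once on a synchronous connection...
        cur.execute("SELECT 'hstore'::regtype::oid, 'hstore[]'::regtype::oid")
        oid, array_oid = cur.fetchone()

    # ...so they can be handed to register_hstore() on a connection where
    # automatic lookup is impossible (e.g. an asynchronous one).
    psycopg2.extras.register_hstore(conn, oid=oid, array_oid=array_oid)

    with conn.cursor() as cur:
        cur.execute("SELECT 'a=>1, b=>2'::hstore")
        print(cur.fetchone()[0])                  # -> {'a': '1', 'b': '2'}
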
@@ -124,8 +127,9 @@ class Indexer:
         LOG.warning("Starting indexing boundaries using %s threads",
                     self.num_threads)
 
         LOG.warning("Starting indexing boundaries using %s threads",
                     self.num_threads)
 
-        for rank in range(max(minrank, 4), min(maxrank, 26)):
-            self._index(runners.BoundaryRunner(rank))
+        with self.tokenizer.name_analyzer() as analyzer:
+            for rank in range(max(minrank, 4), min(maxrank, 26)):
+                self._index(runners.BoundaryRunner(rank, analyzer))
 
     def index_by_rank(self, minrank, maxrank):
         """ Index all entries of placex in the given rank range (inclusive)
@@ -138,15 +142,16 @@ class Indexer:
         LOG.warning("Starting indexing rank (%i to %i) using %i threads",
                     minrank, maxrank, self.num_threads)
 
         LOG.warning("Starting indexing rank (%i to %i) using %i threads",
                     minrank, maxrank, self.num_threads)
 
-        for rank in range(max(1, minrank), maxrank):
-            self._index(runners.RankRunner(rank))
+        with self.tokenizer.name_analyzer() as analyzer:
+            for rank in range(max(1, minrank), maxrank):
+                self._index(runners.RankRunner(rank, analyzer))
 
-        if maxrank == 30:
-            self._index(runners.RankRunner(0))
-            self._index(runners.InterpolationRunner(), 20)
-            self._index(runners.RankRunner(30), 20)
-        else:
-            self._index(runners.RankRunner(maxrank))
+            if maxrank == 30:
+                self._index(runners.RankRunner(0, analyzer))
+                self._index(runners.InterpolationRunner(analyzer), 20)
+                self._index(runners.RankRunner(30, analyzer), 20)
+            else:
+                self._index(runners.RankRunner(maxrank, analyzer))
 
 
     def index_postcodes(self):
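
Both entry points above now open a single name analyzer for the whole
indexing run and pass it into every runner, rather than creating one per
batch. name_analyzer() is used as a context manager, so whatever
connections or caches the analyzer holds are released deterministically
when indexing finishes (or fails). A sketch of the protocol such an
analyzer has to satisfy; the class below is illustrative, not Nominatim's
implementation:

    class NameAnalyzer:
        """Illustrative analyzer obeying the context-manager protocol."""

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            self.close()       # runs even when indexing raises

        def close(self):
            pass               # release per-run connections and caches
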
@@ -174,32 +179,75 @@ class Indexer:
         LOG.warning("Starting %s (using batch size %s)", runner.name(), batch)
 
         with connect(self.dsn) as conn:
         LOG.warning("Starting %s (using batch size %s)", runner.name(), batch)
 
         with connect(self.dsn) as conn:
+            psycopg2.extras.register_hstore(conn)
             with conn.cursor() as cur:
                 total_tuples = cur.scalar(runner.sql_count_objects())
                 LOG.debug("Total number of rows: %i", total_tuples)
 
+                # Fetch the hstore type OIDs manually: register_hstore
+                # cannot look them up on the asynchronous connection below.
+                hstore_oid = cur.scalar("SELECT 'hstore'::regtype::oid")
+                hstore_array_oid = cur.scalar("SELECT 'hstore[]'::regtype::oid")
+
             conn.commit()
 
             progress = ProgressLogger(runner.name(), total_tuples)
 
+            fetcher_wait = 0
+            pool_wait = 0
+
             if total_tuples > 0:
                 with conn.cursor(name='places') as cur:
                     cur.execute(runner.sql_get_objects())
 
+                    fetcher = DBConnection(self.dsn, cursor_factory=psycopg2.extras.DictCursor)
+                    psycopg2.extras.register_hstore(fetcher.conn,
+                                                    oid=hstore_oid,
+                                                    array_oid=hstore_array_oid)
+
                     with WorkerPool(self.dsn, self.num_threads) as pool:
-                        while True:
-                            places = [p for p in cur.fetchmany(batch)]
+                        places = self._fetch_next_batch(cur, fetcher, runner)
+                        while places is not None:
                             if not places:
-                                break
-
-                            LOG.debug("Processing places: %s", str(places))
-                            worker = pool.next_free_worker()
-
-                            runner.index_places(worker, places)
-                            progress.add(len(places))
+                                t0 = time.time()
+                                fetcher.wait()
+                                fetcher_wait += time.time() - t0
+                                places = fetcher.cursor.fetchall()
+
+                            # asynchronously get the next batch
+                            next_places = self._fetch_next_batch(cur, fetcher, runner)
+
+                            # And insert the current batch
+                            for idx in range(0, len(places), batch):
+                                t0 = time.time()
+                                worker = pool.next_free_worker()
+                                pool_wait += time.time() - t0
+                                part = places[idx:idx+batch]
+                                LOG.debug("Processing places: %s", str(part))
+                                runner.index_places(worker, part)
+                                progress.add(len(part))
+
+                            places = next_places
 
                         pool.finish_all()
 
+                    fetcher.wait()
+                    fetcher.close()
+
                 conn.commit()
 
         progress.done()
+        LOG.warning("Wait time: fetcher: {}s,  pool: {}s".format(fetcher_wait, pool_wait))
+
+
+    def _fetch_next_batch(self, cur, fetcher, runner):
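+        """ Send off a request for the next batch of places. Returns
+            None when there are no more rows, the rows themselves when
+            the runner needs no extra data, and an empty list as a
+            marker that full place details are being fetched
+            asynchronously via `fetcher`.
+        """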
+        ids = cur.fetchmany(100)
+
+        if not ids:
+            return None
+
+        if not hasattr(runner, 'get_place_details'):
+            return ids
+
+        runner.get_place_details(fetcher, ids)
+        return []
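
Taken together, the rewritten loop in _index() is a double-buffering
scheme: while the worker pool indexes the current batch, the rows for the
next batch are already being fetched on the second, asynchronous
connection, and the new wait-time counters show which side is the
bottleneck. A minimal, self-contained sketch of the pattern, using a
thread where the code above uses psycopg2's asynchronous connection (all
names here are illustrative, not Nominatim APIs):

    import threading

    def double_buffer(batches, fetch, index):
        """Overlap fetching of batch N+1 with indexing of batch N."""
        def fetch_async(ids):
            box = {}
            def run():
                box['rows'] = fetch(ids)
            thread = threading.Thread(target=run)
            thread.start()
            def wait():                  # blocks only while still fetching
                thread.join()
                return box['rows']
            return wait

        batches = iter(batches)
        ids = next(batches, None)
        wait = fetch_async(ids) if ids is not None else None
        while wait is not None:
            rows = wait()                # collect the running fetch
            ids = next(batches, None)    # start the next fetch first...
            wait = fetch_async(ids) if ids is not None else None
            index(rows)                  # ...then index the current rows

    # e.g.: double_buffer([[1, 2], [3]], lambda ids: [i * i for i in ids], print)

In _index() itself the overlap needs no threads: _fetch_next_batch() fires
the query on the non-blocking fetcher connection and returns immediately,
and fetcher.wait() only blocks when the results have not arrived yet.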