do not count words when in reverse-only mode
[nominatim.git] / nominatim / tokenizer / icu_tokenizer.py
index 2af0bcb257ad214f3e67621a7ac1aaa83b7092d1..3331a3210aaba70d49b602299c2ce9e88238a3a0 100644
@@ -67,10 +67,13 @@ class LegacyICUTokenizer(AbstractTokenizer):
             self.term_normalization = get_property(conn, DBCFG_TERM_NORMALIZATION)
 
 
-    def finalize_import(self, _):
+    def finalize_import(self, config):
         """ Do any required postprocessing to make the tokenizer data ready
             for use.
         """
+        with connect(self.dsn) as conn:
+            sqlp = SQLPreprocessor(conn, config)
+            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_indices.sql')
 
 
     def update_sql_functions(self, config):
@@ -96,18 +99,19 @@ class LegacyICUTokenizer(AbstractTokenizer):
         """ Recompute frequencies for all name words.
         """
         with connect(self.dsn) as conn:
-            with conn.cursor() as cur:
-                cur.drop_table("word_frequencies")
-                LOG.info("Computing word frequencies")
-                cur.execute("""CREATE TEMP TABLE word_frequencies AS
-                                 SELECT unnest(name_vector) as id, count(*)
-                                 FROM search_name GROUP BY id""")
-                cur.execute("CREATE INDEX ON word_frequencies(id)")
-                LOG.info("Update word table with recomputed frequencies")
-                cur.execute("""UPDATE word
-                               SET info = info || jsonb_build_object('count', count)
-                               FROM word_frequencies WHERE word_id = id""")
-                cur.drop_table("word_frequencies")
+            if conn.table_exists('search_name'):
+                with conn.cursor() as cur:
+                    cur.drop_table("word_frequencies")
+                    LOG.info("Computing word frequencies")
+                    cur.execute("""CREATE TEMP TABLE word_frequencies AS
+                                     SELECT unnest(name_vector) as id, count(*)
+                                     FROM search_name GROUP BY id""")
+                    cur.execute("CREATE INDEX ON word_frequencies(id)")
+                    LOG.info("Update word table with recomputed frequencies")
+                    cur.execute("""UPDATE word
+                                   SET info = info || jsonb_build_object('count', count)
+                                   FROM word_frequencies WHERE word_id = id""")
+                    cur.drop_table("word_frequencies")
             conn.commit()
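
For reference, here is a minimal standalone sketch of the guarded recomputation introduced in the second hunk, written against plain psycopg2 rather than Nominatim's connection wrapper (so the `table_exists` and `drop_table` helpers are replaced with equivalent SQL). The DSN is a placeholder and the function name is hypothetical; the SQL statements mirror those in the diff above. In a reverse-only installation the `search_name` table is never created, so the early return skips the word counting entirely.

# Hypothetical standalone illustration, not part of the Nominatim codebase.
import psycopg2

DSN = 'dbname=nominatim'  # placeholder connection string


def recompute_word_counts(dsn: str = DSN) -> None:
    conn = psycopg2.connect(dsn)
    try:
        with conn.cursor() as cur:
            # Reverse-only installations have no search_name table,
            # so there are no word frequencies to compute.
            cur.execute("SELECT to_regclass('search_name') IS NOT NULL")
            if not cur.fetchone()[0]:
                return

            cur.execute("DROP TABLE IF EXISTS word_frequencies")
            cur.execute("""CREATE TEMP TABLE word_frequencies AS
                             SELECT unnest(name_vector) as id, count(*)
                             FROM search_name GROUP BY id""")
            cur.execute("CREATE INDEX ON word_frequencies(id)")
            # Merge the counts into the info column of the word table.
            cur.execute("""UPDATE word
                           SET info = info || jsonb_build_object('count', count)
                           FROM word_frequencies WHERE word_id = id""")
            cur.execute("DROP TABLE word_frequencies")
        conn.commit()
    finally:
        conn.close()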