switch housenumber tokens to new word table layout
diff --git a/nominatim/tokenizer/legacy_icu_tokenizer.py b/nominatim/tokenizer/legacy_icu_tokenizer.py
index 6d3d11c163eed81995b8c2c7c71f7870de5395ec..9fbb9bb09688cf349bfb28a74c84d640fcd84767 100644
--- a/nominatim/tokenizer/legacy_icu_tokenizer.py
+++ b/nominatim/tokenizer/legacy_icu_tokenizer.py
@@ -152,7 +152,7 @@ class LegacyICUTokenizer:
         """
         with connect(self.dsn) as conn:
             sqlp = SQLPreprocessor(conn, config)
-            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_tables.sql')
+            sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer_tables.sql')
             conn.commit()
 
             LOG.warning("Precomputing word tokens")
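
This hunk switches table setup to icu_tokenizer_tables.sql, so the ICU
tokenizer now creates its own word table. A minimal sketch of the layout
implied by the queries further down in this diff; the CREATE statement and
the column types are assumptions, the authoritative definition lives in
icu_tokenizer_tables.sql:

    # Hedged sketch only: approximates the word table layout that the
    # queries in this diff rely on (word_token, a type marker, and a
    # jsonb info column). Not the real icu_tokenizer_tables.sql.
    from nominatim.db.connection import connect  # same helper used above

    with connect(dsn) as conn:
        with conn.cursor() as cur:
            cur.execute("""CREATE TABLE IF NOT EXISTS word (
                               word_id INTEGER,
                               word_token TEXT NOT NULL,
                               type TEXT NOT NULL,
                               info JSONB)""")
        conn.commit()
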
@@ -371,22 +371,28 @@ class LegacyICUNameAnalyzer:
         """
         word_tokens = set()
         for name in self._compute_full_names(names):
-            if name:
-                word_tokens.add(' ' + self.name_processor.get_search_normalized(name))
+            norm_name = self.name_processor.get_search_normalized(name)
+            if norm_name:
+                word_tokens.add(norm_name)
 
         with self.conn.cursor() as cur:
             # Get existing names
-            cur.execute("SELECT word_token FROM word WHERE country_code = %s",
+            cur.execute("""SELECT word_token FROM word
+                            WHERE type = 'C' and info->>'cc' = %s""",
                         (country_code, ))
             word_tokens.difference_update((t[0] for t in cur))
 
+            # Only add those names that are not yet in the list.
             if word_tokens:
-                cur.execute("""INSERT INTO word (word_id, word_token, country_code,
-                                                 search_name_count)
-                               (SELECT nextval('seq_word'), token, %s, 0
+                cur.execute("""INSERT INTO word (word_token, type, info)
+                               (SELECT token, 'C', json_build_object('cc', %s)
                                 FROM unnest(%s) as token)
                             """, (country_code, list(word_tokens)))
 
+            # No names are deleted at the moment.
+            # If deletion is ever implemented, the static names from the
+            # initial 'country_name' table must be preserved.
+
 
     def process_place(self, place):
         """ Determine tokenizer information about the given place.
@@ -595,7 +601,8 @@ class _TokenCache:
 
     def get_hnr_tokens(self, conn, terms):
         """ Get token ids for a list of housenumbers, looking them up in the
-            database if necessary.
+            database if necessary. `terms` is an iterable of normalized
+            housenumbers.
         """
         tokens = []
         askdb = []
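
The surrounding method follows a look-aside cache pattern: token ids
already known are appended to tokens directly, while misses are collected
in askdb and resolved with a single database query. A rough sketch of that
pattern; the dict-shaped cache, the 'H' type marker, and the lookup query
are illustrative assumptions, not the actual implementation:

    def hnr_tokens_sketch(cache, conn, terms):
        # Split the normalized housenumbers into cache hits and misses.
        tokens = []
        askdb = []
        for term in terms:
            token = cache.get(term)
            if token is None:
                askdb.append(term)
            else:
                tokens.append(token)

        # Resolve all misses with one round trip and refill the cache.
        if askdb:
            with conn.cursor() as cur:
                cur.execute("""SELECT word_token, word_id FROM word
                                WHERE type = 'H' and word_token = any(%s)""",
                            (askdb, ))
                for term, token in cur:
                    cache[term] = token
                    tokens.append(token)

        return tokens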