clean_housenumbers: make kinds and delimiters configurable

diff --git a/nominatim/tokenizer/legacy_tokenizer.py b/nominatim/tokenizer/legacy_tokenizer.py
index 8957426b353efa7ec17f572e754f7fe47f90022c..551b0536b88dbe77012a364015005a60cbe19548 100644
@@ -1,3 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2022 by the Nominatim developer community.
+# For a full list of authors see the git log.
 """
 Tokenizer implementing normalisation as used before Nominatim 4.
 """
@@ -113,7 +119,7 @@ class LegacyTokenizer(AbstractTokenizer):
             self._init_db_tables(config)
 
 
-    def init_from_project(self):
+    def init_from_project(self, _):
         """ Initialise the tokenizer from the project directory.
         """
         with connect(self.dsn) as conn:
@@ -142,7 +148,7 @@ class LegacyTokenizer(AbstractTokenizer):
                               modulepath=modulepath)
 
 
-    def check_database(self):
+    def check_database(self, _):
         """ Check that the tokenizer is set up correctly.
         """
         hint = """\
@@ -186,6 +192,25 @@ class LegacyTokenizer(AbstractTokenizer):
             self._save_config(conn, config)
 
 
+    def update_statistics(self):
+        """ Recompute the frequency of full words.
+        """
+        with connect(self.dsn) as conn:
+            if conn.table_exists('search_name'):
+                with conn.cursor() as cur:
+                    cur.drop_table("word_frequencies")
+                    LOG.info("Computing word frequencies")
+                    cur.execute("""CREATE TEMP TABLE word_frequencies AS
+                                     SELECT unnest(name_vector) as id, count(*)
+                                     FROM search_name GROUP BY id""")
+                    cur.execute("CREATE INDEX ON word_frequencies(id)")
+                    LOG.info("Update word table with recomputed frequencies")
+                    cur.execute("""UPDATE word SET search_name_count = count
+                                   FROM word_frequencies
+                                   WHERE word_token like ' %' and word_id = id""")
+                    cur.drop_table("word_frequencies")
+            conn.commit()
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokenizer. Analyzers are context managers and should
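The new `update_statistics` rebuilds the frequency of every full word (full-word tokens are stored with a leading space, hence the `like ' %'` filter) from `search_name` and writes the counts back into `word.search_name_count`, using an indexed temporary table to keep the update join cheap. A usage sketch; the factory helper is an assumption, not shown in this diff:

    # Hedged usage sketch: refresh word frequencies after a large import.
    from nominatim.tokenizer import factory           # assumed module path

    tokenizer = factory.get_tokenizer_for_db(config)  # assumed factory helper
    tokenizer.update_statistics()                     # recomputes word.search_name_count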
@@ -405,16 +430,15 @@ class LegacyNameAnalyzer(AbstractAnalyzer):
         """
         token_info = _TokenInfo(self._cache)
 
-        names = place.get('name')
+        names = place.name
 
         if names:
             token_info.add_names(self.conn, names)
 
-            country_feature = place.get('country_feature')
-            if country_feature and re.fullmatch(r'[A-Za-z][A-Za-z]', country_feature):
-                self.add_country_names(country_feature.lower(), names)
+            if place.is_country():
+                self.add_country_names(place.country_code, names)
 
-        address = place.get('address')
+        address = place.address
         if address:
             self._process_place_address(token_info, address)
 
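The analyzer now receives a place object with attribute access instead of a plain dict, and the hand-written two-letter `country_feature` check is folded into `place.is_country()`. A rough sketch of the wrapper interface these lines assume; the attribute names follow the diff, while the `is_country` rule merely mirrors the removed regex and is hypothetical:

    # Hedged sketch of the assumed place wrapper.
    class PlaceInfo:
        def __init__(self, info):
            self._info = info                # raw place dict from the importer

        @property
        def name(self):
            return self._info.get('name')

        @property
        def address(self):
            return self._info.get('address')

        @property
        def country_code(self):
            return self._info.get('country_code')

        def is_country(self):
            # hypothetical rule, mirroring the removed two-letter check
            code = self.country_code
            return bool(code) and len(code) == 2 and code.isalpha()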
@@ -495,7 +519,9 @@ class _TokenInfo:
             with conn.cursor() as cur:
                 return cur.scalar("SELECT word_ids_from_name(%s)::text", (name, ))
 
-        self.data['street'] = self.cache.streets.get(street, _get_street)
+        tokens = self.cache.streets.get(street, _get_street)
+        if tokens:
+            self.data['street'] = tokens
 
 
     def add_place(self, conn, place):
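With this change an empty lookup result is no longer stored, so `token_info.data` never carries a null `street` entry. A standalone illustration of the guard (values invented):

    # Hedged illustration: empty street token sets are skipped entirely.
    data = {}
    for tokens in ('{42,43}', None, ''):
        if tokens:            # None and empty strings fall through
            data['street'] = tokens
    assert data == {'street': '{42,43}'}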
@@ -524,9 +550,12 @@ class _TokenInfo:
 
         tokens = {}
         for key, value in terms:
-            tokens[key] = self.cache.address_terms.get(value, _get_address_term)
+            items = self.cache.address_terms.get(value, _get_address_term)
+            if items[0] or items[1]:
+                tokens[key] = items
 
-        self.data['addr'] = tokens
+        if tokens:
+            self.data['addr'] = tokens
 
 
 class _LRU:
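The address hunk applies the same idea to pairs: judging by the `items[0] or items[1]` test, each lookup yields a two-element token result, a key survives only if at least one side is non-empty, and the whole `addr` entry is dropped when nothing is left. A self-contained illustration (data invented):

    # Hedged illustration of the pair guard used for address terms.
    terms = [('city', ('{1,2}', '{3}')), ('street', ('', ''))]
    data = {}
    tokens = {}
    for key, items in terms:
        if items[0] or items[1]:   # keep pairs with at least one token set
            tokens[key] = items
    if tokens:                     # omit 'addr' entirely when empty
        data['addr'] = tokens
    assert data == {'addr': {'city': ('{1,2}', '{3}')}}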