X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/be65c8303f18d0f92bbf5bc9558f8789d33f21d9..206ee8718864d623507a0ae69070478dec411e84:/nominatim/tokenizer/legacy_tokenizer.py

diff --git a/nominatim/tokenizer/legacy_tokenizer.py b/nominatim/tokenizer/legacy_tokenizer.py
index dc6972dc..551b0536 100644
--- a/nominatim/tokenizer/legacy_tokenizer.py
+++ b/nominatim/tokenizer/legacy_tokenizer.py
@@ -1,3 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2022 by the Nominatim developer community.
+# For a full list of authors see the git log.
 """
 Tokenizer implementing normalisation as used before Nominatim 4.
 """
@@ -113,7 +119,7 @@ class LegacyTokenizer(AbstractTokenizer):
         self._init_db_tables(config)
 
 
-    def init_from_project(self):
+    def init_from_project(self, _):
         """ Initialise the tokenizer from the project directory.
         """
         with connect(self.dsn) as conn:
@@ -142,7 +148,7 @@ class LegacyTokenizer(AbstractTokenizer):
                               modulepath=modulepath)
 
 
-    def check_database(self):
+    def check_database(self, _):
         """ Check that the tokenizer is set up correctly.
         """
         hint = """\
@@ -186,6 +192,25 @@ class LegacyTokenizer(AbstractTokenizer):
             self._save_config(conn, config)
 
 
+    def update_statistics(self):
+        """ Recompute the frequency of full words.
+        """
+        with connect(self.dsn) as conn:
+            if conn.table_exists('search_name'):
+                with conn.cursor() as cur:
+                    cur.drop_table("word_frequencies")
+                    LOG.info("Computing word frequencies")
+                    cur.execute("""CREATE TEMP TABLE word_frequencies AS
+                                     SELECT unnest(name_vector) as id, count(*)
+                                     FROM search_name GROUP BY id""")
+                    cur.execute("CREATE INDEX ON word_frequencies(id)")
+                    LOG.info("Update word table with recomputed frequencies")
+                    cur.execute("""UPDATE word SET search_name_count = count
+                                   FROM word_frequencies
+                                   WHERE word_token like ' %' and word_id = id""")
+                    cur.drop_table("word_frequencies")
+            conn.commit()
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokinzer. Analyzers are context managers and should
@@ -494,7 +519,9 @@ class _TokenInfo:
             with conn.cursor() as cur:
                 return cur.scalar("SELECT word_ids_from_name(%s)::text", (name, ))
 
-        self.data['street'] = self.cache.streets.get(street, _get_street)
+        tokens = self.cache.streets.get(street, _get_street)
+        if tokens:
+            self.data['street'] = tokens
 
 
     def add_place(self, conn, place):
@@ -523,9 +550,12 @@
 
         tokens = {}
         for key, value in terms:
-            tokens[key] = self.cache.address_terms.get(value, _get_address_term)
+            items = self.cache.address_terms.get(value, _get_address_term)
+            if items[0] or items[1]:
+                tokens[key] = items
 
-        self.data['addr'] = tokens
+        if tokens:
+            self.data['addr'] = tokens
 
 
 class _LRU:
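
The `update_statistics()` method added in this diff recomputes `search_name_count` in the `word` table from the `search_name` table, restricted to full-word tokens (those whose `word_token` starts with a space). A minimal sketch of how it might be invoked is shown below; the constructor arguments, DSN, and project directory are placeholders assumed for illustration and are not taken from this diff.

```python
# Hypothetical usage sketch; DSN and project directory are placeholders.
from pathlib import Path

from nominatim.tokenizer.legacy_tokenizer import LegacyTokenizer

# Assumption: the tokenizer is constructed from a libpq DSN and a project data directory.
tokenizer = LegacyTokenizer('dbname=nominatim', Path('/srv/nominatim/project'))

# The new signatures accept (and ignore) an extra positional argument.
tokenizer.init_from_project(None)

# Recompute word frequencies; a no-op unless the search_name table exists.
tokenizer.update_statistics()
```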