X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/90b40fc3e684350c4dab743cb2c0a26c8792cc39..fc038261b9fe7f7d89a4000496185e72ac100a2a:/nominatim/tokenizer/legacy_tokenizer.py

diff --git a/nominatim/tokenizer/legacy_tokenizer.py b/nominatim/tokenizer/legacy_tokenizer.py
index 8957426b..b720bbdd 100644
--- a/nominatim/tokenizer/legacy_tokenizer.py
+++ b/nominatim/tokenizer/legacy_tokenizer.py
@@ -113,7 +113,7 @@ class LegacyTokenizer(AbstractTokenizer):
         self._init_db_tables(config)
 
 
-    def init_from_project(self):
+    def init_from_project(self, _):
         """ Initialise the tokenizer from the project directory.
         """
         with connect(self.dsn) as conn:
@@ -142,7 +142,7 @@ class LegacyTokenizer(AbstractTokenizer):
                               modulepath=modulepath)
 
 
-    def check_database(self):
+    def check_database(self, _):
         """ Check that the tokenizer is set up correctly.
         """
         hint = """\
@@ -186,6 +186,25 @@ class LegacyTokenizer(AbstractTokenizer):
             self._save_config(conn, config)
 
 
+    def update_statistics(self):
+        """ Recompute the frequency of full words.
+        """
+        with connect(self.dsn) as conn:
+            if conn.table_exists('search_name'):
+                with conn.cursor() as cur:
+                    cur.drop_table("word_frequencies")
+                    LOG.info("Computing word frequencies")
+                    cur.execute("""CREATE TEMP TABLE word_frequencies AS
+                                     SELECT unnest(name_vector) as id, count(*)
+                                     FROM search_name GROUP BY id""")
+                    cur.execute("CREATE INDEX ON word_frequencies(id)")
+                    LOG.info("Update word table with recomputed frequencies")
+                    cur.execute("""UPDATE word SET search_name_count = count
+                                   FROM word_frequencies
+                                   WHERE word_token like ' %' and word_id = id""")
+                    cur.drop_table("word_frequencies")
+            conn.commit()
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokinzer. Analyzers are context managers and should
@@ -405,16 +424,15 @@ class LegacyNameAnalyzer(AbstractAnalyzer):
         """
         token_info = _TokenInfo(self._cache)
 
-        names = place.get('name')
+        names = place.name
 
         if names:
             token_info.add_names(self.conn, names)
 
-            country_feature = place.get('country_feature')
-            if country_feature and re.fullmatch(r'[A-Za-z][A-Za-z]', country_feature):
-                self.add_country_names(country_feature.lower(), names)
+            if place.is_country():
+                self.add_country_names(place.country_code, names)
 
-        address = place.get('address')
+        address = place.address
         if address:
             self._process_place_address(token_info, address)
 
@@ -495,7 +513,9 @@ class _TokenInfo:
             with conn.cursor() as cur:
                 return cur.scalar("SELECT word_ids_from_name(%s)::text", (name, ))
 
-        self.data['street'] = self.cache.streets.get(street, _get_street)
+        tokens = self.cache.streets.get(street, _get_street)
+        if tokens:
+            self.data['street'] = tokens
 
 
     def add_place(self, conn, place):
@@ -524,9 +544,12 @@ class _TokenInfo:
 
         tokens = {}
         for key, value in terms:
-            tokens[key] = self.cache.address_terms.get(value, _get_address_term)
+            items = self.cache.address_terms.get(value, _get_address_term)
+            if items[0] or items[1]:
+                tokens[key] = items
 
-        self.data['addr'] = tokens
+        if tokens:
+            self.data['addr'] = tokens
 
 
 class _LRU:
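
Note: the new update_statistics() method in the diff relies on Nominatim's own
database wrappers (connect(), conn.table_exists(), cur.drop_table()). A minimal
standalone sketch of the same recomputation, assuming plain psycopg2 and a
placeholder DSN instead of those project helpers, might look like this:

import psycopg2

def update_word_frequencies(dsn='dbname=nominatim'):
    # Hypothetical standalone equivalent of LegacyTokenizer.update_statistics().
    conn = psycopg2.connect(dsn)
    try:
        with conn.cursor() as cur:
            # Stand-in for conn.table_exists('search_name').
            cur.execute("SELECT to_regclass('search_name')")
            if cur.fetchone()[0] is None:
                return
            # Stand-in for cur.drop_table("word_frequencies").
            cur.execute("DROP TABLE IF EXISTS word_frequencies")
            # Count how often each word token occurs in search_name.
            cur.execute("""CREATE TEMP TABLE word_frequencies AS
                             SELECT unnest(name_vector) as id, count(*)
                             FROM search_name GROUP BY id""")
            cur.execute("CREATE INDEX ON word_frequencies(id)")
            # Only full words carry a leading space in the legacy word table,
            # so partial terms are left untouched.
            cur.execute("""UPDATE word SET search_name_count = count
                           FROM word_frequencies
                           WHERE word_token like ' %' and word_id = id""")
            cur.execute("DROP TABLE word_frequencies")
        conn.commit()
    finally:
        conn.close()

Staging the counts in an indexed temporary table rather than updating word row
by row lets PostgreSQL drive the UPDATE with a single join, and dropping the
table before and after keeps reruns idempotent.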