X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/1ffb6bd5d0e1aea120f953a55d72025f47206242..1fcc9717bb6c543aa6e6cd7b5d0a65971dfec409:/nominatim/tokenizer/legacy_tokenizer.py?ds=inline

diff --git a/nominatim/tokenizer/legacy_tokenizer.py b/nominatim/tokenizer/legacy_tokenizer.py
index 4c03678d..28f4b327 100644
--- a/nominatim/tokenizer/legacy_tokenizer.py
+++ b/nominatim/tokenizer/legacy_tokenizer.py
@@ -1,3 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2022 by the Nominatim developer community.
+# For a full list of authors see the git log.
 """
 Tokenizer implementing normalisation as used before Nominatim 4.
 """
@@ -16,6 +22,7 @@ from nominatim.db import properties
 from nominatim.db import utils as db_utils
 from nominatim.db.sql_preprocessor import SQLPreprocessor
 from nominatim.errors import UsageError
+from nominatim.tokenizer.base import AbstractAnalyzer, AbstractTokenizer
 
 DBCFG_NORMALIZATION = "tokenizer_normalization"
 DBCFG_MAXWORDFREQ = "tokenizer_maxwordfreq"
@@ -76,7 +83,7 @@ def _check_module(module_dir, conn):
         raise UsageError("Database module cannot be accessed.") from err
 
 
-class LegacyTokenizer:
+class LegacyTokenizer(AbstractTokenizer):
     """ The legacy tokenizer uses a special PostgreSQL module to normalize
         names and queries. The tokenizer thus implements normalization through
         calls to the database.
@@ -112,7 +119,7 @@ class LegacyTokenizer:
             self._init_db_tables(config)
 
 
-    def init_from_project(self):
+    def init_from_project(self, _):
        """ Initialise the tokenizer from the project directory.
        """
        with connect(self.dsn) as conn:
@@ -141,7 +148,7 @@ class LegacyTokenizer:
                              modulepath=modulepath)
 
 
-    def check_database(self):
+    def check_database(self, _):
        """ Check that the tokenizer is set up correctly.
        """
        hint = """\
@@ -185,6 +192,32 @@ class LegacyTokenizer:
             self._save_config(conn, config)
 
 
+    def update_statistics(self):
+        """ Recompute the frequency of full words.
+        """
+        with connect(self.dsn) as conn:
+            if conn.table_exists('search_name'):
+                with conn.cursor() as cur:
+                    cur.drop_table("word_frequencies")
+                    LOG.info("Computing word frequencies")
+                    cur.execute("""CREATE TEMP TABLE word_frequencies AS
+                                     SELECT unnest(name_vector) as id, count(*)
+                                     FROM search_name GROUP BY id""")
+                    cur.execute("CREATE INDEX ON word_frequencies(id)")
+                    LOG.info("Update word table with recomputed frequencies")
+                    cur.execute("""UPDATE word SET search_name_count = count
+                                   FROM word_frequencies
+                                   WHERE word_token like ' %' and word_id = id""")
+                    cur.drop_table("word_frequencies")
+            conn.commit()
+
+
+    def update_word_tokens(self):
+        """ No house-keeping implemented for the legacy tokenizer.
+        """
+        LOG.info("No tokenizer clean-up available.")
+
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokinzer. Analyzers are context managers and should
@@ -238,7 +271,7 @@ class LegacyTokenizer:
         properties.set_property(conn, DBCFG_MAXWORDFREQ, config.MAX_WORD_FREQUENCY)
 
 
-class LegacyNameAnalyzer:
+class LegacyNameAnalyzer(AbstractAnalyzer):
     """ The legacy analyzer uses the special Postgresql module for
         splitting names.
 
@@ -255,14 +288,6 @@ class LegacyNameAnalyzer:
         self._cache = _TokenCache(self.conn)
 
 
-    def __enter__(self):
-        return self
-
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        self.close()
-
-
     def close(self):
         """ Free all resources used by the analyzer.
         """
@@ -271,8 +296,7 @@ class LegacyNameAnalyzer:
             self.conn = None
 
 
-    @staticmethod
-    def get_word_token_info(conn, words):
+    def get_word_token_info(self, words):
         """ Return token information for the given list of words.
             If a word starts with # it is assumed to be a full name
             otherwise is a partial name.
@@ -283,7 +307,7 @@ class LegacyNameAnalyzer:
             The function is used for testing and debugging only
             and not necessarily efficient.
         """
-        with conn.cursor() as cur:
+        with self.conn.cursor() as cur:
             cur.execute("""SELECT t.term, word_token, word_id
                            FROM word, (SELECT unnest(%s::TEXT[]) as term) t
                            WHERE word_token = (CASE
@@ -371,19 +395,17 @@ class LegacyNameAnalyzer:
             to_delete = existing_phrases - norm_phrases
 
             if to_add:
-                psycopg2.extras.execute_values(
-                    cur,
+                cur.execute_values(
                     """ INSERT INTO word (word_id, word_token, word, class, type,
                                           search_name_count, operator)
-                        (SELECT nextval('seq_word'), make_standard_name(name), name,
+                        (SELECT nextval('seq_word'), ' ' || make_standard_name(name), name,
                                 class, type, 0,
                                 CASE WHEN op in ('in', 'near') THEN op ELSE null END
                            FROM (VALUES %s) as v(name, class, type, op))""",
                     to_add)
 
             if to_delete and should_replace:
-                psycopg2.extras.execute_values(
-                    cur,
+                cur.execute_values(
                     """ DELETE FROM word USING (VALUES %s) as v(name, in_class, in_type, op)
                         WHERE word = name and class = in_class and type = in_type
                               and ((op = '-' and operator is null) or op = operator)""",
@@ -400,11 +422,11 @@ class LegacyNameAnalyzer:
             cur.execute(
                 """INSERT INTO word (word_id, word_token, country_code)
                    (SELECT nextval('seq_word'), lookup_token, %s
-                      FROM (SELECT ' ' || make_standard_name(n) as lookup_token
+                      FROM (SELECT DISTINCT ' ' || make_standard_name(n) as lookup_token
                             FROM unnest(%s)n) y
                       WHERE NOT EXISTS(SELECT * FROM word
                                        WHERE word_token = lookup_token and country_code = %s))
-                """, (country_code, names, country_code))
+                """, (country_code, list(names.values()), country_code))
 
 
     def process_place(self, place):
@@ -415,47 +437,46 @@ class LegacyNameAnalyzer:
         """
         token_info = _TokenInfo(self._cache)
 
-        names = place.get('name')
+        names = place.name
 
         if names:
             token_info.add_names(self.conn, names)
 
-            country_feature = place.get('country_feature')
-            if country_feature and re.fullmatch(r'[A-Za-z][A-Za-z]', country_feature):
-                self.add_country_names(country_feature.lower(), list(names.values()))
-
-        address = place.get('address')
+            if place.is_country():
+                self.add_country_names(place.country_code, names)
+
+        address = place.address
         if address:
-            hnrs = []
-            addr_terms = []
-            for key, value in address.items():
-                if key == 'postcode':
-                    self._add_postcode(value)
-                elif key in ('housenumber', 'streetnumber', 'conscriptionnumber'):
-                    hnrs.append(value)
-                elif key == 'street':
-                    token_info.add_street(self.conn, value)
-                elif key == 'place':
-                    token_info.add_place(self.conn, value)
-                elif not key.startswith('_') and \
-                     key not in ('country', 'full'):
-                    addr_terms.append((key, value))
-
-            if hnrs:
-                token_info.add_housenumbers(self.conn, hnrs)
-
-            if addr_terms:
-                token_info.add_address_terms(self.conn, addr_terms)
+            self._process_place_address(token_info, address)
 
         return token_info.data
 
 
-    def _add_postcode(self, postcode):
-        """ Make sure the normalized postcode is present in the word table.
-        """
-        if re.search(r'[:,;]', postcode) is None:
-            self._cache.add_postcode(self.conn,
-                                     self.normalize_postcode(postcode))
+    def _process_place_address(self, token_info, address):
+        hnrs = []
+        addr_terms = []
+
+        for key, value in address.items():
+            if key == 'postcode':
+                # Make sure the normalized postcode is present in the word table.
+                if re.search(r'[:,;]', value) is None:
+                    self._cache.add_postcode(self.conn,
+                                             self.normalize_postcode(value))
+            elif key in ('housenumber', 'streetnumber', 'conscriptionnumber'):
+                hnrs.append(value)
+            elif key == 'street':
+                token_info.add_street(self.conn, value)
+            elif key == 'place':
+                token_info.add_place(self.conn, value)
+            elif not key.startswith('_') and key not in ('country', 'full'):
+                addr_terms.append((key, value))
+
+        if hnrs:
+            token_info.add_housenumbers(self.conn, hnrs)
+
+        if addr_terms:
+            token_info.add_address_terms(self.conn, addr_terms)
+
 
 
 class _TokenInfo:
@@ -494,7 +515,7 @@ class _TokenInfo:
             simple_list = list(set(simple_list))
 
         with conn.cursor() as cur:
-            cur.execute("SELECT (create_housenumbers(%s)).* ", (simple_list, ))
+            cur.execute("SELECT * FROM create_housenumbers(%s)", (simple_list, ))
             self.data['hnr_tokens'], self.data['hnr'] = cur.fetchone()
 
 
@@ -505,7 +526,9 @@ class _TokenInfo:
             with conn.cursor() as cur:
                 return cur.scalar("SELECT word_ids_from_name(%s)::text", (name, ))
 
-        self.data['street'] = self.cache.streets.get(street, _get_street)
+        tokens = self.cache.streets.get(street, _get_street)
+        if tokens:
+            self.data['street'] = tokens
 
 
     def add_place(self, conn, place):
@@ -513,10 +536,9 @@ class _TokenInfo:
         """
         def _get_place(name):
             with conn.cursor() as cur:
-                cur.execute("""SELECT (addr_ids_from_name(%s)
-                               || getorcreate_name_id(make_standard_name(%s), ''))::text,
+                cur.execute("""SELECT make_keywords(hstore('name' , %s))::text,
                                word_ids_from_name(%s)::text""",
-                            (name, name, name))
+                            (name, name))
                 return cur.fetchone()
 
         self.data['place_search'], self.data['place_match'] = \
@@ -535,9 +557,12 @@ class _TokenInfo:
 
         tokens = {}
         for key, value in terms:
-            tokens[key] = self.cache.address_terms.get(value, _get_address_term)
+            items = self.cache.address_terms.get(value, _get_address_term)
+            if items[0] or items[1]:
+                tokens[key] = items
 
-        self.data['addr'] = tokens
+        if tokens:
+            self.data['addr'] = tokens
 
 
 class _LRU:
@@ -584,7 +609,7 @@ class _TokenCache:
         with conn.cursor() as cur:
             cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text
                            FROM generate_series(1, 100) as i""")
-            self._cached_housenumbers = {str(r[0]) : r[1] for r in cur}
+            self._cached_housenumbers = {str(r[0]): r[1] for r in cur}
 
             # For postcodes remember the ones that have already been added
            self.postcodes = set()