X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/b894d2c04aed9c8c13e8cae9d8d9e5f0369ad737..53dbe58ada3fb34534fa2a1d079c2cbbbe09496c:/nominatim/tokenizer/icu_tokenizer.py?ds=sidebyside

diff --git a/nominatim/tokenizer/icu_tokenizer.py b/nominatim/tokenizer/icu_tokenizer.py
index 61263678..3331a321 100644
--- a/nominatim/tokenizer/icu_tokenizer.py
+++ b/nominatim/tokenizer/icu_tokenizer.py
@@ -2,7 +2,6 @@
 Tokenizer implementing normalisation as used before Nominatim 4 but using
 libICU instead of the PostgreSQL module.
 """
-from collections import Counter
 import itertools
 import json
 import logging
@@ -13,11 +12,10 @@ from nominatim.db.connection import connect
 from nominatim.db.properties import set_property, get_property
 from nominatim.db.utils import CopyBuffer
 from nominatim.db.sql_preprocessor import SQLPreprocessor
+from nominatim.indexer.place_info import PlaceInfo
 from nominatim.tokenizer.icu_rule_loader import ICURuleLoader
-from nominatim.tokenizer.icu_name_processor import ICUNameProcessor, ICUNameProcessorRules
 from nominatim.tokenizer.base import AbstractAnalyzer, AbstractTokenizer
 
-DBCFG_MAXWORDFREQ = "tokenizer_maxwordfreq"
 DBCFG_TERM_NORMALIZATION = "tokenizer_term_normalization"
 
 LOG = logging.getLogger()
@@ -37,9 +35,8 @@ class LegacyICUTokenizer(AbstractTokenizer):
     def __init__(self, dsn, data_dir):
         self.dsn = dsn
         self.data_dir = data_dir
-        self.naming_rules = None
+        self.loader = None
         self.term_normalization = None
-        self.max_word_frequency = None
 
 
     def init_new_db(self, config, init_db=True):
@@ -48,56 +45,76 @@ class LegacyICUTokenizer(AbstractTokenizer):
             This copies all necessary data in the project directory to make sure
             the tokenizer remains stable even over updates.
         """
-        loader = ICURuleLoader(config.load_sub_configuration('icu_tokenizer.yaml',
-                                                             config='TOKENIZER_CONFIG'))
-        self.naming_rules = ICUNameProcessorRules(loader=loader)
+        self.loader = ICURuleLoader(config)
+
         self.term_normalization = config.TERM_NORMALIZATION
-        self.max_word_frequency = config.MAX_WORD_FREQUENCY
 
         self._install_php(config.lib_dir.php)
-        self._save_config(config)
+        self._save_config()
 
         if init_db:
             self.update_sql_functions(config)
             self._init_db_tables(config)
 
 
-    def init_from_project(self):
+    def init_from_project(self, config):
         """ Initialise the tokenizer from the project directory.
         """
+        self.loader = ICURuleLoader(config)
+
         with connect(self.dsn) as conn:
-            self.naming_rules = ICUNameProcessorRules(conn=conn)
+            self.loader.load_config_from_db(conn)
             self.term_normalization = get_property(conn, DBCFG_TERM_NORMALIZATION)
-            self.max_word_frequency = get_property(conn, DBCFG_MAXWORDFREQ)
 
 
-    def finalize_import(self, _):
+    def finalize_import(self, config):
         """ Do any required postprocessing to make the tokenizer data ready
             for use.
         """
+        with connect(self.dsn) as conn:
+            sqlp = SQLPreprocessor(conn, config)
+            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_indices.sql')
 
 
     def update_sql_functions(self, config):
         """ Reimport the SQL functions for this tokenizer.
         """
         with connect(self.dsn) as conn:
-            max_word_freq = get_property(conn, DBCFG_MAXWORDFREQ)
             sqlp = SQLPreprocessor(conn, config)
-            sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer.sql',
-                              max_word_freq=max_word_freq)
+            sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer.sql')
 
 
-    def check_database(self):
+    def check_database(self, config):
         """ Check that the tokenizer is set up correctly.
         """
-        self.init_from_project()
+        self.init_from_project(config)
 
-        if self.naming_rules is None:
+        if self.term_normalization is None:
             return "Configuration for tokenizer 'icu' are missing."
 
         return None
 
 
+    def update_statistics(self):
+        """ Recompute frequencies for all name words.
+        """
+        with connect(self.dsn) as conn:
+            if conn.table_exists('search_name'):
+                with conn.cursor() as cur:
+                    cur.drop_table("word_frequencies")
+                    LOG.info("Computing word frequencies")
+                    cur.execute("""CREATE TEMP TABLE word_frequencies AS
+                                     SELECT unnest(name_vector) as id, count(*)
+                                     FROM search_name GROUP BY id""")
+                    cur.execute("CREATE INDEX ON word_frequencies(id)")
+                    LOG.info("Update word table with recomputed frequencies")
+                    cur.execute("""UPDATE word
+                                   SET info = info || jsonb_build_object('count', count)
+                                   FROM word_frequencies WHERE word_id = id""")
+                    cur.drop_table("word_frequencies")
+            conn.commit()
+
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokinzer. Analyzers are context managers and should
@@ -113,7 +130,8 @@ class LegacyICUTokenizer(AbstractTokenizer):
 
             Analyzers are not thread-safe. You need to instantiate one per thread.
         """
-        return LegacyICUNameAnalyzer(self.dsn, ICUNameProcessor(self.naming_rules))
+        return LegacyICUNameAnalyzer(self.dsn, self.loader.make_sanitizer(),
+                                     self.loader.make_token_analysis())
 
 
     def _install_php(self, phpdir):
@@ -122,20 +140,18 @@ class LegacyICUTokenizer(AbstractTokenizer):
         php_file = self.data_dir / "tokenizer.php"
         php_file.write_text(dedent(f"""\
-                if brace_idx >= 0:
-                    full_names.add(name[:brace_idx].strip())
-
-        return full_names
-
-
     def _add_postcode(self, postcode):
         """ Make sure the normalized postcode is present in the word table.
         """
@@ -492,7 +509,7 @@ class LegacyICUNameAnalyzer(AbstractAnalyzer):
             postcode = self.normalize_postcode(postcode)
 
             if postcode not in self._cache.postcodes:
-                term = self.name_processor.get_search_normalized(postcode)
+                term = self._search_normalized(postcode)
                 if not term:
                     return
 
@@ -551,30 +568,25 @@ class _TokenInfo:
             self.data['hnr'] = ';'.join(hnrs)
 
 
-    def add_street(self, fulls, _):
+    def add_street(self, tokens):
         """ Add addr:street match terms.
         """
-        if fulls:
-            self.data['street'] = self._mk_array(fulls)
+        if tokens:
+            self.data['street'] = self._mk_array(tokens)
 
 
-    def add_place(self, fulls, partials):
+    def add_place(self, tokens):
         """ Add addr:place search and match terms.
         """
-        if fulls:
-            self.data['place_search'] = self._mk_array(itertools.chain(fulls, partials))
-            self.data['place_match'] = self._mk_array(fulls)
+        if tokens:
+            self.data['place'] = self._mk_array(tokens)
 
 
     def add_address_terms(self, terms):
         """ Add additional address terms.
         """
-        tokens = {}
-
-        for key, fulls, partials in terms:
-            if fulls:
-                tokens[key] = [self._mk_array(itertools.chain(fulls, partials)),
-                               self._mk_array(fulls)]
+        tokens = {key: self._mk_array(partials)
+                  for key, partials in terms if partials}
 
         if tokens:
             self.data['addr'] = tokens
@@ -588,6 +600,7 @@ class _TokenCache:
     """
     def __init__(self):
         self.names = {}
+        self.partials = {}
         self.postcodes = set()
         self.housenumbers = {}