X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/118858a55e5ec522d870842532d26ff0276c85ba..5e792078b3ed580f723a47182325405d54cae822:/nominatim/tokenizer/icu_tokenizer.py

diff --git a/nominatim/tokenizer/icu_tokenizer.py b/nominatim/tokenizer/icu_tokenizer.py
index cb411204..33f05cc4 100644
--- a/nominatim/tokenizer/icu_tokenizer.py
+++ b/nominatim/tokenizer/icu_tokenizer.py
@@ -2,23 +2,19 @@
 Tokenizer implementing normalisation as used before Nominatim 4 but using
 libICU instead of the PostgreSQL module.
 """
-from collections import Counter
 import itertools
 import json
 import logging
 import re
 from textwrap import dedent
-from pathlib import Path
 
 from nominatim.db.connection import connect
-from nominatim.db.properties import set_property, get_property
 from nominatim.db.utils import CopyBuffer
 from nominatim.db.sql_preprocessor import SQLPreprocessor
+from nominatim.indexer.place_info import PlaceInfo
 from nominatim.tokenizer.icu_rule_loader import ICURuleLoader
-from nominatim.tokenizer.icu_name_processor import ICUNameProcessor, ICUNameProcessorRules
 from nominatim.tokenizer.base import AbstractAnalyzer, AbstractTokenizer
 
-DBCFG_MAXWORDFREQ = "tokenizer_maxwordfreq"
 DBCFG_TERM_NORMALIZATION = "tokenizer_term_normalization"
 
 LOG = logging.getLogger()
@@ -38,9 +34,7 @@ class LegacyICUTokenizer(AbstractTokenizer):
     def __init__(self, dsn, data_dir):
         self.dsn = dsn
         self.data_dir = data_dir
-        self.naming_rules = None
-        self.term_normalization = None
-        self.max_word_frequency = None
+        self.loader = None
 
 
     def init_new_db(self, config, init_db=True):
@@ -49,58 +43,67 @@ class LegacyICUTokenizer(AbstractTokenizer):
             This copies all necessary data in the project directory to make sure
             the tokenizer remains stable even over updates.
         """
-        if config.TOKENIZER_CONFIG:
-            cfgfile = Path(config.TOKENIZER_CONFIG)
-        else:
-            cfgfile = config.config_dir / 'icu_tokenizer.yaml'
-
-        loader = ICURuleLoader(cfgfile)
-        self.naming_rules = ICUNameProcessorRules(loader=loader)
-        self.term_normalization = config.TERM_NORMALIZATION
-        self.max_word_frequency = config.MAX_WORD_FREQUENCY
+        self.loader = ICURuleLoader(config)
 
         self._install_php(config.lib_dir.php)
-        self._save_config(config)
+        self._save_config()
 
         if init_db:
             self.update_sql_functions(config)
             self._init_db_tables(config)
 
 
-    def init_from_project(self):
+    def init_from_project(self, config):
         """ Initialise the tokenizer from the project directory.
         """
+        self.loader = ICURuleLoader(config)
+
         with connect(self.dsn) as conn:
-            self.naming_rules = ICUNameProcessorRules(conn=conn)
-            self.term_normalization = get_property(conn, DBCFG_TERM_NORMALIZATION)
-            self.max_word_frequency = get_property(conn, DBCFG_MAXWORDFREQ)
+            self.loader.load_config_from_db(conn)
 
 
-    def finalize_import(self, _):
+    def finalize_import(self, config):
         """ Do any required postprocessing to make the tokenizer data ready
             for use.
         """
+        with connect(self.dsn) as conn:
+            sqlp = SQLPreprocessor(conn, config)
+            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_indices.sql')
 
 
     def update_sql_functions(self, config):
         """ Reimport the SQL functions for this tokenizer.
         """
         with connect(self.dsn) as conn:
-            max_word_freq = get_property(conn, DBCFG_MAXWORDFREQ)
             sqlp = SQLPreprocessor(conn, config)
-            sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer.sql',
-                              max_word_freq=max_word_freq)
+            sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer.sql')
 
 
-    def check_database(self):
+    def check_database(self, config):
         """ Check that the tokenizer is set up correctly.
""" - self.init_from_project() + # Will throw an error if there is an issue. + self.init_from_project(config) - if self.naming_rules is None: - return "Configuration for tokenizer 'icu' are missing." - return None + def update_statistics(self): + """ Recompute frequencies for all name words. + """ + with connect(self.dsn) as conn: + if conn.table_exists('search_name'): + with conn.cursor() as cur: + cur.drop_table("word_frequencies") + LOG.info("Computing word frequencies") + cur.execute("""CREATE TEMP TABLE word_frequencies AS + SELECT unnest(name_vector) as id, count(*) + FROM search_name GROUP BY id""") + cur.execute("CREATE INDEX ON word_frequencies(id)") + LOG.info("Update word table with recomputed frequencies") + cur.execute("""UPDATE word + SET info = info || jsonb_build_object('count', count) + FROM word_frequencies WHERE word_id = id""") + cur.drop_table("word_frequencies") + conn.commit() def name_analyzer(self): @@ -118,7 +121,8 @@ class LegacyICUTokenizer(AbstractTokenizer): Analyzers are not thread-safe. You need to instantiate one per thread. """ - return LegacyICUNameAnalyzer(self.dsn, ICUNameProcessor(self.naming_rules)) + return LegacyICUNameAnalyzer(self.dsn, self.loader.make_sanitizer(), + self.loader.make_token_analysis()) def _install_php(self, phpdir): @@ -127,21 +131,18 @@ class LegacyICUTokenizer(AbstractTokenizer): php_file = self.data_dir / "tokenizer.php" php_file.write_text(dedent(f"""\ 0: + full = cur.fetchone()[0] + + self._cache.fulls[norm_name] = full + + return full + def _compute_name_tokens(self, names): """ Computes the full name and partial name tokens for the given dictionary of names. """ - full_names = self._compute_full_names(names) full_tokens = set() partial_tokens = set() - for name in full_names: - norm_name = self.name_processor.get_normalized(name) - full, part = self._cache.names.get(norm_name, (None, None)) + for name in names: + analyzer_id = name.get_attr('analyzer') + norm_name = self._normalized(name.name) + if analyzer_id is None: + token_id = norm_name + else: + token_id = f'{norm_name}@{analyzer_id}' + + full, part = self._cache.names.get(token_id, (None, None)) if full is None: - variants = self.name_processor.get_variants_ascii(norm_name) + variants = self.token_analysis.analysis[analyzer_id].get_variants_ascii(norm_name) if not variants: continue with self.conn.cursor() as cur: cur.execute("SELECT (getorcreate_full_word(%s, %s)).*", - (norm_name, variants)) + (token_id, variants)) full, part = cur.fetchone() - self._cache.names[norm_name] = (full, part) + self._cache.names[token_id] = (full, part) full_tokens.add(full) partial_tokens.update(part) @@ -473,23 +524,6 @@ class LegacyICUNameAnalyzer(AbstractAnalyzer): return full_tokens, partial_tokens - @staticmethod - def _compute_full_names(names): - """ Return the set of all full name word ids to be used with the - given dictionary of names. - """ - full_names = set() - for name in (n.strip() for ns in names.values() for n in re.split('[;,]', ns)): - if name: - full_names.add(name) - - brace_idx = name.find('(') - if brace_idx >= 0: - full_names.add(name[:brace_idx].strip()) - - return full_names - - def _add_postcode(self, postcode): """ Make sure the normalized postcode is present in the word table. 
""" @@ -497,7 +531,7 @@ class LegacyICUNameAnalyzer(AbstractAnalyzer): postcode = self.normalize_postcode(postcode) if postcode not in self._cache.postcodes: - term = self.name_processor.get_search_normalized(postcode) + term = self._search_normalized(postcode) if not term: return @@ -556,30 +590,24 @@ class _TokenInfo: self.data['hnr'] = ';'.join(hnrs) - def add_street(self, fulls, _): + def add_street(self, tokens): """ Add addr:street match terms. """ - if fulls: - self.data['street'] = self._mk_array(fulls) + self.data['street'] = self._mk_array(tokens) - def add_place(self, fulls, partials): + def add_place(self, tokens): """ Add addr:place search and match terms. """ - if fulls: - self.data['place_search'] = self._mk_array(itertools.chain(fulls, partials)) - self.data['place_match'] = self._mk_array(fulls) + if tokens: + self.data['place'] = self._mk_array(tokens) def add_address_terms(self, terms): """ Add additional address terms. """ - tokens = {} - - for key, fulls, partials in terms: - if fulls: - tokens[key] = [self._mk_array(itertools.chain(fulls, partials)), - self._mk_array(fulls)] + tokens = {key: self._mk_array(partials) + for key, partials in terms if partials} if tokens: self.data['addr'] = tokens @@ -593,6 +621,8 @@ class _TokenCache: """ def __init__(self): self.names = {} + self.partials = {} + self.fulls = {} self.postcodes = set() self.housenumbers = {}