+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2022 by the Nominatim developer community.
+# For a full list of authors see the git log.
"""
Helper class to create ICU rules from a configuration file.
"""
+from typing import Mapping, Any, Dict, Optional
+import importlib
import io
-import yaml
+import json
import logging
-from collections import defaultdict
-import itertools
-
-from icu import Transliterator
+from nominatim.config import flatten_config_list, Configuration
+from nominatim.db.properties import set_property, get_property
+from nominatim.db.connection import Connection
from nominatim.errors import UsageError
+from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
+from nominatim.tokenizer.icu_token_analysis import ICUTokenAnalysis
+from nominatim.tokenizer.token_analysis.base import AnalysisModule, Analyser
+import nominatim.data.country_info
LOG = logging.getLogger()
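+
+# Keys under which the loaded rule sets are saved as database properties
+# (written by save_config_to_db() and read back by load_config_from_db()).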
+DBCFG_IMPORT_NORM_RULES = "tokenizer_import_normalisation"
+DBCFG_IMPORT_TRANS_RULES = "tokenizer_import_transliteration"
+DBCFG_IMPORT_ANALYSIS_RULES = "tokenizer_import_analysis_rules"
+
+
+def _get_section(rules: Mapping[str, Any], section: str) -> Any:
+ """ Get the section named 'section' from the rules. If the section does
+ not exist, raise a usage error with a meaningful message.
+ """
+ if section not in rules:
+ LOG.fatal("Section '%s' not found in tokenizer config.", section)
+ raise UsageError("Syntax error in tokenizer configuration file.")
+
+ return rules[section]
+
class ICURuleLoader:
""" Compiler for ICU rules from a tokenizer configuration file.
"""
- def __init__(self, configfile):
- self.configfile = configfile
+ def __init__(self, config: Configuration) -> None:
+ rules = config.load_sub_configuration('icu_tokenizer.yaml',
+ config='TOKENIZER_CONFIG')
- if configfile.suffix == '.yaml':
- self._load_from_yaml()
- else:
- raise UsageError("Unknown format of tokenizer configuration.")
+ # Make sure country information is available to analyzers and sanitizers.
+ nominatim.data.country_info.setup_country_config(config)
+
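+        # Assemble the ICU rule strings and the token-analysis configuration.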
+ self.normalization_rules = self._cfg_to_icu_rules(rules, 'normalization')
+ self.transliteration_rules = self._cfg_to_icu_rules(rules, 'transliteration')
+ self.analysis_rules = _get_section(rules, 'token-analysis')
+ self._setup_analysis()
+
+ # Load optional sanitizer rule set.
+ self.sanitizer_rules = rules.get('sanitizers', [])
- def get_search_rules(self):
- """ Returns the ICU rules to be used during search.
- The rules combine normalization, compound decomposition (including
- abbreviated compounds) and transliteration.
+ def load_config_from_db(self, conn: Connection) -> None:
+ """ Get previously saved parts of the configuration from the
+ database.
"""
- # First apply the normalization rules.
- rules = io.StringIO()
- rules.write(self.normalization_rules)
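+        # Rules stored in the database take precedence over the ones
+        # from the configuration file.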
+ rules = get_property(conn, DBCFG_IMPORT_NORM_RULES)
+ if rules is not None:
+ self.normalization_rules = rules
- # For all compound suffixes: add them in their full and any abbreviated form.
- suffixes = set()
- for suffix in self.compound_suffixes:
- suffixes.add(suffix)
- suffixes.update(self.abbreviations.get(suffix, []))
+ rules = get_property(conn, DBCFG_IMPORT_TRANS_RULES)
+ if rules is not None:
+ self.transliteration_rules = rules
- for suffix in sorted(suffixes, key=lambda x:len(x), reverse=True):
- rules.write("'{0} ' > ' {0} ';".format(suffix))
+ rules = get_property(conn, DBCFG_IMPORT_ANALYSIS_RULES)
+ if rules:
+ self.analysis_rules = json.loads(rules)
+ else:
+ self.analysis_rules = []
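+        # Rebuild the analyzer factories from the restored rules.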
+ self._setup_analysis()
- # Finally add transliteration.
- rules.write(self.transliteration_rules)
- return rules.getvalue()
- def get_normalization_rules(self):
- """ Return rules for normalisation of a term.
+ def save_config_to_db(self, conn: Connection) -> None:
+        """ Save those parts of the configuration to the database that
+            cannot be changed later.
"""
- return self.normalization_rules
+ set_property(conn, DBCFG_IMPORT_NORM_RULES, self.normalization_rules)
+ set_property(conn, DBCFG_IMPORT_TRANS_RULES, self.transliteration_rules)
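+        # The analysis rules are a nested structure, so they are serialised as JSON.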
+ set_property(conn, DBCFG_IMPORT_ANALYSIS_RULES, json.dumps(self.analysis_rules))
- def get_transliteration_rules(self):
- """ Return the rules for converting a string into its asciii representation.
+
+ def make_sanitizer(self) -> PlaceSanitizer:
+ """ Create a place sanitizer from the configured rules.
"""
- return self.transliteration_rules
+ return PlaceSanitizer(self.sanitizer_rules)
+
- def get_replacement_pairs(self):
- """ Returns the list of possible compound decompositions with
- application of abbreviations included.
- The result is a list of pairs: the first item is the sequence to
- replace, the second is a list of replacements.
+ def make_token_analysis(self) -> ICUTokenAnalysis:
+        """ Create a token analyser from the previously loaded rules.
"""
- synonyms = defaultdict(set)
+ return ICUTokenAnalysis(self.normalization_rules,
+ self.transliteration_rules, self.analysis)
- for full, abbr in self.abbreviations.items():
- key = ' ' + full + ' '
- # Entries in the abbreviation list always apply to full words:
- synonyms[key].update((' ' + a + ' ' for a in abbr))
- # Replacements are optional, so add a noop
- synonyms[key].add(key)
- # Entries in the compound list expand to themselves and to
- # abbreviations.
- for suffix in self.compound_suffixes:
- keyset = synonyms[suffix + ' ']
- keyset.add(' ' + suffix + ' ')
- keyset.update((' ' + a + ' ' for a in self.abbreviations.get(suffix, [])))
- # The terms the entries are shortended to, need to be decompunded as well.
- for abbr in self.abbreviations.get(suffix, []):
- synonyms[abbr + ' '].add(' ' + abbr + ' ')
+ def get_search_rules(self) -> str:
+ """ Return the ICU rules to be used during search.
+ The rules combine normalization and transliteration.
+ """
+ # First apply the normalization rules.
+ rules = io.StringIO()
+ rules.write(self.normalization_rules)
- # sort the resulting list by descending length (longer matches are prefered).
- sorted_keys = sorted(synonyms.keys(), key=lambda x: len(x), reverse=True)
+ # Then add transliteration.
+ rules.write(self.transliteration_rules)
+ return rules.getvalue()
- return [(k, list(synonyms[k])) for k in sorted_keys]
+ def get_normalization_rules(self) -> str:
+ """ Return rules for normalisation of a term.
+ """
+ return self.normalization_rules
- def _load_from_yaml(self):
- rules = yaml.load(self.configfile.read_text())
- self.normalization_rules = self._cfg_to_icu_rules(rules, 'normalization')
- self.transliteration_rules = self._cfg_to_icu_rules(rules, 'transliteration')
- self._parse_compound_suffix_list(self._get_section(rules, 'compound_suffixes'))
- self._parse_abbreviation_list(self._get_section(rules, 'abbreviations'))
+ def get_transliteration_rules(self) -> str:
+        """ Return the rules for converting a string into its ASCII representation.
+ """
+ return self.transliteration_rules
- def _get_section(self, rules, section):
- """ Get the section named 'section' from the rules. If the section does
- not exist, raise a usage error with a meaningful message.
+ def _setup_analysis(self) -> None:
+ """ Process the rules used for creating the various token analyzers.
"""
- if section not in rules:
- LOG.fatal("Section '%s' not found in tokenizer config '%s'.",
- section, str(self.configfile))
- raise UsageError("Syntax error in tokenizer configuration file.")
+ self.analysis: Dict[Optional[str], TokenAnalyzerRule] = {}
+
+ if not isinstance(self.analysis_rules, list):
+ raise UsageError("Configuration section 'token-analysis' must be a list.")
- return rules[section]
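+        # Each list entry configures one analyzer; the entry without an
+        # explicit 'id' becomes the default analyzer (stored under key None).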
+ for section in self.analysis_rules:
+ name = section.get('id', None)
+ if name in self.analysis:
+ if name is None:
+ LOG.fatal("ICU tokenizer configuration has two default token analyzers.")
+ else:
+ LOG.fatal("ICU tokenizer configuration has two token "
+ "analyzers with id '%s'.", name)
+ raise UsageError("Syntax error in ICU tokenizer config.")
+ self.analysis[name] = TokenAnalyzerRule(section, self.normalization_rules)
- def _cfg_to_icu_rules(self, rules, section):
+ @staticmethod
+ def _cfg_to_icu_rules(rules: Mapping[str, Any], section: str) -> str:
""" Load an ICU ruleset from the given section. If the section is a
simple string, it is interpreted as a file name and the rules are
loaded verbatim from the given file. The filename is expected to be
relative to the tokenizer rule file. If the section is a list then
each line is assumed to be a rule. All rules are concatenated and returned.
"""
- content = self._get_section(rules, section)
-
- if isinstance(content, str):
- return (self.configfile.parent / content).read_text().replace('\n', ' ')
-
- return ';'.join(content) + ';'
-
+ content = _get_section(rules, section)
- def _parse_compound_suffix_list(self, rules):
- if not rules:
- self.compound_suffixes = set()
- return
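+        # A section that is present but empty yields an empty rule set.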
+ if content is None:
+ return ''
- norm = Transliterator.createFromRules("rule_loader_normalization",
- self.normalization_rules)
+ return ';'.join(flatten_config_list(content, section)) + ';'
- # Make sure all suffixes are in their normalised form.
- self.compound_suffixes = set((norm.transliterate(s) for s in rules))
+class TokenAnalyzerRule:
+ """ Factory for a single analysis module. The class saves the configuration
+ and creates a new token analyzer on request.
+ """
- def _parse_abbreviation_list(self, rules):
- self.abbreviations = defaultdict(list)
-
- if not rules:
- return
-
- norm = Transliterator.createFromRules("rule_loader_normalization",
- self.normalization_rules)
-
- for rule in rules:
- parts = rule.split('=>')
- if len(parts) != 2:
- LOG.fatal("Syntax error in abbreviation section, line: %s", rule)
- raise UsageError("Syntax error in tokenizer configuration file.")
+ def __init__(self, rules: Mapping[str, Any], normalization_rules: str) -> None:
+ # Find the analysis module
+ module_name = 'nominatim.tokenizer.token_analysis.' \
+ + _get_section(rules, 'analyzer').replace('-', '_')
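+        # e.g. 'analyzer: generic' loads nominatim.tokenizer.token_analysis.generic.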
+ self._analysis_mod: AnalysisModule = importlib.import_module(module_name)
- # Make sure all terms match the normalised version.
- fullterms = (norm.transliterate(t.strip()) for t in parts[0].split(','))
- abbrterms = (norm.transliterate(t.strip()) for t in parts[1].split(','))
+ # Load the configuration.
+ self.config = self._analysis_mod.configure(rules, normalization_rules)
- for full, abbr in itertools.product(fullterms, abbrterms):
- self.abbreviations[full].append(abbr)
+ def create(self, normalizer: Any, transliterator: Any) -> Analyser:
+ """ Create a new analyser instance for the given rule.
+ """
+ return self._analysis_mod.create(normalizer, transliterator, self.config)