X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/336258ecf82548a46715b7165b0547dacc161e07..52847b61a3e1bc0791dd23809dc3c50fe6810df2:/nominatim/tokenizer/icu_rule_loader.py

diff --git a/nominatim/tokenizer/icu_rule_loader.py b/nominatim/tokenizer/icu_rule_loader.py
index 0e6e40b4..cf725209 100644
--- a/nominatim/tokenizer/icu_rule_loader.py
+++ b/nominatim/tokenizer/icu_rule_loader.py
@@ -2,32 +2,25 @@
 Helper class to create ICU rules from a configuration file.
 """
 import io
+import json
 import logging
 import itertools
 import re
 
 from icu import Transliterator
 
+from nominatim.config import flatten_config_list
+from nominatim.db.properties import set_property, get_property
 from nominatim.errors import UsageError
+from nominatim.tokenizer.icu_name_processor import ICUNameProcessor
+from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
 import nominatim.tokenizer.icu_variants as variants
 
 LOG = logging.getLogger()
 
-def _flatten_config_list(content):
-    if not content:
-        return []
-
-    if not isinstance(content, list):
-        raise UsageError("List expected in ICU configuration.")
-
-    output = []
-    for ele in content:
-        if isinstance(ele, list):
-            output.extend(_flatten_config_list(ele))
-        else:
-            output.append(ele)
-
-    return output
+DBCFG_IMPORT_NORM_RULES = "tokenizer_import_normalisation"
+DBCFG_IMPORT_TRANS_RULES = "tokenizer_import_transliteration"
+DBCFG_IMPORT_ANALYSIS_RULES = "tokenizer_import_analysis_rules"
 
 
 class VariantRule:
@@ -46,12 +39,49 @@ class ICURuleLoader:
     """ Compiler for ICU rules from a tokenizer configuration file.
     """
 
-    def __init__(self, rules):
-        self.variants = set()
+    def __init__(self, config):
+        rules = config.load_sub_configuration('icu_tokenizer.yaml',
+                                              config='TOKENIZER_CONFIG')
 
         self.normalization_rules = self._cfg_to_icu_rules(rules, 'normalization')
         self.transliteration_rules = self._cfg_to_icu_rules(rules, 'transliteration')
-        self._parse_variant_list(self._get_section(rules, 'variants'))
+        self.analysis_rules = self._get_section(rules, 'token-analysis')
+        self._setup_analysis()
+
+        # Load optional sanitizer rule set.
+        self.sanitizer_rules = rules.get('sanitizers', [])
+
+
+    def load_config_from_db(self, conn):
+        """ Get previously saved parts of the configuration from the
+            database.
+        """
+        self.normalization_rules = get_property(conn, DBCFG_IMPORT_NORM_RULES)
+        self.transliteration_rules = get_property(conn, DBCFG_IMPORT_TRANS_RULES)
+        self.analysis_rules = json.loads(get_property(conn, DBCFG_IMPORT_ANALYSIS_RULES))
+        self._setup_analysis()
+
+
+    def save_config_to_db(self, conn):
+        """ Save the part of the configuration that cannot be changed into
+            the database.
+        """
+        set_property(conn, DBCFG_IMPORT_NORM_RULES, self.normalization_rules)
+        set_property(conn, DBCFG_IMPORT_TRANS_RULES, self.transliteration_rules)
+        set_property(conn, DBCFG_IMPORT_ANALYSIS_RULES, json.dumps(self.analysis_rules))
+
+
+    def make_sanitizer(self):
+        """ Create a place sanitizer from the configured rules.
+        """
+        return PlaceSanitizer(self.sanitizer_rules)
+
+
+    def make_token_analysis(self):
+        """ Create a token analyser from the previously loaded rules.
+        """
+        return self.analysis[None].create(self.normalization_rules,
+                                          self.transliteration_rules)
 
 
     def get_search_rules(self):
@@ -66,23 +96,37 @@ class ICURuleLoader:
         rules.write(self.transliteration_rules)
         return rules.getvalue()
 
+
     def get_normalization_rules(self):
         """ Return rules for normalisation of a term.
        """
        return self.normalization_rules
 
+
     def get_transliteration_rules(self):
         """ Return the rules for converting a string into its ascii representation.
         """
         return self.transliteration_rules
 
-    def get_replacement_pairs(self):
-        """ Return the list of possible compound decompositions with
-            application of abbreviations included.
-            The result is a list of pairs: the first item is the sequence to
-            replace, the second is a list of replacements.
+
+    def _setup_analysis(self):
+        """ Process the rules used for creating the various token analyzers.
         """
-        return self.variants
+        self.analysis = {}
+
+        if not isinstance(self.analysis_rules, list):
+            raise UsageError("Configuration section 'token-analysis' must be a list.")
+
+        for section in self.analysis_rules:
+            name = section.get('id', None)
+            if name in self.analysis:
+                if name is None:
+                    LOG.fatal("ICU tokenizer configuration has two default token analyzers.")
+                else:
+                    LOG.fatal("ICU tokenizer configuration has two token "
+                              "analyzers with id '%s'.", name)
+                raise UsageError("Syntax error in ICU tokenizer config.")
+            self.analysis[name] = TokenAnalyzerRule(section, self.normalization_rules)
 
 
     @staticmethod
@@ -109,18 +153,35 @@ class ICURuleLoader:
         if content is None:
             return ''
 
-        return ';'.join(_flatten_config_list(content)) + ';'
+        return ';'.join(flatten_config_list(content, section)) + ';'
 
 
-    def _parse_variant_list(self, rules):
-        self.variants.clear()
+class TokenAnalyzerRule:
+    """ Factory for a single analysis module. The class saves the configuration
+        and creates a new token analyzer on request.
+    """
+
+    def __init__(self, rules, normalization_rules):
+        self._parse_variant_list(rules.get('variants'), normalization_rules)
+
+
+    def create(self, normalization_rules, transliteration_rules):
+        """ Create an analyzer from the given rules.
+        """
+        return ICUNameProcessor(normalization_rules,
+                                transliteration_rules,
+                                self.variants)
+
+
+    def _parse_variant_list(self, rules, normalization_rules):
+        self.variants = set()
 
         if not rules:
             return
 
-        rules = _flatten_config_list(rules)
+        rules = flatten_config_list(rules, 'variants')
 
-        vmaker = _VariantMaker(self.normalization_rules)
+        vmaker = _VariantMaker(normalization_rules)
 
         properties = []
         for section in rules: