X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/7cfcbacfc75ab2e39ee7eab6a5cf40e8cbd152f5..a6dab5e300de9c5664f714eb2c9290d18f01067f:/nominatim/tokenizer/icu_rule_loader.py

diff --git a/nominatim/tokenizer/icu_rule_loader.py b/nominatim/tokenizer/icu_rule_loader.py
index a8bdba93..035b6698 100644
--- a/nominatim/tokenizer/icu_rule_loader.py
+++ b/nominatim/tokenizer/icu_rule_loader.py
@@ -1,3 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2022 by the Nominatim developer community.
+# For a full list of authors see the git log.
 """
 Helper class to create ICU rules from a configuration file.
 """
@@ -5,16 +11,13 @@ import importlib
 import io
 import json
 import logging
-import itertools
-import re
-
-from icu import Transliterator
 
 from nominatim.config import flatten_config_list
 from nominatim.db.properties import set_property, get_property
 from nominatim.errors import UsageError
 from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
-import nominatim.tokenizer.icu_variants as variants
+from nominatim.tokenizer.icu_token_analysis import ICUTokenAnalysis
+import nominatim.data.country_info
 
 LOG = logging.getLogger()
 
@@ -34,18 +37,6 @@ def _get_section(rules, section):
     return rules[section]
 
 
-class VariantRule:
-    """ Saves a single variant expansion.
-
-        An expansion consists of the normalized replacement term and
-        a dicitonary of properties that describe when the expansion applies.
-    """
-
-    def __init__(self, replacement, properties):
-        self.replacement = replacement
-        self.properties = properties or {}
-
-
 class ICURuleLoader:
     """ Compiler for ICU rules from a tokenizer configuration file.
     """
@@ -54,6 +45,9 @@ class ICURuleLoader:
         rules = config.load_sub_configuration('icu_tokenizer.yaml',
                                               config='TOKENIZER_CONFIG')
 
+        # Make sure country information is available to analyzers and sanitizers.
+        nominatim.data.country_info.setup_country_config(config)
+
         self.normalization_rules = self._cfg_to_icu_rules(rules, 'normalization')
         self.transliteration_rules = self._cfg_to_icu_rules(rules, 'transliteration')
         self.analysis_rules = _get_section(rules, 'token-analysis')
@@ -91,8 +85,8 @@ class ICURuleLoader:
     def make_token_analysis(self):
         """ Create a token analyser from the reviouly loaded rules.
         """
-        return self.analysis[None].create(self.normalization_rules,
-                                          self.transliteration_rules)
+        return ICUTokenAnalysis(self.normalization_rules,
+                                self.transliteration_rules, self.analysis)
 
 
     def get_search_rules(self):
@@ -136,7 +130,7 @@ class ICURuleLoader:
             else:
                 LOG.fatal("ICU tokenizer configuration has two token "
                           "analyzers with id '%s'.", name)
-                UsageError("Syntax error in ICU tokenizer config.")
+                raise UsageError("Syntax error in ICU tokenizer config.")
 
             self.analysis[name] = TokenAnalyzerRule(section, self.normalization_rules)
 
@@ -166,128 +160,7 @@ class TokenAnalyzerRule:
         module_name = 'nominatim.tokenizer.token_analysis.' \
                       + _get_section(rules, 'analyzer').replace('-', '_')
         analysis_mod = importlib.import_module(module_name)
-        self._mod_create = analysis_mod.create
+        self.create = analysis_mod.create
 
         # Load the configuration.
-        self.config = {}
-        self._parse_variant_list(rules.get('variants'), normalization_rules)
-
-
-    def create(self, normalization_rules, transliteration_rules):
-        """ Create an analyzer from the given rules.
-        """
-        return self._mod_create(normalization_rules,
-                                transliteration_rules,
-                                self.config)
-
-
-    def _parse_variant_list(self, rules, normalization_rules):
-        vset = set()
-
-        if not rules:
-            return
-
-        rules = flatten_config_list(rules, 'variants')
-
-        vmaker = _VariantMaker(normalization_rules)
-
-        properties = []
-        for section in rules:
-            # Create the property field and deduplicate against existing
-            # instances.
-            props = variants.ICUVariantProperties.from_rules(section)
-            for existing in properties:
-                if existing == props:
-                    props = existing
-                    break
-            else:
-                properties.append(props)
-
-            for rule in (section.get('words') or []):
-                vset.update(vmaker.compute(rule, props))
-
-        self.config['variants'] = vset
-
-
-class _VariantMaker:
-    """ Generater for all necessary ICUVariants from a single variant rule.
-
-        All text in rules is normalized to make sure the variants match later.
-    """
-
-    def __init__(self, norm_rules):
-        self.norm = Transliterator.createFromRules("rule_loader_normalization",
-                                                   norm_rules)
-
-
-    def compute(self, rule, props):
-        """ Generator for all ICUVariant tuples from a single variant rule.
-        """
-        parts = re.split(r'(\|)?([=-])>', rule)
-        if len(parts) != 4:
-            raise UsageError("Syntax error in variant rule: " + rule)
-
-        decompose = parts[1] is None
-        src_terms = [self._parse_variant_word(t) for t in parts[0].split(',')]
-        repl_terms = (self.norm.transliterate(t.strip()) for t in parts[3].split(','))
-
-        # If the source should be kept, add a 1:1 replacement
-        if parts[2] == '-':
-            for src in src_terms:
-                if src:
-                    for froms, tos in _create_variants(*src, src[0], decompose):
-                        yield variants.ICUVariant(froms, tos, props)
-
-        for src, repl in itertools.product(src_terms, repl_terms):
-            if src and repl:
-                for froms, tos in _create_variants(*src, repl, decompose):
-                    yield variants.ICUVariant(froms, tos, props)
-
-
-    def _parse_variant_word(self, name):
-        name = name.strip()
-        match = re.fullmatch(r'([~^]?)([^~$^]*)([~$]?)', name)
-        if match is None or (match.group(1) == '~' and match.group(3) == '~'):
-            raise UsageError("Invalid variant word descriptor '{}'".format(name))
-        norm_name = self.norm.transliterate(match.group(2))
-        if not norm_name:
-            return None
-
-        return norm_name, match.group(1), match.group(3)
-
-
-_FLAG_MATCH = {'^': '^ ',
-               '$': ' ^',
-               '': ' '}
-
-
-def _create_variants(src, preflag, postflag, repl, decompose):
-    if preflag == '~':
-        postfix = _FLAG_MATCH[postflag]
-        # suffix decomposition
-        src = src + postfix
-        repl = repl + postfix
-
-        yield src, repl
-        yield ' ' + src, ' ' + repl
-
-        if decompose:
-            yield src, ' ' + repl
-            yield ' ' + src, repl
-    elif postflag == '~':
-        # prefix decomposition
-        prefix = _FLAG_MATCH[preflag]
-        src = prefix + src
-        repl = prefix + repl
-
-        yield src, repl
-        yield src + ' ', repl + ' '
-
-        if decompose:
-            yield src, repl + ' '
-            yield src + ' ', repl
-    else:
-        prefix = _FLAG_MATCH[preflag]
-        postfix = _FLAG_MATCH[postflag]
-
-        yield prefix + src + postfix, prefix + repl + postfix
+        self.config = analysis_mod.configure(rules, normalization_rules)