extend ICU config to accommodate multiple analysers
diff --git a/nominatim/tokenizer/icu_rule_loader.py b/nominatim/tokenizer/icu_rule_loader.py
index d3141bf7341691daba70c57db46ab6ed20256ae5..cf72520953456e9318576f51d9fc7acc280d668e 100644
--- a/nominatim/tokenizer/icu_rule_loader.py
+++ b/nominatim/tokenizer/icu_rule_loader.py
@@ -2,34 +2,25 @@
 Helper class to create ICU rules from a configuration file.
 """
 import io
+import json
 import logging
 import itertools
-from pathlib import Path
 import re
 
-import yaml
 from icu import Transliterator
 
+from nominatim.config import flatten_config_list
+from nominatim.db.properties import set_property, get_property
 from nominatim.errors import UsageError
+from nominatim.tokenizer.icu_name_processor import ICUNameProcessor
+from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
 import nominatim.tokenizer.icu_variants as variants
 
 LOG = logging.getLogger()
 
-def _flatten_yaml_list(content):
-    if not content:
-        return []
-
-    if not isinstance(content, list):
-        raise UsageError("List expected in ICU yaml configuration.")
-
-    output = []
-    for ele in content:
-        if isinstance(ele, list):
-            output.extend(_flatten_yaml_list(ele))
-        else:
-            output.append(ele)
-
-    return output
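+# Property names under which parts of the configuration are persisted in the database.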
+DBCFG_IMPORT_NORM_RULES = "tokenizer_import_normalisation"
+DBCFG_IMPORT_TRANS_RULES = "tokenizer_import_transliteration"
+DBCFG_IMPORT_ANALYSIS_RULES = "tokenizer_import_analysis_rules"
 
 
 class VariantRule:
@@ -48,14 +39,49 @@ class ICURuleLoader:
     """ Compiler for ICU rules from a tokenizer configuration file.
     """
 
-    def __init__(self, configfile):
-        self.configfile = configfile
-        self.variants = set()
+    def __init__(self, config):
+        rules = config.load_sub_configuration('icu_tokenizer.yaml',
+                                              config='TOKENIZER_CONFIG')
+
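+        # Flatten each rule section into the single string form expected by ICU.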
+        self.normalization_rules = self._cfg_to_icu_rules(rules, 'normalization')
+        self.transliteration_rules = self._cfg_to_icu_rules(rules, 'transliteration')
+        self.analysis_rules = self._get_section(rules, 'token-analysis')
+        self._setup_analysis()
+
+        # Load optional sanitizer rule set.
+        self.sanitizer_rules = rules.get('sanitizers', [])
+
+
+    def load_config_from_db(self, conn):
+        """ Get previously saved parts of the configuration from the
+            database.
+        """
+        self.normalization_rules = get_property(conn, DBCFG_IMPORT_NORM_RULES)
+        self.transliteration_rules = get_property(conn, DBCFG_IMPORT_TRANS_RULES)
+        self.analysis_rules = json.loads(get_property(conn, DBCFG_IMPORT_ANALYSIS_RULES))
+        self._setup_analysis()
+
+
+    def save_config_to_db(self, conn):
+        """ Save the part of the configuration that cannot be changed into
+            the database.
+        """
+        set_property(conn, DBCFG_IMPORT_NORM_RULES, self.normalization_rules)
+        set_property(conn, DBCFG_IMPORT_TRANS_RULES, self.transliteration_rules)
+        set_property(conn, DBCFG_IMPORT_ANALYSIS_RULES, json.dumps(self.analysis_rules))
+
+
+    def make_sanitizer(self):
+        """ Create a place sanitizer from the configured rules.
+        """
+        return PlaceSanitizer(self.sanitizer_rules)
+
 
-        if configfile.suffix == '.yaml':
-            self._load_from_yaml()
-        else:
-            raise UsageError("Unknown format of tokenizer configuration.")
+    def make_token_analysis(self):
+        """ Create a token analyser from the reviouly loaded rules.
+        """
+        return self.analysis[None].create(self.normalization_rules,
+                                          self.transliteration_rules)
 
 
     def get_search_rules(self):
@@ -70,52 +96,46 @@ class ICURuleLoader:
         rules.write(self.transliteration_rules)
         return rules.getvalue()
 
+
     def get_normalization_rules(self):
         """ Return rules for normalisation of a term.
         """
         return self.normalization_rules
 
+
     def get_transliteration_rules(self):
         """ Return the rules for converting a string into its asciii representation.
         """
         return self.transliteration_rules
 
-    def get_replacement_pairs(self):
-        """ Return the list of possible compound decompositions with
-            application of abbreviations included.
-            The result is a list of pairs: the first item is the sequence to
-            replace, the second is a list of replacements.
-        """
-        return self.variants
-
-    def _yaml_include_representer(self, loader, node):
-        value = loader.construct_scalar(node)
-
-        if Path(value).is_absolute():
-            content = Path(value).read_text()
-        else:
-            content = (self.configfile.parent / value).read_text()
-
-        return yaml.safe_load(content)
 
+    def _setup_analysis(self):
+        """ Process the rules used for creating the various token analyzers.
+        """
+        self.analysis = {}
 
-    def _load_from_yaml(self):
-        yaml.add_constructor('!include', self._yaml_include_representer,
-                             Loader=yaml.SafeLoader)
-        rules = yaml.safe_load(self.configfile.read_text())
+        if not isinstance(self.analysis_rules, list):
+            raise UsageError("Configuration section 'token-analysis' must be a list.")
 
-        self.normalization_rules = self._cfg_to_icu_rules(rules, 'normalization')
-        self.transliteration_rules = self._cfg_to_icu_rules(rules, 'transliteration')
-        self._parse_variant_list(self._get_section(rules, 'variants'))
+        for section in self.analysis_rules:
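+            # An analyser without an 'id' is the default, keyed as None.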
+            name = section.get('id', None)
+            if name in self.analysis:
+                if name is None:
+                    LOG.fatal("ICU tokenizer configuration has two default token analyzers.")
+                else:
+                    LOG.fatal("ICU tokenizer configuration has two token "
+                              "analyzers with id '%s'.", name)
+                raise UsageError("Syntax error in ICU tokenizer config.")
+            self.analysis[name] = TokenAnalyzerRule(section, self.normalization_rules)
 
 
-    def _get_section(self, rules, section):
+    @staticmethod
+    def _get_section(rules, section):
         """ Get the section named 'section' from the rules. If the section does
             not exist, raise a usage error with a meaningful message.
         """
         if section not in rules:
-            LOG.fatal("Section '%s' not found in tokenizer config '%s'.",
-                      section, str(self.configfile))
+            LOG.fatal("Section '%s' not found in tokenizer config.", section)
             raise UsageError("Syntax error in tokenizer configuration file.")
 
         return rules[section]
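
A minimal usage sketch of the loader's lifecycle (`config` and `conn`
stand in for Nominatim's configuration object and database connection,
which are supplied by the surrounding code):

    loader = ICURuleLoader(config)      # read rules from icu_tokenizer.yaml
    loader.save_config_to_db(conn)      # persist them during import

    loader = ICURuleLoader(config)
    loader.load_config_from_db(conn)    # restore the imported rules later
    sanitizer = loader.make_sanitizer()
    analyzer = loader.make_token_analysis()
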
@@ -133,18 +153,35 @@ class ICURuleLoader:
         if content is None:
             return ''
 
-        return ';'.join(_flatten_yaml_list(content)) + ';'
+        return ';'.join(flatten_config_list(content, section)) + ';'
+
+
+class TokenAnalyzerRule:
+    """ Factory for a single analysis module. The class saves the configuration
+        and creates a new token analyzer on request.
+    """
+
+    def __init__(self, rules, normalization_rules):
+        self._parse_variant_list(rules.get('variants'), normalization_rules)
 
 
-    def _parse_variant_list(self, rules):
-        self.variants.clear()
+    def create(self, normalization_rules, transliteration_rules):
+        """ Create an analyzer from the given rules.
+        """
+        return ICUNameProcessor(normalization_rules,
+                                transliteration_rules,
+                                self.variants)
+
+
+    def _parse_variant_list(self, rules, normalization_rules):
+        self.variants = set()
 
         if not rules:
             return
 
-        rules = _flatten_yaml_list(rules)
+        rules = flatten_config_list(rules, 'variants')
 
-        vmaker = _VariantMaker(self.normalization_rules)
+        vmaker = _VariantMaker(normalization_rules)
 
         properties = []
         for section in rules:
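
For illustration, a hypothetical shape of `analysis_rules` as
`_setup_analysis()` expects it after YAML loading; the contents of the
'variants' lists are parsed by `_parse_variant_list()` and are elided here:

    analysis_rules = [
        {'variants': [...]},              # default analyser: no 'id' key
        {'id': 'de', 'variants': [...]},  # analyser addressed by id 'de'
    ]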