git.openstreetmap.org Git - nominatim.git/blobdiff - nominatim/tokenizer/icu_rule_loader.py
Merge remote-tracking branch 'upstream/master'
[nominatim.git] / nominatim/tokenizer/icu_rule_loader.py
index 8a564355e35c03e4ed715363d505224efac1a6cd..4c36282ca54bfbd3526d24ead471a3e9fe9dbc33 100644
@@ -7,17 +7,21 @@
 """
 Helper class to create ICU rules from a configuration file.
 """
 """
 Helper class to create ICU rules from a configuration file.
 """
-import importlib
+from typing import Mapping, Any, Dict, Optional
 import io
 import json
 import logging
 
-from nominatim.config import flatten_config_list
+from icu import Transliterator
+
+from nominatim.config import flatten_config_list, Configuration
 from nominatim.db.properties import set_property, get_property
+from nominatim.db.connection import Connection
 from nominatim.errors import UsageError
 from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
 from nominatim.tokenizer.icu_token_analysis import ICUTokenAnalysis
-import nominatim.tools.country_info
+from nominatim.tokenizer.token_analysis.base import AnalysisModule, Analyzer
+import nominatim.data.country_info
 
 LOG = logging.getLogger()
 
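The switch away from importlib also pulls in PyICU's Transliterator, which _setup_analysis() further down uses to compile the rule strings once. A minimal sketch of that PyICU call with a toy ruleset (not the rules Nominatim ships):

    from icu import Transliterator

    # Toy ruleset: lower-case, decompose, drop combining marks, recompose.
    rules = ":: Lower (); :: NFD (); [:Nonspacing Mark:] > ; :: NFC ();"
    trans = Transliterator.createFromRules("toy_normalizer", rules)

    assert trans.transliterate("Zürich") == "zurich"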
@@ -26,7 +30,7 @@ DBCFG_IMPORT_TRANS_RULES = "tokenizer_import_transliteration"
 DBCFG_IMPORT_ANALYSIS_RULES = "tokenizer_import_analysis_rules"
 
 
-def _get_section(rules, section):
+def _get_section(rules: Mapping[str, Any], section: str) -> Any:
     """ Get the section named 'section' from the rules. If the section does
         not exist, raise a usage error with a meaningful message.
     """
     """ Get the section named 'section' from the rules. If the section does
         not exist, raise a usage error with a meaningful message.
     """
@@ -41,12 +45,13 @@ class ICURuleLoader:
     """ Compiler for ICU rules from a tokenizer configuration file.
     """
 
     """ Compiler for ICU rules from a tokenizer configuration file.
     """
 
-    def __init__(self, config):
+    def __init__(self, config: Configuration) -> None:
+        self.config = config
         rules = config.load_sub_configuration('icu_tokenizer.yaml',
                                               config='TOKENIZER_CONFIG')
 
         # Make sure country information is available to analyzers and sanitizers.
-        nominatim.tools.country_info.setup_country_config(config)
+        nominatim.data.country_info.setup_country_config(config)
 
         self.normalization_rules = self._cfg_to_icu_rules(rules, 'normalization')
         self.transliteration_rules = self._cfg_to_icu_rules(rules, 'transliteration')
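The constructor now also keeps the Configuration object itself, since make_sanitizer() and the plugin loader in TokenAnalyzerRule below both need it. For orientation, a hedged sketch of the mapping load_sub_configuration() returns; the keys mirror the accesses in this class, the values are illustrative, not the shipped defaults:

    rules = {
        'normalization': ':: lower ();',          # string or list of rules
        'transliteration': ':: Latin ();',
        'sanitizers': [{'step': 'split-name-list'}],
        'token-analysis': [{'analyzer': 'generic'}],
    }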
@@ -57,17 +62,27 @@ class ICURuleLoader:
         self.sanitizer_rules = rules.get('sanitizers', [])
 
 
-    def load_config_from_db(self, conn):
+    def load_config_from_db(self, conn: Connection) -> None:
         """ Get previously saved parts of the configuration from the
             database.
         """
         """ Get previously saved parts of the configuration from the
             database.
         """
-        self.normalization_rules = get_property(conn, DBCFG_IMPORT_NORM_RULES)
-        self.transliteration_rules = get_property(conn, DBCFG_IMPORT_TRANS_RULES)
-        self.analysis_rules = json.loads(get_property(conn, DBCFG_IMPORT_ANALYSIS_RULES))
+        rules = get_property(conn, DBCFG_IMPORT_NORM_RULES)
+        if rules is not None:
+            self.normalization_rules = rules
+
+        rules = get_property(conn, DBCFG_IMPORT_TRANS_RULES)
+        if rules is not None:
+            self.transliteration_rules = rules
+
+        rules = get_property(conn, DBCFG_IMPORT_ANALYSIS_RULES)
+        if rules:
+            self.analysis_rules = json.loads(rules)
+        else:
+            self.analysis_rules = []
         self._setup_analysis()
 
 
-    def save_config_to_db(self, conn):
+    def save_config_to_db(self, conn: Connection) -> None:
         """ Save the part of the configuration that cannot be changed into
             the database.
         """
         """ Save the part of the configuration that cannot be changed into
             the database.
         """
@@ -76,20 +91,20 @@ class ICURuleLoader:
         set_property(conn, DBCFG_IMPORT_ANALYSIS_RULES, json.dumps(self.analysis_rules))
 
 
-    def make_sanitizer(self):
+    def make_sanitizer(self) -> PlaceSanitizer:
         """ Create a place sanitizer from the configured rules.
         """
         """ Create a place sanitizer from the configured rules.
         """
-        return PlaceSanitizer(self.sanitizer_rules)
+        return PlaceSanitizer(self.sanitizer_rules, self.config)
 
 
 
 
-    def make_token_analysis(self):
+    def make_token_analysis(self) -> ICUTokenAnalysis:
         """ Create a token analyser from the reviouly loaded rules.
         """
         return ICUTokenAnalysis(self.normalization_rules,
                                 self.transliteration_rules, self.analysis)
 
 
         """ Create a token analyser from the reviouly loaded rules.
         """
         return ICUTokenAnalysis(self.normalization_rules,
                                 self.transliteration_rules, self.analysis)
 
 
-    def get_search_rules(self):
+    def get_search_rules(self) -> str:
         """ Return the ICU rules to be used during search.
             The rules combine normalization and transliteration.
         """
         """ Return the ICU rules to be used during search.
             The rules combine normalization and transliteration.
         """
@@ -102,26 +117,31 @@ class ICURuleLoader:
         return rules.getvalue()
 
 
-    def get_normalization_rules(self):
+    def get_normalization_rules(self) -> str:
         """ Return rules for normalisation of a term.
         """
         return self.normalization_rules
 
 
         """ Return rules for normalisation of a term.
         """
         return self.normalization_rules
 
 
-    def get_transliteration_rules(self):
+    def get_transliteration_rules(self) -> str:
         """ Return the rules for converting a string into its asciii representation.
         """
         return self.transliteration_rules
 
 
         """ Return the rules for converting a string into its asciii representation.
         """
         return self.transliteration_rules
 
 
-    def _setup_analysis(self):
+    def _setup_analysis(self) -> None:
         """ Process the rules used for creating the various token analyzers.
         """
         """ Process the rules used for creating the various token analyzers.
         """
-        self.analysis = {}
+        self.analysis: Dict[Optional[str], TokenAnalyzerRule] = {}
 
         if not isinstance(self.analysis_rules, list):
             raise UsageError("Configuration section 'token-analysis' must be a list.")
 
+        norm = Transliterator.createFromRules("rule_loader_normalization",
+                                              self.normalization_rules)
+        trans = Transliterator.createFromRules("rule_loader_transliteration",
+                                               self.transliteration_rules)
+
         for section in self.analysis_rules:
             name = section.get('id', None)
             if name in self.analysis:
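_setup_analysis() now compiles the normalization and transliteration rules into Transliterator objects once and hands those to every TokenAnalyzerRule, instead of passing raw rule strings around. On the search side, the combined output of get_search_rules() feeds the same PyICU factory; a hedged sketch:

    search_trans = Transliterator.createFromRules(
        "icu_search", loader.get_search_rules())
    search_trans.transliterate("Straße")   # e.g. 'strasse' with the
                                           # default rule files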
@@ -131,11 +151,12 @@ class ICURuleLoader:
                     LOG.fatal("ICU tokenizer configuration has two token "
                               "analyzers with id '%s'.", name)
                 raise UsageError("Syntax error in ICU tokenizer config.")
-            self.analysis[name] = TokenAnalyzerRule(section, self.normalization_rules)
+            self.analysis[name] = TokenAnalyzerRule(section, norm, trans,
+                                                    self.config)
 
 
     @staticmethod
-    def _cfg_to_icu_rules(rules, section):
+    def _cfg_to_icu_rules(rules: Mapping[str, Any], section: str) -> str:
         """ Load an ICU ruleset from the given section. If the section is a
             simple string, it is interpreted as a file name and the rules are
             loaded verbatim from the given file. The filename is expected to be
         """ Load an ICU ruleset from the given section. If the section is a
             simple string, it is interpreted as a file name and the rules are
             loaded verbatim from the given file. The filename is expected to be
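The duplicate check above keys analyzers by their optional 'id'; a section without an id becomes the default analyzer under the key None, so two id-less sections collide. An illustrative configuration that trips the error:

    analysis_rules = [
        {'analyzer': 'generic'},      # no 'id' -> stored under None
        {'analyzer': 'generic'},      # second None -> UsageError
    ]
    # Fix: give additional sections an explicit id, e.g.
    # {'id': '@housenumber', 'analyzer': 'housenumbers'}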
@@ -155,12 +176,21 @@ class TokenAnalyzerRule:
         and creates a new token analyzer on request.
     """
 
-    def __init__(self, rules, normalization_rules):
-        # Find the analysis module
-        module_name = 'nominatim.tokenizer.token_analysis.' \
-                      + _get_section(rules, 'analyzer').replace('-', '_')
-        analysis_mod = importlib.import_module(module_name)
-        self.create = analysis_mod.create
+    def __init__(self, rules: Mapping[str, Any],
+                 normalizer: Any, transliterator: Any,
+                 config: Configuration) -> None:
+        analyzer_name = _get_section(rules, 'analyzer')
+        if not analyzer_name or not isinstance(analyzer_name, str):
+            raise UsageError("'analyzer' parameter needs to be a simple string")
+
+        self._analysis_mod: AnalysisModule = \
+            config.load_plugin_module(analyzer_name, 'nominatim.tokenizer.token_analysis')
 
-        # Load the configuration.
-        self.config = analysis_mod.configure(rules, normalization_rules)
+        self.config = self._analysis_mod.configure(rules, normalizer,
+                                                   transliterator)
+
+
+    def create(self, normalizer: Any, transliterator: Any) -> Analyzer:
+        """ Create a new analyser instance for the given rule.
+        """
+        return self._analysis_mod.create(normalizer, transliterator, self.config)
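With load_plugin_module() replacing the hard-coded importlib lookup, a token-analysis module no longer has to live inside nominatim.tokenizer.token_analysis. A hedged sketch of the minimal module surface TokenAnalyzerRule relies on; only the configure()/create() signatures come from the code above, the module name and config contents are illustrative:

    # my_analyzer.py -- hypothetical pluggable token-analysis module.

    class _MyAnalyzer:
        """ Stub standing in for a full Analyzer implementation
            (see nominatim.tokenizer.token_analysis.base).
        """
        def __init__(self, normalizer, transliterator, config):
            self.normalizer = normalizer
            self.transliterator = transliterator
            self.config = config


    def configure(rules, normalizer, transliterator):
        """ Digest the static part of the rules once at loader setup. """
        return {'variants': rules.get('variants', [])}


    def create(normalizer, transliterator, config):
        """ Instantiate one analyzer; called per analysis thread. """
        return _MyAnalyzer(normalizer, transliterator, config)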