add type annotations for indexer
nominatim/tokenizer/token_analysis/generic.py
index b9bd9bdf8d6043794a63bb104b44ac1040954841..3de915ba5254e1859976dd7e9842247df5a58b98 100644
@@ -11,7 +11,9 @@ import itertools
 
 import datrie
 
+from nominatim.errors import UsageError
 from nominatim.tokenizer.token_analysis.config_variants import get_variant_config
+from nominatim.tokenizer.token_analysis.generic_mutation import MutationVariantGenerator
 
 ### Configuration section
 
@@ -24,15 +26,31 @@ def configure(rules, normalization_rules):
                                                                  normalization_rules)
     config['variant_only'] = rules.get('mode', '') == 'variant-only'
 
+    # parse mutation rules
+    config['mutations'] = []
+    for rule in rules.get('mutations', []):
+        if 'pattern' not in rule:
+            raise UsageError("Missing field 'pattern' in mutation configuration.")
+        if not isinstance(rule['pattern'], str):
+            raise UsageError("Field 'pattern' in mutation configuration "
+                             "must be a simple text field.")
+        if 'replacements' not in rule:
+            raise UsageError("Missing field 'replacements' in mutation configuration.")
+        if not isinstance(rule['replacements'], list):
+            raise UsageError("Field 'replacements' in mutation configuration "
+                             "must be a list of texts.")
+
+        config['mutations'].append((rule['pattern'], rule['replacements']))
+
     return config
 
 
 ### Analysis section
 
-def create(transliterator, config):
+def create(normalizer, transliterator, config):
     """ Create a new token analysis instance for this module.
     """
-    return GenericTokenAnalysis(transliterator, config)
+    return GenericTokenAnalysis(normalizer, transliterator, config)
 
 
 class GenericTokenAnalysis:
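
The mutation entries validated in configure() above are plain dictionaries taken from the tokenizer's rule configuration. A minimal sketch of what such a rule set might look like once parsed into Python (the pattern and replacement values are illustrative, not taken from the diff):

    rules = {
        'mode': 'variant-only',
        'mutations': [
            # each rule needs a 'pattern' string and a 'replacements' list,
            # otherwise configure() raises UsageError
            {'pattern': 'ä', 'replacements': ['ä', 'ae']},
        ],
    }
    config = configure(rules, normalization_rules)
    # config['mutations'] is now [('ä', ['ä', 'ae'])]; each tuple is later
    # unpacked into MutationVariantGenerator(*cfg) in GenericTokenAnalysis.__init__()
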
@@ -40,7 +58,8 @@ class GenericTokenAnalysis:
         and provides the functions to apply the transformations.
     """
 
-    def __init__(self, to_ascii, config):
+    def __init__(self, norm, to_ascii, config):
+        self.norm = norm
         self.to_ascii = to_ascii
         self.variant_only = config['variant_only']
 
@@ -52,19 +71,38 @@ class GenericTokenAnalysis:
         else:
             self.replacements = None
 
+        # set up mutation rules
+        self.mutations = [MutationVariantGenerator(*cfg) for cfg in config['mutations']]
+
+
+    def normalize(self, name):
+        """ Return the normalized form of the name. This is the standard form
+            from which possible variants for the name can be derived.
+        """
+        return self.norm.transliterate(name).strip()
+
 
     def get_variants_ascii(self, norm_name):
         """ Compute the spelling variants for the given normalized name
             and transliterate the result.
         """
-        results = set()
-        for variant in self._generate_word_variants(norm_name):
-            if not self.variant_only or variant.strip() != norm_name:
-                trans_name = self.to_ascii.transliterate(variant).strip()
-                if trans_name:
-                    results.add(trans_name)
-
-        return list(results)
+        variants = self._generate_word_variants(norm_name)
+
+        for mutation in self.mutations:
+            variants = mutation.generate(variants)
+
+        return [name for name in self._transliterate_unique_list(norm_name, variants) if name]
+
+
+    def _transliterate_unique_list(self, norm_name, iterable):
+        seen = set()
+        if self.variant_only:
+            seen.add(norm_name)
+
+        for variant in map(str.strip, iterable):
+            if variant not in seen:
+                seen.add(variant)
+                yield self.to_ascii.transliterate(variant).strip()
 
 
     def _generate_word_variants(self, norm_name):
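
A hedged sketch of how the reworked interface fits together: create() now also receives the normalizer, normalize() yields the canonical form of a name, and get_variants_ascii() expands it into de-duplicated, transliterated variants. The transliterator objects (assumed to be ICU transliterators) and the example name and outputs are illustrative only:

    analysis = create(normalizer, transliterator, config)
    norm_name = analysis.normalize('Bäckerstraße')     # e.g. 'bäckerstraße'
    variants = analysis.get_variants_ascii(norm_name)  # e.g. ['backerstrasse', 'baeckerstrasse']
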