# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Generic processor for names that creates abbreviation variants.
"""
from collections import defaultdict, namedtuple
import itertools
import re

import datrie
from icu import Transliterator

from nominatim.config import flatten_config_list
from nominatim.errors import UsageError

### Configuration section

ICUVariant = namedtuple('ICUVariant', ['source', 'replacement'])


def configure(rules, normalization_rules):
    """ Extract and preprocess the configuration for this module.
    """
    config = {}

    config['replacements'], config['chars'] = _get_variant_config(rules.get('variants'),
                                                                  normalization_rules)
    config['variant_only'] = rules.get('mode', '') == 'variant-only'

    return config
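
# A sketch of the rule dictionary this module expects, mirroring the YAML of
# the ICU tokenizer configuration (the concrete rule is made up):
#
#     configure({'mode': 'variant-only',
#                'variants': [{'words': ['~straße -> str']}]},
#               norm_rules)
#
# 'variants' holds the abbreviation rules; mode 'variant-only' makes the
# analysis emit only the variants, not the original name.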


def _get_variant_config(rules, normalization_rules):
    """ Convert the variant definition from the configuration into
        replacement sets.
    """
    immediate = defaultdict(list)
    chars = set()

    if rules:
        vset = set()
        rules = flatten_config_list(rules, 'variants')

        vmaker = _VariantMaker(normalization_rules)

        for section in rules:
            for rule in (section.get('words') or []):
                vset.update(vmaker.compute(rule))

        # Intermediate reorder by source. Also compute required character set.
        for variant in vset:
            if variant.source[-1] == ' ' and variant.replacement[-1] == ' ':
                replstr = variant.replacement[:-1]
            else:
                replstr = variant.replacement
            immediate[variant.source].append(replstr)
            chars.update(variant.source)

    return list(immediate.items()), ''.join(chars)
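
# The return value maps each padded source term (e.g. 'straße ' and
# ' straße ' for a hypothetical '~straße -> str' rule) to the list of its
# possible replacements; the character string collects every character used
# in the sources, as required for building the datrie below.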
66 """ Generater for all necessary ICUVariants from a single variant rule.
68 All text in rules is normalized to make sure the variants match later.
71 def __init__(self, norm_rules):
72 self.norm = Transliterator.createFromRules("rule_loader_normalization",

    def compute(self, rule):
        """ Generator for all ICUVariant tuples from a single variant rule.
        """
        parts = re.split(r'(\|)?([=-])>', rule)
        if len(parts) != 4:
            raise UsageError("Syntax error in variant rule: " + rule)

        decompose = parts[1] is None
        src_terms = [self._parse_variant_word(t) for t in parts[0].split(',')]
        repl_terms = (self.norm.transliterate(t).strip() for t in parts[3].split(','))

        # If the source should be kept, add a 1:1 replacement
        if parts[2] == '-':
            for src in src_terms:
                if src:
                    for froms, tos in _create_variants(*src, src[0], decompose):
                        yield ICUVariant(froms, tos)

        for src, repl in itertools.product(src_terms, repl_terms):
            if src and repl:
                for froms, tos in _create_variants(*src, repl, decompose):
                    yield ICUVariant(froms, tos)
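
    # Example (hypothetical rule): for '~straße => str' the split yields
    # parts == ['~straße ', None, '=', ' str'], so decompose is True and,
    # as the operator is '=>' rather than '->', the original spelling is
    # not kept as a 1:1 replacement.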

    def _parse_variant_word(self, name):
        name = name.strip()
        match = re.fullmatch(r'([~^]?)([^~$^]*)([~$]?)', name)
        if match is None or (match.group(1) == '~' and match.group(3) == '~'):
            raise UsageError("Invalid variant word descriptor '{}'".format(name))
        norm_name = self.norm.transliterate(match.group(2)).strip()
        if not norm_name:
            return None

        return norm_name, match.group(1), match.group(3)
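
    # For illustration: '~straße' parses into ('straße', '~', '') (modulo
    # normalization of the term itself) and 'str$' into ('str', '', '$').
    # A '~' on both sides is rejected: a term cannot decompose both ways.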


_FLAG_MATCH = {'^': '^ ',
               '$': ' ^',
               '': ' '}
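
# '^' pins a term to the start of the name and '$' to its end (both
# expressed through the '^' sentinel that _generate_word_variants() wraps
# around the name), while an empty flag merely requires a word boundary.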


def _create_variants(src, preflag, postflag, repl, decompose):
    if preflag == '~':
        postfix = _FLAG_MATCH[postflag]
        # suffix decomposition
        src = src + postfix
        repl = repl + postfix

        yield src, repl
        yield ' ' + src, ' ' + repl

        if decompose:
            yield src, ' ' + repl
            yield ' ' + src, repl
    elif postflag == '~':
        # prefix decomposition
        prefix = _FLAG_MATCH[preflag]
        src = prefix + src
        repl = prefix + repl

        yield src, repl
        yield src + ' ', repl + ' '

        if decompose:
            yield src, repl + ' '
            yield src + ' ', repl
    else:
        prefix = _FLAG_MATCH[preflag]
        postfix = _FLAG_MATCH[postflag]

        yield prefix + src + postfix, prefix + repl + postfix
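
# Worked example (hypothetical input): _create_variants('straße', '~', '',
# 'str', True) appends the postfix ' ' and yields
#     ('straße ', 'str '), (' straße ', ' str '),
#     ('straße ', ' str '), (' straße ', 'str '),
# i.e. the suffix may occur glued to the preceding word or stand alone, and
# with decomposition enabled the two forms map onto each other.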


### Analysis section

def create(transliterator, config):
    """ Create a new token analysis instance for this module.
    """
    return GenericTokenAnalysis(transliterator, config)


class GenericTokenAnalysis:
    """ Collects the different transformation rules for normalisation of names
        and provides the functions to apply the transformations.
    """

    def __init__(self, to_ascii, config):
        self.to_ascii = to_ascii
        self.variant_only = config['variant_only']

        # Set up the search trie with the variant replacements.
        if config['replacements']:
            self.replacements = datrie.Trie(config['chars'])
            for src, repllist in config['replacements']:
                self.replacements[src] = repllist
        else:
            self.replacements = None
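
    # Design note: a datrie.Trie is used because its longest_prefix_item()
    # finds, in a single lookup, the longest replacement source starting at
    # the current scan position in _generate_word_variants() below.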

    def get_variants_ascii(self, norm_name):
        """ Compute the spelling variants for the given normalized name
            and transliterate the result.
        """
        results = set()
        for variant in self._generate_word_variants(norm_name):
            if not self.variant_only or variant.strip() != norm_name:
                trans_name = self.to_ascii.transliterate(variant).strip()
                if trans_name:
                    results.add(trans_name)

        return list(results)
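
    # Usage sketch (names are made up): with a '~straße -> str' variant
    # configured, get_variants_ascii('marienstraße') returns transliterations
    # of 'marienstraße' and 'marienstr' plus, thanks to decomposition, the
    # spaced forms 'marien straße' and 'marien str'.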

    def _generate_word_variants(self, norm_name):
        baseform = '^ ' + norm_name + ' ^'
        baselen = len(baseform)
        partials = ['']

        startpos = 0
        if self.replacements is not None:
            pos = 0
            force_space = False
            while pos < baselen:
                full, repl = self.replacements.longest_prefix_item(baseform[pos:],
                                                                   (None, None))
                if full is not None:
                    done = baseform[startpos:pos]
                    partials = [v + done + r
                                for v, r in itertools.product(partials, repl)
                                if not force_space or r.startswith(' ')]
                    if len(partials) > 128:
                        # If too many variants are produced, they are unlikely
                        # to be helpful. Only use the original term.
                        startpos = 0
                        break

                    startpos = pos + len(full)
                    if full[-1] == ' ':
                        startpos -= 1
                        force_space = True
                    else:
                        force_space = False
                    pos = startpos
                else:
                    pos += 1
                    force_space = False

        # No variants detected? Fast return.
        if startpos == 0:
            return (norm_name, )

        if startpos < baselen:
            return (part[1:] + baseform[startpos:-1] for part in partials)

        return (part[1:-1] for part in partials)
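
        # Walk-through (hypothetical data): with a replacement
        # 'straße ' -> ['straße', 'str'], the name 'marienstraße' is scanned
        # as '^ marienstraße ^'; the match at position 8 turns partials into
        # ['^ marienstraße', '^ marienstr'] and the final generator
        # re-attaches the unconsumed tail, still padded with the spaces that
        # get_variants_ascii() strips after transliteration.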