# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper class to create ICU rules from a configuration file.
"""
import importlib
import io
import json
import logging

from nominatim.config import flatten_config_list
from nominatim.db.properties import set_property, get_property
from nominatim.errors import UsageError
from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
from nominatim.tokenizer.icu_token_analysis import ICUTokenAnalysis
import nominatim.tools.country_info

LOG = logging.getLogger()

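# Property-table keys under which the import-time rules are stored in the
# database (see save_config_to_db() and load_config_from_db() below).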
DBCFG_IMPORT_NORM_RULES = "tokenizer_import_normalisation"
DBCFG_IMPORT_TRANS_RULES = "tokenizer_import_transliteration"
DBCFG_IMPORT_ANALYSIS_RULES = "tokenizer_import_analysis_rules"


def _get_section(rules, section):
    """ Get the section named 'section' from the rules. If the section does
        not exist, raise a usage error with a meaningful message.
    """
    if section not in rules:
        LOG.fatal("Section '%s' not found in tokenizer config.", section)
        raise UsageError("Syntax error in tokenizer configuration file.")

    return rules[section]


41 """ Compiler for ICU rules from a tokenizer configuration file.
    def __init__(self, config):
        rules = config.load_sub_configuration('icu_tokenizer.yaml',
                                              config='TOKENIZER_CONFIG')

        # Make sure country information is available to analyzers and sanitizers.
        nominatim.tools.country_info.setup_country_config(config)
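
        # The sub-configuration is expected to provide the sections read
        # below. A minimal sketch (the rule contents are illustrative only,
        # not the shipped defaults):
        #
        #   normalization:
        #       - ":: lower ()"
        #   transliteration:
        #       - ":: Latin ()"
        #   sanitizers:
        #       - step: split-name-list
        #   token-analysis:
        #       - analyzer: generic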
        self.normalization_rules = self._cfg_to_icu_rules(rules, 'normalization')
        self.transliteration_rules = self._cfg_to_icu_rules(rules, 'transliteration')
        self.analysis_rules = _get_section(rules, 'token-analysis')
        self._setup_analysis()

        # Load optional sanitizer rule set.
        self.sanitizer_rules = rules.get('sanitizers', [])

    def load_config_from_db(self, conn):
        """ Get previously saved parts of the configuration from the
            database.
        """
        self.normalization_rules = get_property(conn, DBCFG_IMPORT_NORM_RULES)
        self.transliteration_rules = get_property(conn, DBCFG_IMPORT_TRANS_RULES)
        self.analysis_rules = json.loads(get_property(conn, DBCFG_IMPORT_ANALYSIS_RULES))
        self._setup_analysis()

    def save_config_to_db(self, conn):
        """ Save the part of the configuration that cannot be changed into
            the database.
        """
        set_property(conn, DBCFG_IMPORT_NORM_RULES, self.normalization_rules)
        set_property(conn, DBCFG_IMPORT_TRANS_RULES, self.transliteration_rules)
        # The analysis rules are a nested structure, so round-trip them as JSON.
        set_property(conn, DBCFG_IMPORT_ANALYSIS_RULES, json.dumps(self.analysis_rules))

    def make_sanitizer(self):
        """ Create a place sanitizer from the configured rules.
        """
        return PlaceSanitizer(self.sanitizer_rules)

    def make_token_analysis(self):
        """ Create a token analyser from the previously loaded rules.
        """
        return ICUTokenAnalysis(self.normalization_rules,
                                self.transliteration_rules, self.analysis)
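
    # Note: the rules handed to ICUTokenAnalysis above come either from the
    # YAML configuration (during import) or from the database (when working
    # with an existing import), depending on which loader ran beforehand.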

    def get_search_rules(self):
        """ Return the ICU rules to be used during search.
            The rules combine normalization and transliteration.
        """
        # First apply the normalization rules.
        rules = io.StringIO()
        rules.write(self.normalization_rules)

        # Then add transliteration.
        rules.write(self.transliteration_rules)

        return rules.getvalue()
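
    # For illustration: if the normalization rules were ":: lower ();" and the
    # transliteration rules ":: Latin ();", the combined search rules would be
    # ":: lower ();:: Latin ();", i.e. terms are normalized first and then
    # transliterated.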

    def get_normalization_rules(self):
        """ Return rules for normalisation of a term.
        """
        return self.normalization_rules

    def get_transliteration_rules(self):
        """ Return the rules for converting a string into its ascii representation.
        """
        return self.transliteration_rules

    def _setup_analysis(self):
        """ Process the rules used for creating the various token analyzers.
        """
        self.analysis = {}

        if not isinstance(self.analysis_rules, list):
            raise UsageError("Configuration section 'token-analysis' must be a list.")

        for section in self.analysis_rules:
            name = section.get('id', None)
            if name in self.analysis:
                if name is None:
                    LOG.fatal("ICU tokenizer configuration has two default token analyzers.")
                else:
                    LOG.fatal("ICU tokenizer configuration has two token "
                              "analyzers with id '%s'.", name)
                raise UsageError("Syntax error in ICU tokenizer config.")

            self.analysis[name] = TokenAnalyzerRule(section, self.normalization_rules)
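
    # An entry without an 'id' becomes the default analyzer (stored under the
    # key None); any further entries must each carry a unique 'id'.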

    @staticmethod
    def _cfg_to_icu_rules(rules, section):
        """ Load an ICU ruleset from the given section. If the section is a
            simple string, it is interpreted as a file name and the rules are
            loaded verbatim from the given file. The filename is expected to be
            relative to the tokenizer rule file. If the section is a list then
            each line is assumed to be a rule. All rules are concatenated and returned.
        """
        content = _get_section(rules, section)

        if content is None:
            return ''

        return ';'.join(flatten_config_list(content, section)) + ';'
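
    # For example, a section given as the list ["::NFD ()", ":: lower ()"]
    # would be joined into the single rule string "::NFD ();:: lower ();".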


class TokenAnalyzerRule:
    """ Factory for a single analysis module. The class saves the configuration
        and creates a new token analyzer on request.
    """

    def __init__(self, rules, normalization_rules):
        # Find the analysis module.
        module_name = 'nominatim.tokenizer.token_analysis.' \
                      + _get_section(rules, 'analyzer').replace('-', '_')
        analysis_mod = importlib.import_module(module_name)
        self.create = analysis_mod.create

        # Load the configuration.
        self.config = analysis_mod.configure(rules, normalization_rules)
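
# Note: every module under nominatim.tokenizer.token_analysis that is named as
# an 'analyzer' must expose two module-level entry points: configure(), which
# is called above with the analyzer section and the normalization rules, and
# create(), which is stored here and used later to instantiate the actual
# analyzer.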