# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of query analysis for the ICU tokenizer.
"""
from typing import Tuple, Dict, List, Optional, Iterator, Any, cast
import dataclasses
import difflib
import re
from itertools import zip_longest

from icu import Transliterator

import sqlalchemy as sa

from ..errors import UsageError
from ..typing import SaRow
from ..sql.sqlalchemy_types import Json
from ..connection import SearchConnection
from ..logging import log
from . import query as qmod
from ..query_preprocessing.config import QueryConfig
from .query_analyzer_factory import AbstractQueryAnalyzer


DB_TO_TOKEN_TYPE = {
    'W': qmod.TOKEN_WORD,
    'w': qmod.TOKEN_PARTIAL,
    'H': qmod.TOKEN_HOUSENUMBER,
    'P': qmod.TOKEN_POSTCODE,
    'C': qmod.TOKEN_COUNTRY
}

PENALTY_IN_TOKEN_BREAK = {
    qmod.BREAK_START: 0.5,
    qmod.BREAK_END: 0.5,
    qmod.BREAK_PHRASE: 0.5,
    qmod.BREAK_SOFT_PHRASE: 0.5,
    qmod.BREAK_WORD: 0.1,
    qmod.BREAK_PART: 0.2,
    qmod.BREAK_TOKEN: 0.4
}


@dataclasses.dataclass
class ICUToken(qmod.Token):
    """ Specialised token for ICU tokenizer.
    """
    word_token: str
    info: Optional[Dict[str, Any]]

    def get_category(self) -> Tuple[str, str]:
        assert self.info
        return self.info.get('class', ''), self.info.get('type', '')
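        # Note (illustrative): the category is only meaningful for tokens of
        # special phrases, where info carries the class/type pair, e.g.
        # ('amenity', 'restaurant').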

    def rematch(self, norm: str) -> None:
        """ Check how well the token matches the given normalized string
            and add a penalty, if necessary.
        """
        if not self.lookup_word:
            return

        seq = difflib.SequenceMatcher(a=self.lookup_word, b=norm)
        distance = 0
        for tag, afrom, ato, bfrom, bto in seq.get_opcodes():
            if tag in ('delete', 'insert') and (afrom == 0 or ato == len(self.lookup_word)):
                distance += 1
            elif tag == 'replace':
                distance += max((ato-afrom), (bto-bfrom))
            elif tag != 'equal':
                distance += abs((ato-afrom) - (bto-bfrom))
        self.penalty += (distance/len(self.lookup_word))
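        # Worked example (illustrative): for lookup_word='hauptstr' and
        # norm='hauptstrasse', difflib reports one trailing 'insert' opcode,
        # so distance becomes 1 and the penalty grows by 1/8 = 0.125.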

    @staticmethod
    def from_db_row(row: SaRow, base_penalty: float = 0.0) -> 'ICUToken':
        """ Create an ICUToken from the row of the word table.
        """
        count = 1 if row.info is None else row.info.get('count', 1)
        addr_count = 1 if row.info is None else row.info.get('addr_count', 1)

        penalty = base_penalty
        if row.type == 'w':
            penalty += 0.3
        elif row.type == 'W':
            if len(row.word_token) == 1 and row.word_token == row.word:
                penalty += 0.2 if row.word.isdigit() else 0.3
        elif row.type == 'H':
            penalty += sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
            if all(not c.isdigit() for c in row.word_token):
                penalty += 0.2 * (len(row.word_token) - 1)
        elif row.type == 'C':
            if len(row.word_token) == 1:
                penalty += 0.3

        if row.info is None:
            lookup_word = row.word
        else:
            lookup_word = row.info.get('lookup', row.word)
        if lookup_word:
            lookup_word = lookup_word.split('@', 1)[0]
        else:
            lookup_word = row.word_token

        return ICUToken(penalty=penalty, token=row.word_id, count=max(1, count),
                        lookup_word=lookup_word,
                        word_token=row.word_token, info=row.info,
                        addr_count=max(1, addr_count))
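        # Worked example (illustrative only): a house-number row with
        # word_token='12a' collects a penalty of 0.1 for the letter 'a',
        # so the resulting token carries base_penalty + 0.1.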


class ICUQueryAnalyzer(AbstractQueryAnalyzer):
    """ Converter for query strings into a tokenized query
        using the tokens created by an ICU tokenizer.
    """
    def __init__(self, conn: SearchConnection) -> None:
        self.conn = conn

    async def setup(self) -> None:
        """ Set up static data structures needed for the analysis.
        """
        async def _make_normalizer() -> Any:
            rules = await self.conn.get_property('tokenizer_import_normalisation')
            return Transliterator.createFromRules("normalization", rules)

        self.normalizer = await self.conn.get_cached_value('ICUTOK', 'normalizer',
                                                           _make_normalizer)

        async def _make_transliterator() -> Any:
            rules = await self.conn.get_property('tokenizer_import_transliteration')
            return Transliterator.createFromRules("transliteration", rules)

        self.transliterator = await self.conn.get_cached_value('ICUTOK', 'transliterator',
                                                               _make_transliterator)

        await self._setup_preprocessing()

        if 'word' not in self.conn.t.meta.tables:
            sa.Table('word', self.conn.t.meta,
                     sa.Column('word_id', sa.Integer),
                     sa.Column('word_token', sa.Text, nullable=False),
                     sa.Column('type', sa.Text, nullable=False),
                     sa.Column('word', sa.Text),
                     sa.Column('info', Json))
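        # For orientation, a row of the word table has roughly this shape
        # (illustrative values):
        #   word_id | word_token | type | word     | info
        #   1234    | 'berlin'   | 'W'  | 'Berlin' | {'count': 50000}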

    async def _setup_preprocessing(self) -> None:
        """ Load the rules for preprocessing and set up the handlers.
        """
        rules = self.conn.config.load_sub_configuration('icu_tokenizer.yaml',
                                                        config='TOKENIZER_CONFIG')
        preprocessing_rules = rules.get('query-preprocessing', [])

        self.preprocessors = []

        for func in preprocessing_rules:
            if 'step' not in func:
                raise UsageError("Preprocessing rule is missing the 'step' attribute.")
            if not isinstance(func['step'], str):
                raise UsageError("'step' attribute must be a simple string.")

            module = self.conn.config.load_plugin_module(
                func['step'], 'nominatim_api.query_preprocessing')
            self.preprocessors.append(
                module.create(QueryConfig(func).set_normalizer(self.normalizer)))
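        # The query-preprocessing section in icu_tokenizer.yaml is a list of
        # steps, each naming a module under nominatim_api.query_preprocessing
        # (hypothetical example):
        #
        #   query-preprocessing:
        #     - step: normalize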

    async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct:
        """ Analyze the given list of phrases and return the
            tokenized query.
        """
        log().section('Analyze query (using ICU tokenizer)')
        for func in self.preprocessors:
            phrases = func(phrases)
        query = qmod.QueryStruct(phrases)

        log().var_dump('Normalized query', query.source)
        if not query.source:
            return query

        self.split_query(query)
        log().var_dump('Transliterated query', lambda: query.get_transliterated_query())
        words = query.extract_words(base_penalty=PENALTY_IN_TOKEN_BREAK[qmod.BREAK_WORD])

        for row in await self.lookup_in_db(list(words.keys())):
            for trange in words[row.word_token]:
                token = ICUToken.from_db_row(row, trange.penalty or 0.0)
                if row.type == 'S':
                    if row.info['op'] in ('in', 'near'):
                        if trange.start == 0:
                            query.add_token(trange, qmod.TOKEN_NEAR_ITEM, token)
                    else:
                        if trange.start == 0 and trange.end == query.num_token_slots():
                            query.add_token(trange, qmod.TOKEN_NEAR_ITEM, token)
                        else:
                            query.add_token(trange, qmod.TOKEN_QUALIFIER, token)
                else:
                    query.add_token(trange, DB_TO_TOKEN_TYPE[row.type], token)

        self.add_extra_tokens(query)
        self.rerank_tokens(query)

        log().table_dump('Word tokens', _dump_word_tokens(query))

        return query

    def normalize_text(self, text: str) -> str:
        """ Bring the given text into a normalized form. That is the
            standardized form search will work with. All information removed
            at this stage is inevitably lost.
        """
        return cast(str, self.normalizer.transliterate(text)).strip('-: ')
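        # Example (the exact output depends on the normalization rules stored
        # with the database): a typical setup lower-cases the input, so
        # normalize_text('Hauptstraße 4') would yield something like
        # 'hauptstraße 4'.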

    def split_query(self, query: qmod.QueryStruct) -> None:
        """ Transliterate the phrases and split them into tokens.
        """
        for phrase in query.source:
            query.nodes[-1].ptype = phrase.ptype
            phrase_split = re.split('([ :-])', phrase.text)
            # The zip construct will give us the pairs of word/break from
            # the regular expression split. As the split array ends on the
            # final word, we simply use the fillvalue to even out the list and
            # add the phrase break at the end.
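            # For example, re.split('([ :-])', 'Main St 12') yields
            # ['Main', ' ', 'St', ' ', '12'], so the pairs become
            # ('Main', ' '), ('St', ' ') and finally ('12', ',') with the
            # phrase break supplied by the fillvalue.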
            for word, breakchar in zip_longest(*[iter(phrase_split)]*2, fillvalue=','):
                if not word:
                    continue
                trans = self.transliterator.transliterate(word)
                if trans:
                    for term in trans.split(' '):
                        if term:
                            query.add_node(qmod.BREAK_TOKEN, phrase.ptype,
                                           PENALTY_IN_TOKEN_BREAK[qmod.BREAK_TOKEN],
                                           term, word)
                    query.nodes[-1].adjust_break(breakchar,
                                                 PENALTY_IN_TOKEN_BREAK[breakchar])

        query.nodes[-1].adjust_break(qmod.BREAK_END, PENALTY_IN_TOKEN_BREAK[qmod.BREAK_END])

    async def lookup_in_db(self, words: List[str]) -> 'sa.Result[Any]':
        """ Return the token information from the database for the
            given word tokens.
        """
        t = self.conn.t.meta.tables['word']
        return await self.conn.execute(t.select().where(t.c.word_token.in_(words)))
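        # The statement above translates roughly to (given the table
        # definition registered in setup()):
        #   SELECT word_id, word_token, type, word, info
        #     FROM word WHERE word_token IN (:w1, :w2, ...)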

    def add_extra_tokens(self, query: qmod.QueryStruct) -> None:
        """ Add tokens to query that are not saved in the database.
        """
        need_hnr = False
        for i, node in enumerate(query.nodes):
            is_full_token = node.btype not in (qmod.BREAK_TOKEN, qmod.BREAK_PART)
            if need_hnr and is_full_token \
                    and len(node.term_normalized) <= 4 and node.term_normalized.isdigit():
                query.add_token(qmod.TokenRange(i-1, i), qmod.TOKEN_HOUSENUMBER,
                                ICUToken(penalty=0.5, token=0,
                                         count=1, addr_count=1,
                                         lookup_word=node.term_lookup,
                                         word_token=node.term_lookup, info=None))

            need_hnr = is_full_token and not node.has_tokens(i+1, qmod.TOKEN_HOUSENUMBER)
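        # Example (illustrative only): in a query like 'hauptstr 4' the term
        # '4' may have no house-number entry in the word table; the artificial
        # token added above still lets it be read as a house number, albeit
        # with a penalty of 0.5.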

    def rerank_tokens(self, query: qmod.QueryStruct) -> None:
        """ Add penalties to tokens that depend on the presence of other tokens.
        """
        for i, node, tlist in query.iter_token_lists():
            if tlist.ttype == qmod.TOKEN_POSTCODE:
                for repl in node.starting:
                    if repl.end == tlist.end and repl.ttype != qmod.TOKEN_POSTCODE \
                       and (repl.ttype != qmod.TOKEN_HOUSENUMBER
                            or len(tlist.tokens[0].lookup_word) > 4):
                        repl.add_penalty(0.39)
            elif (tlist.ttype == qmod.TOKEN_HOUSENUMBER
                  and len(tlist.tokens[0].lookup_word) <= 3):
                if any(c.isdigit() for c in tlist.tokens[0].lookup_word):
                    for repl in node.starting:
                        if repl.end == tlist.end and repl.ttype != qmod.TOKEN_HOUSENUMBER:
                            repl.add_penalty(0.5 - tlist.tokens[0].penalty)
            elif tlist.ttype not in (qmod.TOKEN_COUNTRY, qmod.TOKEN_PARTIAL):
                norm = ' '.join(n.term_normalized for n in query.nodes[i + 1:tlist.end + 1]
                                if n.btype != qmod.BREAK_TOKEN)
                if not norm:
                    # Can happen when the token only covers a partial term
                    norm = query.nodes[i + 1].term_normalized
                for token in tlist.tokens:
                    cast(ICUToken, token).rematch(norm)
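        # Example (illustrative only): when a term such as '12345' can be read
        # both as a postcode and as a partial word, the partial reading picks
        # up the extra 0.39 penalty from the postcode branch above.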


def _dump_word_tokens(query: qmod.QueryStruct) -> Iterator[List[Any]]:
    yield ['type', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info']
    for node in query.nodes:
        for tlist in node.starting:
            for token in tlist.tokens:
                t = cast(ICUToken, token)
                yield [tlist.ttype, t.token, t.word_token or '',
                       t.lookup_word or '', t.penalty, t.count, t.info]


async def create_query_analyzer(conn: SearchConnection) -> AbstractQueryAnalyzer:
    """ Create and set up a new query analyzer for a database based
        on the ICU tokenizer.
    """
    out = ICUQueryAnalyzer(conn)
    await out.setup()

    return out
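

# Usage sketch (illustrative): given an open SearchConnection `conn` and a
# list of qmod.Phrase objects `phrases`, the analyzer would typically be
# used like this:
#
#     analyzer = await create_query_analyzer(conn)
#     query = await analyzer.analyze_query(phrases)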