# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of query analysis for the ICU tokenizer.
"""
from typing import Tuple, Dict, List, Optional, Iterator, Any, cast
import dataclasses
import difflib
import re
from itertools import zip_longest

from icu import Transliterator

import sqlalchemy as sa

from ..errors import UsageError
from ..typing import SaRow
from ..sql.sqlalchemy_types import Json
from ..connection import SearchConnection
from ..logging import log
from . import query as qmod
from ..query_preprocessing.config import QueryConfig
from .query_analyzer_factory import AbstractQueryAnalyzer


DB_TO_TOKEN_TYPE = {
    'W': qmod.TOKEN_WORD,
    'w': qmod.TOKEN_PARTIAL,
    'H': qmod.TOKEN_HOUSENUMBER,
    'P': qmod.TOKEN_POSTCODE,
    'C': qmod.TOKEN_COUNTRY
}

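# Presumably, these penalties are applied to word tokens depending on the kind
# of break they span: crossing a phrase boundary is punished far more heavily
# than crossing a soft break inside a word.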
PENALTY_IN_TOKEN_BREAK = {
    qmod.BREAK_START: 0.5,
    qmod.BREAK_END: 0.5,
    qmod.BREAK_PHRASE: 0.5,
    qmod.BREAK_SOFT_PHRASE: 0.5,
    qmod.BREAK_WORD: 0.1,
    qmod.BREAK_PART: 0.0,
    qmod.BREAK_TOKEN: 0.0
}


@dataclasses.dataclass
class ICUToken(qmod.Token):
    """ Specialised token for ICU tokenizer.
    """
    word_token: str
    info: Optional[Dict[str, Any]]

    def get_category(self) -> Tuple[str, str]:
        assert self.info
        return self.info.get('class', ''), self.info.get('type', '')

    def rematch(self, norm: str) -> None:
        """ Check how well the token matches the given normalized string
            and add a penalty, if necessary.
        """
        if not self.lookup_word:
            return

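        # Compute a rough edit distance between lookup word and normalized
        # term using difflib opcodes: insertions and deletions at the very
        # start or end cost only 1, replacements cost the length of the
        # longer side, and other insertions/deletions cost their full
        # length. The penalty is the distance relative to the word length.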
        seq = difflib.SequenceMatcher(a=self.lookup_word, b=norm)
        distance = 0
        for tag, afrom, ato, bfrom, bto in seq.get_opcodes():
            if tag in ('delete', 'insert') and (afrom == 0 or ato == len(self.lookup_word)):
                distance += 1
            elif tag == 'replace':
                distance += max((ato-afrom), (bto-bfrom))
            elif tag != 'equal':
                distance += abs((ato-afrom) - (bto-bfrom))
        self.penalty += (distance/len(self.lookup_word))

    @staticmethod
    def from_db_row(row: SaRow, base_penalty: float = 0.0) -> 'ICUToken':
        """ Create an ICUToken from the row of the word table.
        """
        count = 1 if row.info is None else row.info.get('count', 1)
        addr_count = 1 if row.info is None else row.info.get('addr_count', 1)

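        # Base penalties depending on token type: partial words ('w') and
        # single-character full words ('W') are penalised, house numbers
        # ('H') get a penalty for every non-digit character, and one-letter
        # country codes ('C') are penalised as well.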
        penalty = base_penalty
        if row.type == 'w':
            penalty += 0.3
        elif row.type == 'W':
            if len(row.word_token) == 1 and row.word_token == row.word:
                penalty += 0.2 if row.word.isdigit() else 0.3
        elif row.type == 'H':
            penalty += sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
            if all(not c.isdigit() for c in row.word_token):
                penalty += 0.2 * (len(row.word_token) - 1)
        elif row.type == 'C':
            if len(row.word_token) == 1:
                penalty += 0.3

        if row.info is None:
            lookup_word = row.word
        else:
            lookup_word = row.info.get('lookup', row.word)
        if lookup_word:
            lookup_word = lookup_word.split('@', 1)[0]
        else:
            lookup_word = row.word_token

        return ICUToken(penalty=penalty, token=row.word_id, count=max(1, count),
                        lookup_word=lookup_word,
                        word_token=row.word_token, info=row.info,
                        addr_count=max(1, addr_count))


class ICUQueryAnalyzer(AbstractQueryAnalyzer):
    """ Converter for query strings into a tokenized query
        using the tokens created by an ICU tokenizer.
    """
    def __init__(self, conn: SearchConnection) -> None:
        self.conn = conn

    async def setup(self) -> None:
        """ Set up static data structures needed for the analysis.
        """
        async def _make_normalizer() -> Any:
            rules = await self.conn.get_property('tokenizer_import_normalisation')
            return Transliterator.createFromRules("normalization", rules)

        self.normalizer = await self.conn.get_cached_value('ICUTOK', 'normalizer',
                                                           _make_normalizer)

        async def _make_transliterator() -> Any:
            rules = await self.conn.get_property('tokenizer_import_transliteration')
            return Transliterator.createFromRules("transliteration", rules)

        self.transliterator = await self.conn.get_cached_value('ICUTOK', 'transliterator',
                                                               _make_transliterator)

        await self._setup_preprocessing()

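        # Register the 'word' table with the SQLAlchemy metadata if it is
        # not there yet; the layout declared here is the one used by the
        # ICU tokenizer.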
        if 'word' not in self.conn.t.meta.tables:
            sa.Table('word', self.conn.t.meta,
                     sa.Column('word_id', sa.Integer),
                     sa.Column('word_token', sa.Text, nullable=False),
                     sa.Column('type', sa.Text, nullable=False),
                     sa.Column('word', sa.Text),
                     sa.Column('info', Json))

    async def _setup_preprocessing(self) -> None:
        """ Load the rules for preprocessing and set up the handlers.
        """

        rules = self.conn.config.load_sub_configuration('icu_tokenizer.yaml',
                                                        config='TOKENIZER_CONFIG')
        preprocessing_rules = rules.get('query-preprocessing', [])

        self.preprocessors = []

        for func in preprocessing_rules:
            if 'step' not in func:
                raise UsageError("Preprocessing rule is missing the 'step' attribute.")
            if not isinstance(func['step'], str):
                raise UsageError("'step' attribute must be a simple string.")

            module = self.conn.config.load_plugin_module(
                        func['step'], 'nominatim_api.query_preprocessing')
            self.preprocessors.append(
                module.create(QueryConfig(func).set_normalizer(self.normalizer)))

    async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct:
        """ Analyze the given list of phrases and return the
            tokenized query.
        """
        log().section('Analyze query (using ICU tokenizer)')
        for func in self.preprocessors:
            phrases = func(phrases)
        query = qmod.QueryStruct(phrases)

        log().var_dump('Normalized query', query.source)
        if not query.source:
            return query

        self.split_query(query)
        log().var_dump('Transliterated query', lambda: query.get_transliterated_query())
        words = query.extract_words(base_penalty=PENALTY_IN_TOKEN_BREAK[qmod.BREAK_WORD])

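        # Look up the words in the word table and assign the resulting
        # tokens to every position where the word appears. Special terms
        # (type 'S') become near-item or qualifier tokens depending on
        # their operator and position; all other types map directly via
        # DB_TO_TOKEN_TYPE.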
        for row in await self.lookup_in_db(list(words.keys())):
            for trange in words[row.word_token]:
                token = ICUToken.from_db_row(row, trange.penalty or 0.0)
                if row.type == 'S':
                    if row.info['op'] in ('in', 'near'):
                        if trange.start == 0:
                            query.add_token(trange, qmod.TOKEN_NEAR_ITEM, token)
                    else:
                        if trange.start == 0 and trange.end == query.num_token_slots():
                            query.add_token(trange, qmod.TOKEN_NEAR_ITEM, token)
                        else:
                            query.add_token(trange, qmod.TOKEN_QUALIFIER, token)
                else:
                    query.add_token(trange, DB_TO_TOKEN_TYPE[row.type], token)

        self.add_extra_tokens(query)
        self.rerank_tokens(query)

        log().table_dump('Word tokens', _dump_word_tokens(query))

        return query

    def normalize_text(self, text: str) -> str:
        """ Bring the given text into a normalized form. That is the
            standardized form search will work with. All information removed
            at this stage is inevitably lost.
        """
        return cast(str, self.normalizer.transliterate(text)).strip('-: ')

    def split_query(self, query: qmod.QueryStruct) -> None:
        """ Transliterate the phrases and split them into tokens.
        """
        for phrase in query.source:
            query.nodes[-1].ptype = phrase.ptype
            phrase_split = re.split('([ :-])', phrase.text)
            # The zip construct will give us the pairs of word/break from
            # the regular expression split. As the split array ends on the
            # final word, we simply use the fillvalue to even out the list and
            # add the phrase break at the end.
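            # For example (hypothetical input): re.split('([ :-])', 'foo bar-baz')
            # yields ['foo', ' ', 'bar', '-', 'baz'] and the zip_longest below
            # pairs it up as ('foo', ' '), ('bar', '-'), ('baz', ',').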
            for word, breakchar in zip_longest(*[iter(phrase_split)]*2, fillvalue=','):
                if not word:
                    continue
                trans = self.transliterator.transliterate(word)
                if trans:
                    for term in trans.split(' '):
                        if term:
                            query.add_node(qmod.BREAK_TOKEN, phrase.ptype,
                                           PENALTY_IN_TOKEN_BREAK[qmod.BREAK_TOKEN],
                                           term, word)
                    query.nodes[-1].adjust_break(breakchar,
                                                 PENALTY_IN_TOKEN_BREAK[breakchar])

        query.nodes[-1].adjust_break(qmod.BREAK_END, PENALTY_IN_TOKEN_BREAK[qmod.BREAK_END])

    async def lookup_in_db(self, words: List[str]) -> 'sa.Result[Any]':
        """ Return the token information from the database for the
            given word tokens.
        """
        t = self.conn.t.meta.tables['word']
        return await self.conn.execute(t.select().where(t.c.word_token.in_(words)))

    def add_extra_tokens(self, query: qmod.QueryStruct) -> None:
        """ Add tokens to query that are not saved in the database.
        """
        need_hnr = False
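        # Heuristic: a short, purely numeric term that stands between full
        # word breaks and has no house-number token from the database gets
        # an artificial house-number token with a high penalty.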
        for i, node in enumerate(query.nodes):
            is_full_token = node.btype not in (qmod.BREAK_TOKEN, qmod.BREAK_PART)
            if need_hnr and is_full_token \
                    and len(node.term_normalized) <= 4 and node.term_normalized.isdigit():
                query.add_token(qmod.TokenRange(i-1, i), qmod.TOKEN_HOUSENUMBER,
                                ICUToken(penalty=0.5, token=0,
                                         count=1, addr_count=1,
                                         lookup_word=node.term_lookup,
                                         word_token=node.term_lookup, info=None))

            need_hnr = is_full_token and not node.has_tokens(i+1, qmod.TOKEN_HOUSENUMBER)

    def rerank_tokens(self, query: qmod.QueryStruct) -> None:
        """ Add penalties to tokens that depend on the presence of other tokens.
        """
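        # Reranking rules: readings that compete with a postcode are
        # penalised unless they are short house numbers; short house numbers
        # containing a digit penalise competing non-house-number readings;
        # all other tokens except country and partial tokens are re-matched
        # against the normalized form of the terms they cover.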
        for i, node, tlist in query.iter_token_lists():
            if tlist.ttype == qmod.TOKEN_POSTCODE:
                for repl in node.starting:
                    if repl.end == tlist.end and repl.ttype != qmod.TOKEN_POSTCODE \
                       and (repl.ttype != qmod.TOKEN_HOUSENUMBER
                            or len(tlist.tokens[0].lookup_word) > 4):
                        repl.add_penalty(0.39)
            elif (tlist.ttype == qmod.TOKEN_HOUSENUMBER
                  and len(tlist.tokens[0].lookup_word) <= 3):
                if any(c.isdigit() for c in tlist.tokens[0].lookup_word):
                    for repl in node.starting:
                        if repl.end == tlist.end and repl.ttype != qmod.TOKEN_HOUSENUMBER:
                            repl.add_penalty(0.5 - tlist.tokens[0].penalty)
            elif tlist.ttype not in (qmod.TOKEN_COUNTRY, qmod.TOKEN_PARTIAL):
                norm = ' '.join(n.term_normalized for n in query.nodes[i + 1:tlist.end + 1]
                                if n.btype != qmod.BREAK_TOKEN)
                if not norm:
                    # Can happen when the token only covers a partial term
                    norm = query.nodes[i + 1].term_normalized
                for token in tlist.tokens:
                    cast(ICUToken, token).rematch(norm)


def _dump_word_tokens(query: qmod.QueryStruct) -> Iterator[List[Any]]:
    yield ['type', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info']
    for node in query.nodes:
        for tlist in node.starting:
            for token in tlist.tokens:
                t = cast(ICUToken, token)
                yield [tlist.ttype, t.token, t.word_token or '',
                       t.lookup_word or '', t.penalty, t.count, t.info]


async def create_query_analyzer(conn: SearchConnection) -> AbstractQueryAnalyzer:
    """ Create and set up a new query analyzer for a database based
        on the ICU tokenizer.
    """
    out = ICUQueryAnalyzer(conn)
    await out.setup()

    return out
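

# A rough usage sketch (assumptions: an already open SearchConnection ``conn``
# and the qmod.PHRASE_ANY phrase type; whether the text must be pre-normalized
# depends on the configured query preprocessing steps):
#
#     analyzer = await create_query_analyzer(conn)
#     phrases = [qmod.Phrase(qmod.PHRASE_ANY, 'birmingham')]
#     query = await analyzer.analyze_query(phrases)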