# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of query analysis for the ICU tokenizer.
"""
from typing import Tuple, Dict, List, Optional, Iterator, Any, cast
import dataclasses
import difflib
import re
from itertools import zip_longest

from icu import Transliterator

import sqlalchemy as sa

from ..errors import UsageError
from ..typing import SaRow
from ..sql.sqlalchemy_types import Json
from ..connection import SearchConnection
from ..logging import log
from . import query as qmod
from ..query_preprocessing.config import QueryConfig
from .query_analyzer_factory import AbstractQueryAnalyzer
from .postcode_parser import PostcodeParser


DB_TO_TOKEN_TYPE = {
    'W': qmod.TOKEN_WORD,
    'w': qmod.TOKEN_PARTIAL,
    'H': qmod.TOKEN_HOUSENUMBER,
    'P': qmod.TOKEN_POSTCODE,
    'C': qmod.TOKEN_COUNTRY
}

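# Penalty associated with each break type. It is used as the base penalty
# when extracting words (extract_words() below is called with the word-break
# penalty) and as the penalty attached to each break in split_query().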
PENALTY_IN_TOKEN_BREAK = {
    qmod.BREAK_START: 0.5,
    qmod.BREAK_END: 0.5,
    qmod.BREAK_PHRASE: 0.5,
    qmod.BREAK_SOFT_PHRASE: 0.5,
    qmod.BREAK_WORD: 0.1,
    qmod.BREAK_PART: 0.0,
    qmod.BREAK_TOKEN: 0.0
}


@dataclasses.dataclass
class ICUToken(qmod.Token):
    """ Specialised token for ICU tokenizer.
    """
    word_token: str
    info: Optional[Dict[str, Any]]

    def get_category(self) -> Tuple[str, str]:
        assert self.info
        return self.info.get('class', ''), self.info.get('type', '')

    def rematch(self, norm: str) -> None:
        """ Check how well the token matches the given normalized string
            and add a penalty, if necessary.
        """
        if not self.lookup_word:
            return

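        # Derive a rough edit distance from the opcodes of difflib's
        # SequenceMatcher: insertions and deletions at the start or end of
        # the lookup word count as 1, replacements count with the length of
        # the longer side, other insertions and deletions with their length.
        # The penalty grows with the distance relative to the length of the
        # lookup word.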
        seq = difflib.SequenceMatcher(a=self.lookup_word, b=norm)
        distance = 0
        for tag, afrom, ato, bfrom, bto in seq.get_opcodes():
            if tag in ('delete', 'insert') and (afrom == 0 or ato == len(self.lookup_word)):
                distance += 1
            elif tag == 'replace':
                distance += max((ato-afrom), (bto-bfrom))
            elif tag != 'equal':
                distance += abs((ato-afrom) - (bto-bfrom))
        self.penalty += (distance/len(self.lookup_word))

    @staticmethod
    def from_db_row(row: SaRow, base_penalty: float = 0.0) -> 'ICUToken':
        """ Create an ICUToken from a row of the word table.
        """
        count = 1 if row.info is None else row.info.get('count', 1)
        addr_count = 1 if row.info is None else row.info.get('addr_count', 1)

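        # Apply type-specific penalties on top of the base penalty: partial
        # words ('w') are always penalised, single-letter full words ('W')
        # and single-letter country tokens ('C') as well, and house numbers
        # ('H') become more expensive with every non-digit character they
        # contain.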
        penalty = base_penalty
        if row.type == 'w':
            penalty += 0.3
        elif row.type == 'W':
            if len(row.word_token) == 1 and row.word_token == row.word:
                penalty += 0.2 if row.word.isdigit() else 0.3
        elif row.type == 'H':
            penalty += sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
            if all(not c.isdigit() for c in row.word_token):
                penalty += 0.2 * (len(row.word_token) - 1)
        elif row.type == 'C':
            if len(row.word_token) == 1:
                penalty += 0.3

        if row.info is None:
            lookup_word = row.word
        else:
            lookup_word = row.info.get('lookup', row.word)
        if lookup_word:
            lookup_word = lookup_word.split('@', 1)[0]
        else:
            lookup_word = row.word_token

        return ICUToken(penalty=penalty, token=row.word_id, count=max(1, count),
                        lookup_word=lookup_word,
                        word_token=row.word_token, info=row.info,
                        addr_count=max(1, addr_count))


class ICUQueryAnalyzer(AbstractQueryAnalyzer):
    """ Converter for query strings into a tokenized query
        using the tokens created by an ICU tokenizer.
    """
    def __init__(self, conn: SearchConnection) -> None:
        self.conn = conn
        self.postcode_parser = PostcodeParser(conn.config)

    async def setup(self) -> None:
        """ Set up static data structures needed for the analysis.
        """
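        # Normalizer and transliterator are created from the rules that were
        # stored in the database during import. They are cached on the
        # connection, so the ICU rules do not have to be recompiled for
        # every query.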
        async def _make_normalizer() -> Any:
            rules = await self.conn.get_property('tokenizer_import_normalisation')
            return Transliterator.createFromRules("normalization", rules)

        self.normalizer = await self.conn.get_cached_value('ICUTOK', 'normalizer',
                                                           _make_normalizer)

        async def _make_transliterator() -> Any:
            rules = await self.conn.get_property('tokenizer_import_transliteration')
            return Transliterator.createFromRules("transliteration", rules)

        self.transliterator = await self.conn.get_cached_value('ICUTOK', 'transliterator',
                                                               _make_transliterator)

        await self._setup_preprocessing()

        if 'word' not in self.conn.t.meta.tables:
            sa.Table('word', self.conn.t.meta,
                     sa.Column('word_id', sa.Integer),
                     sa.Column('word_token', sa.Text, nullable=False),
                     sa.Column('type', sa.Text, nullable=False),
                     sa.Column('word', sa.Text),
                     sa.Column('info', Json))

    async def _setup_preprocessing(self) -> None:
        """ Load the rules for preprocessing and set up the handlers.
        """

        rules = self.conn.config.load_sub_configuration('icu_tokenizer.yaml',
                                                        config='TOKENIZER_CONFIG')
        preprocessing_rules = rules.get('query-preprocessing', [])

        self.preprocessors = []

        for func in preprocessing_rules:
            if 'step' not in func:
                raise UsageError("Preprocessing rule is missing the 'step' attribute.")
            if not isinstance(func['step'], str):
                raise UsageError("'step' attribute must be a simple string.")

            module = self.conn.config.load_plugin_module(
                        func['step'], 'nominatim_api.query_preprocessing')
            self.preprocessors.append(
                module.create(QueryConfig(func).set_normalizer(self.normalizer)))

    async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct:
        """ Analyze the given list of phrases and return the
            tokenized query.
        """
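        # The analysis proceeds in stages: run the configured preprocessors,
        # transliterate and split the phrases into tokens, look the word
        # tokens up in the word table, add synthesized house number and
        # postcode tokens and finally rerank the tokens against the
        # normalized form of the query.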
        log().section('Analyze query (using ICU tokenizer)')
        for func in self.preprocessors:
            phrases = func(phrases)
        query = qmod.QueryStruct(phrases)

        log().var_dump('Normalized query', query.source)
        if not query.source:
            return query

        self.split_query(query)
        log().var_dump('Transliterated query', lambda: query.get_transliterated_query())
        words = query.extract_words(base_penalty=PENALTY_IN_TOKEN_BREAK[qmod.BREAK_WORD])

        for row in await self.lookup_in_db(list(words.keys())):
            for trange in words[row.word_token]:
                token = ICUToken.from_db_row(row, trange.penalty or 0.0)
                if row.type == 'S':
                    if row.info['op'] in ('in', 'near'):
                        if trange.start == 0:
                            query.add_token(trange, qmod.TOKEN_NEAR_ITEM, token)
                    else:
                        if trange.start == 0 and trange.end == query.num_token_slots():
                            query.add_token(trange, qmod.TOKEN_NEAR_ITEM, token)
                        else:
                            query.add_token(trange, qmod.TOKEN_QUALIFIER, token)
                else:
                    query.add_token(trange, DB_TO_TOKEN_TYPE[row.type], token)

        self.add_extra_tokens(query)
        for start, end, pc in self.postcode_parser.parse(query):
            query.add_token(qmod.TokenRange(start, end),
                            qmod.TOKEN_POSTCODE,
                            ICUToken(penalty=0.1, token=0, count=1, addr_count=1,
                                     lookup_word=pc, word_token=pc, info=None))
        self.rerank_tokens(query)

        log().table_dump('Word tokens', _dump_word_tokens(query))

        return query

    def normalize_text(self, text: str) -> str:
        """ Bring the given text into a normalized form. That is the
            standardized form search will work with. All information removed
            at this stage is inevitably lost.
        """
        return cast(str, self.normalizer.transliterate(text)).strip('-: ')

    def split_query(self, query: qmod.QueryStruct) -> None:
        """ Transliterate the phrases and split them into tokens.
        """
        for phrase in query.source:
            query.nodes[-1].ptype = phrase.ptype
            phrase_split = re.split('([ :-])', phrase.text)
            # The zip construct will give us the pairs of word/break from
            # the regular expression split. As the split array ends on the
            # final word, we simply use the fillvalue to even out the list and
            # add the phrase break at the end.
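            # For example, "hauptstr 134" splits into ['hauptstr', ' ', '134']
            # and yields the pairs ('hauptstr', ' ') and ('134', ',').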
            for word, breakchar in zip_longest(*[iter(phrase_split)]*2, fillvalue=','):
                if not word:
                    continue
                trans = self.transliterator.transliterate(word)
                if trans:
                    for term in trans.split(' '):
                        if term:
                            query.add_node(qmod.BREAK_TOKEN, phrase.ptype,
                                           PENALTY_IN_TOKEN_BREAK[qmod.BREAK_TOKEN],
                                           term, word)
                    query.nodes[-1].adjust_break(breakchar,
                                                 PENALTY_IN_TOKEN_BREAK[breakchar])

        query.nodes[-1].adjust_break(qmod.BREAK_END, PENALTY_IN_TOKEN_BREAK[qmod.BREAK_END])

    async def lookup_in_db(self, words: List[str]) -> 'sa.Result[Any]':
        """ Return the token information from the database for the
            given word tokens.

            This function excludes postcode tokens.
        """
        t = self.conn.t.meta.tables['word']
        return await self.conn.execute(t.select()
                                        .where(t.c.word_token.in_(words))
                                        .where(t.c.type != 'P'))

    def add_extra_tokens(self, query: qmod.QueryStruct) -> None:
        """ Add tokens to the query that are not saved in the database.
        """
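        # Currently this only synthesizes house number tokens: a purely
        # numeric term of at most four digits that follows a complete word
        # is offered as a potential house number with a rather high penalty,
        # unless the database lookup already produced a house number token
        # for that position.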
        need_hnr = False
        for i, node in enumerate(query.nodes):
            is_full_token = node.btype not in (qmod.BREAK_TOKEN, qmod.BREAK_PART)
            if need_hnr and is_full_token \
                    and len(node.term_normalized) <= 4 and node.term_normalized.isdigit():
                query.add_token(qmod.TokenRange(i-1, i), qmod.TOKEN_HOUSENUMBER,
                                ICUToken(penalty=0.5, token=0,
                                         count=1, addr_count=1,
                                         lookup_word=node.term_lookup,
                                         word_token=node.term_lookup, info=None))

            need_hnr = is_full_token and not node.has_tokens(i+1, qmod.TOKEN_HOUSENUMBER)

    def rerank_tokens(self, query: qmod.QueryStruct) -> None:
        """ Add penalties to tokens that depend on the presence of other tokens.
        """
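        # Three cases are covered: alternative readings that compete with a
        # postcode get a penalty, alternative readings that compete with a
        # short, digit-containing house number get a penalty, and all other
        # full tokens (except country and partial tokens) are re-matched
        # against the normalized form of the terms they cover.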
        for i, node, tlist in query.iter_token_lists():
            if tlist.ttype == qmod.TOKEN_POSTCODE:
                for repl in node.starting:
                    if repl.end == tlist.end and repl.ttype != qmod.TOKEN_POSTCODE \
                       and (repl.ttype != qmod.TOKEN_HOUSENUMBER
                            or len(tlist.tokens[0].lookup_word) > 4):
                        repl.add_penalty(0.39)
            elif (tlist.ttype == qmod.TOKEN_HOUSENUMBER
                  and len(tlist.tokens[0].lookup_word) <= 3):
                if any(c.isdigit() for c in tlist.tokens[0].lookup_word):
                    for repl in node.starting:
                        if repl.end == tlist.end and repl.ttype != qmod.TOKEN_HOUSENUMBER:
                            repl.add_penalty(0.5 - tlist.tokens[0].penalty)
            elif tlist.ttype not in (qmod.TOKEN_COUNTRY, qmod.TOKEN_PARTIAL):
                norm = ' '.join(n.term_normalized for n in query.nodes[i + 1:tlist.end + 1]
                                if n.btype != qmod.BREAK_TOKEN)
                if not norm:
                    # Can happen when the token only covers a partial term
                    norm = query.nodes[i + 1].term_normalized
                for token in tlist.tokens:
                    cast(ICUToken, token).rematch(norm)


def _dump_word_tokens(query: qmod.QueryStruct) -> Iterator[List[Any]]:
    yield ['type', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info']
    for node in query.nodes:
        for tlist in node.starting:
            for token in tlist.tokens:
                t = cast(ICUToken, token)
                yield [tlist.ttype, t.token, t.word_token or '',
                       t.lookup_word or '', t.penalty, t.count, t.info]


async def create_query_analyzer(conn: SearchConnection) -> AbstractQueryAnalyzer:
    """ Create and set up a new query analyzer for a database based
        on the ICU tokenizer.
    """
    out = ICUQueryAnalyzer(conn)
    await out.setup()

    return out
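
# Illustrative use (not part of the module): from an async context with an
# open SearchConnection `conn`, analysis would look roughly like this. The
# Phrase construction below is an assumption for the sketch.
#
#     analyzer = await create_query_analyzer(conn)
#     text = analyzer.normalize_text('Hauptstr 134, Berlin')
#     query = await analyzer.analyze_query([qmod.Phrase(qmod.PHRASE_ANY, text)])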