# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of query analysis for the ICU tokenizer.
"""
from typing import Tuple, Dict, List, Optional, NamedTuple, Iterator, Any, cast
from copy import copy
from collections import defaultdict
import dataclasses
import difflib

from icu import Transliterator

import sqlalchemy as sa

from nominatim.typing import SaRow
from nominatim.api.connection import SearchConnection
from nominatim.api.logging import log
from nominatim.api.search import query as qmod
from nominatim.api.search.query_analyzer_factory import AbstractQueryAnalyzer


DB_TO_TOKEN_TYPE = {
    'W': qmod.TokenType.WORD,
    'w': qmod.TokenType.PARTIAL,
    'H': qmod.TokenType.HOUSENUMBER,
    'P': qmod.TokenType.POSTCODE,
    'C': qmod.TokenType.COUNTRY
}
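
# Note: rows of type 'S' (special terms) have no entry in this map. They are
# handled separately in analyze_query below, where they become CATEGORY or
# QUALIFIER tokens depending on their operator.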


class QueryPart(NamedTuple):
    """ Normalized and transliterated form of a single term in the query.
        When the term came out of a split during the transliteration,
        the normalized string is the full word before transliteration.
        The word number keeps track of the word before transliteration
        and can be used to identify partial transliterated terms.
    """
    token: str
    normalized: str
    word_number: int


QueryParts = List[QueryPart]
WordDict = Dict[str, List[qmod.TokenRange]]


def yield_words(terms: List[QueryPart], start: int) -> Iterator[Tuple[str, qmod.TokenRange]]:
    """ Return all combinations of words in the terms list after the
        given position.
    """
    total = len(terms)
    for first in range(start, total):
        word = terms[first].token
        yield word, qmod.TokenRange(first, first + 1)
        # Multi-term words are capped at 20 consecutive terms.
        for last in range(first + 1, min(first + 20, total)):
            word = ' '.join((word, terms[last].token))
            yield word, qmod.TokenRange(first, last + 1)
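
# Example (illustrative): for the terms 'foo' and 'bar' with start=0,
# yield_words produces ('foo', TokenRange(0, 1)), ('foo bar', TokenRange(0, 2))
# and ('bar', TokenRange(1, 2)).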


@dataclasses.dataclass
class ICUToken(qmod.Token):
    """ Specialised token for ICU tokenizer.
    """
    word_token: str
    info: Optional[Dict[str, Any]]

    def get_category(self) -> Tuple[str, str]:
        assert self.info
        return self.info.get('class', ''), self.info.get('type', '')


    def rematch(self, norm: str) -> None:
        """ Check how well the token matches the given normalized string
            and add a penalty, if necessary.
        """
        if not self.lookup_word:
            return

        seq = difflib.SequenceMatcher(a=self.lookup_word, b=norm)
        distance = 0
        for tag, afrom, ato, bfrom, bto in seq.get_opcodes():
            if tag == 'delete' and (afrom == 0 or ato == len(self.lookup_word)):
                distance += 1
            elif tag == 'replace':
                distance += max((ato-afrom), (bto-bfrom))
            elif tag != 'equal':
                distance += abs((ato-afrom) - (bto-bfrom))
        self.penalty += (distance/len(self.lookup_word))
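
    # Example (illustrative): with lookup_word='abc' and norm='abcd',
    # SequenceMatcher yields ('equal', 0, 3, 0, 3) and ('insert', 3, 3, 3, 4),
    # so distance becomes 1 and the penalty grows by 1/3.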

    @staticmethod
    def from_db_row(row: SaRow) -> 'ICUToken':
        """ Create an ICUToken from a row of the word table.
        """
        count = 1 if row.info is None else row.info.get('count', 1)

        penalty = 0.0
        if row.type == 'w':
            penalty = 0.3
        elif row.type == 'H':
            penalty = sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
            if all(not c.isdigit() for c in row.word_token):
                penalty += 0.2 * (len(row.word_token) - 1)

        if row.info is None:
            lookup_word = row.word
        else:
            lookup_word = row.info.get('lookup', row.word)
        if lookup_word:
            lookup_word = lookup_word.split('@', 1)[0]
        else:
            lookup_word = row.word_token

        return ICUToken(penalty=penalty, token=row.word_id, count=count,
                        lookup_word=lookup_word, is_indexed=True,
                        word_token=row.word_token, info=row.info)
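
    # Illustrative penalty values under this scheme: a partial word ('w' row)
    # starts at 0.3, while a letter-only house-number token like 'ter' gets
    # 0.1 per letter plus 0.2 per extra character (here 0.3 + 0.4 = 0.7).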


class ICUQueryAnalyzer(AbstractQueryAnalyzer):
    """ Converter for query strings into a tokenized query
        using the tokens created by an ICU tokenizer.
    """

    def __init__(self, conn: SearchConnection) -> None:
        self.conn = conn


    async def setup(self) -> None:
        """ Set up static data structures needed for the analysis.
        """
        rules = await self.conn.get_property('tokenizer_import_normalisation')
        self.normalizer = Transliterator.createFromRules("normalization", rules)
        rules = await self.conn.get_property('tokenizer_import_transliteration')
        self.transliterator = Transliterator.createFromRules("transliteration", rules)

        if 'word' not in self.conn.t.meta.tables:
            sa.Table('word', self.conn.t.meta,
                     sa.Column('word_id', sa.Integer),
                     sa.Column('word_token', sa.Text, nullable=False),
                     sa.Column('type', sa.Text, nullable=False),
                     sa.Column('word', sa.Text),
                     sa.Column('info', self.conn.t.types.Json))
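
    # The column layout above mirrors the 'word' table that the ICU tokenizer
    # creates during import; it is registered here only for query building.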


    async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct:
        """ Analyze the given list of phrases and return the
            tokenized query.
        """
        log().section('Analyze query (using ICU tokenizer)')
        normalized = list(filter(lambda p: p.text,
                                 (qmod.Phrase(p.ptype, self.normalizer.transliterate(p.text))
                                  for p in phrases)))
        query = qmod.QueryStruct(normalized)
        log().var_dump('Normalized query', query.source)
        if not query.source:
            return query

        parts, words = self.split_query(query)
        log().var_dump('Transliterated query', lambda: _dump_transliterated(query, parts))

        for row in await self.lookup_in_db(list(words.keys())):
            for trange in words[row.word_token]:
                token = ICUToken.from_db_row(row)
                if row.type == 'S':
                    if row.info['op'] in ('in', 'near'):
                        if trange.start == 0:
                            query.add_token(trange, qmod.TokenType.CATEGORY, token)
                    else:
                        query.add_token(trange, qmod.TokenType.QUALIFIER, token)
                        if trange.start == 0 or trange.end == query.num_token_slots():
                            token = copy(token)
                            token.penalty += 0.1 * (query.num_token_slots())
                            query.add_token(trange, qmod.TokenType.CATEGORY, token)
                else:
                    query.add_token(trange, DB_TO_TOKEN_TYPE[row.type], token)

        self.add_extra_tokens(query, parts)
        self.rerank_tokens(query, parts)

        log().table_dump('Word tokens', _dump_word_tokens(query))

        return query


    def split_query(self, query: qmod.QueryStruct) -> Tuple[QueryParts, WordDict]:
        """ Transliterate the phrases and split them into tokens.

            Returns the list of transliterated tokens together with their
            normalized form and a dictionary of words for lookup together
            with their position.
        """
        parts: QueryParts = []
        phrase_start = 0
        words = defaultdict(list)
        wordnr = 0
        for phrase in query.source:
            query.nodes[-1].ptype = phrase.ptype
            for word in phrase.text.split(' '):
                trans = self.transliterator.transliterate(word)
                if trans:
                    for term in trans.split(' '):
                        if term:
                            parts.append(QueryPart(term, word, wordnr))
                            query.add_node(qmod.BreakType.TOKEN, phrase.ptype)
                    query.nodes[-1].btype = qmod.BreakType.WORD
                wordnr += 1
            query.nodes[-1].btype = qmod.BreakType.PHRASE

            for word, wrange in yield_words(parts, phrase_start):
                words[word].append(wrange)

            phrase_start = len(parts)
        query.nodes[-1].btype = qmod.BreakType.END

        return parts, words
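
    # Example (illustrative, assuming a transliteration that leaves plain
    # ASCII unchanged): for the single phrase 'main st', parts becomes
    # [QueryPart('main', 'main', 0), QueryPart('st', 'st', 1)] and words maps
    # 'main', 'st' and 'main st' to their respective TokenRanges.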


    async def lookup_in_db(self, words: List[str]) -> 'sa.Result[Any]':
        """ Return the token information from the database for the
            given list of word tokens.
        """
        t = self.conn.t.meta.tables['word']
        return await self.conn.execute(t.select().where(t.c.word_token.in_(words)))


    def add_extra_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None:
        """ Add tokens to the query that are not saved in the database.
        """
        for part, node, i in zip(parts, query.nodes, range(1000)):
            # Any short, all-digit term may be a house number, even when
            # it is missing from the word table.
            if len(part.token) <= 4 and part.token.isdigit() \
               and not node.has_tokens(i+1, qmod.TokenType.HOUSENUMBER):
                # Positional arguments: penalty, token, count, lookup_word,
                # is_indexed, word_token, info.
                query.add_token(qmod.TokenRange(i, i+1), qmod.TokenType.HOUSENUMBER,
                                ICUToken(0.5, 0, 1, part.token, True, part.token, None))


    def rerank_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None:
        """ Add penalties to tokens that depend on the presence of other tokens.
        """
        for i, node, tlist in query.iter_token_lists():
            if tlist.ttype == qmod.TokenType.POSTCODE:
                for repl in node.starting:
                    if repl.end == tlist.end and repl.ttype != qmod.TokenType.POSTCODE \
                       and (repl.ttype != qmod.TokenType.HOUSENUMBER
                            or len(tlist.tokens[0].lookup_word) > 4):
                        repl.add_penalty(0.39)
            elif tlist.ttype == qmod.TokenType.HOUSENUMBER:
                if any(c.isdigit() for c in tlist.tokens[0].lookup_word):
                    for repl in node.starting:
                        if repl.end == tlist.end \
                           and repl.ttype != qmod.TokenType.HOUSENUMBER:
                            repl.add_penalty(0.5 - tlist.tokens[0].penalty)
            elif tlist.ttype not in (qmod.TokenType.COUNTRY, qmod.TokenType.PARTIAL):
                norm = parts[i].normalized
                for j in range(i + 1, tlist.end):
                    if parts[j - 1].word_number != parts[j].word_number:
                        norm += ' ' + parts[j].normalized
                for token in tlist.tokens:
                    cast(ICUToken, token).rematch(norm)
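
    # Note: the numbers above are heuristic weights. A competing reading of a
    # postcode-shaped term is pushed down by 0.39, while any non-house-number
    # reading of a digit-bearing house number is penalised relative to the
    # house-number token's own penalty.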


def _dump_transliterated(query: qmod.QueryStruct, parts: QueryParts) -> str:
    out = query.nodes[0].btype.value
    for node, part in zip(query.nodes[1:], parts):
        out += part.token + node.btype.value
    return out


def _dump_word_tokens(query: qmod.QueryStruct) -> Iterator[List[Any]]:
    yield ['type', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info']
    for node in query.nodes:
        for tlist in node.starting:
            for token in tlist.tokens:
                t = cast(ICUToken, token)
                yield [tlist.ttype.name, t.token, t.word_token or '',
                       t.lookup_word or '', t.penalty, t.count, t.info]


async def create_query_analyzer(conn: SearchConnection) -> AbstractQueryAnalyzer:
    """ Create and set up a new query analyzer for a database based
        on the ICU tokenizer.
    """
    out = ICUQueryAnalyzer(conn)
    await out.setup()

    return out
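

# Illustrative usage (a sketch, not part of the module; assumes an open
# SearchConnection `conn` and the PhraseType enum from the query module):
#
#     analyzer = await create_query_analyzer(conn)
#     query = await analyzer.analyze_query(
#         [qmod.Phrase(qmod.PhraseType.NONE, 'hauptstr 5, berlin')])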