git.openstreetmap.org Git - nominatim.git/commitdiff
Merge remote-tracking branch 'upstream/master'
author Sarah Hoffmann <lonvia@denofr.de>
Fri, 10 Jan 2025 19:58:34 +0000 (20:58 +0100)
committer Sarah Hoffmann <lonvia@denofr.de>
Fri, 10 Jan 2025 19:58:34 +0000 (20:58 +0100)
packaging/nominatim-db/pyproject.toml
src/nominatim_api/search/db_search_builder.py
src/nominatim_api/search/icu_tokenizer.py

index 8604ac3d4cd59c92fabd1860486ca752290100de,c34ce937ea6fb457bb5d05d86600ecf02d885f41..f35880f5b7f89d76639f60c77081ff8523a94c94
@@@ -1,6 -1,5 +1,6 @@@
  [project]
  name = "nominatim-db"
 +version = "4.5.0.post8"
  description = "A tool for building a database of OpenStreetMap for geocoding and for searching the database. Database backend."
  readme = "README.md"
  requires-python = ">=3.7"
@@@ -16,15 -15,15 +16,15 @@@ classifiers = 
      "Operating System :: OS Independent",
  ]
  dependencies = [
 -    "psycopg",
 -    "python-dotenv",
 -    "jinja2",
 -    "pyYAML>=5.1",
 -    "datrie",
 -    "psutil",
 -    "PyICU"
 +    "psycopg[binary]==3.2.3",
 +    "python-dotenv==1.0.1",
 +    "jinja2==3.1.4",
 +    "pyYAML==6.0.2",
 +    "datrie==0.8.2",
 +    "psutil==6.1.0",
 +    "PyICU==2.14",
 +    "osmium==4.0.2",
  ]
 -dynamic = ["version"]
  
  [project.urls]
  Homepage = "https://nominatim.org"
@@@ -45,6 -44,7 +45,7 @@@ include = 
      "src/nominatim_db",
      "scripts",
      "lib-sql/**/*.sql",
+     "lib-lua/**/*.lua",
      "settings",
      "data/words.sql",
      "extra_src/nominatim_db/paths.py"
@@@ -66,6 -66,7 +67,7 @@@ packages = ["src/nominatim_db"
  
  [tool.hatch.build.targets.wheel.force-include]
  "lib-sql" = "nominatim_db/resources/lib-sql"
+ "lib-lua" = "nominatim_db/resources/lib-lua"
  "settings" = "nominatim_db/resources/settings"
  "data/country_osm_grid.sql.gz" = "nominatim_db/resources/country_osm_grid.sql.gz"
  "data/words.sql" = "nominatim_db/resources/words.sql"
index 1fbb7168bb44a963f31e83bfd99f6f534bcf9be5,a6335c1377c0eada94fb0a3554d31aa48d2e1742..dbf84a0cafb04ad36435c6d20627cab98d313f20
@@@ -215,13 -215,13 +215,13 @@@ class SearchBuilder
              yield penalty, exp_count, dbf.lookup_by_names(list(name_partials.keys()), addr_tokens)
              return
  
 -        addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 30000
 +        addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 50000
          # Partial term too frequent. Try looking up by rare full names first.
          name_fulls = self.query.get_tokens(name, TokenType.WORD)
          if name_fulls:
              fulls_count = sum(t.count for t in name_fulls)
  
 -            if fulls_count < 50000 or addr_count < 30000:
 +            if fulls_count < 80000 or addr_count < 50000:
                  yield penalty, fulls_count / (2**len(addr_tokens)), \
                      self.get_full_name_ranking(name_fulls, addr_partials,
                                                 fulls_count > 30000 / max(1, len(addr_tokens)))
          # This might yield wrong results; there is nothing we can do about that.
          if use_lookup:
              addr_restrict_tokens = []
 -            addr_lookup_tokens = []
 -            for t in addr_partials:
 -                if t.addr_count > 20000:
 -                    addr_restrict_tokens.append(t.token)
 -                else:
 -                    addr_lookup_tokens.append(t.token)
 +            addr_lookup_tokens = [t.token for t in addr_partials]
          else:
              addr_restrict_tokens = [t.token for t in addr_partials]
              addr_lookup_tokens = []
@@@ -428,6 -433,7 +428,7 @@@ PENALTY_WORDCHANGE = 
      BreakType.START: 0.0,
      BreakType.END: 0.0,
      BreakType.PHRASE: 0.0,
+     BreakType.SOFT_PHRASE: 0.0,
      BreakType.WORD: 0.1,
      BreakType.PART: 0.2,
      BreakType.TOKEN: 0.4
index ac78d03c1fc1776667d3cb31020cc03bfa0c5a90,6f1dcf7902ab65e5a4481d6a3d7b65e6274deab8..8f2069c1a8bc57212956d1ce64d3a5914c12f920
@@@ -7,21 -7,25 +7,25 @@@
  """
  Implementation of query analysis for the ICU tokenizer.
  """
- from typing import Tuple, Dict, List, Optional, NamedTuple, Iterator, Any, cast
+ from typing import Tuple, Dict, List, Optional, Iterator, Any, cast
  from collections import defaultdict
  import dataclasses
  import difflib
+ import re
+ from itertools import zip_longest
  
  from icu import Transliterator
  
  import sqlalchemy as sa
  
+ from ..errors import UsageError
  from ..typing import SaRow
  from ..sql.sqlalchemy_types import Json
  from ..connection import SearchConnection
  from ..logging import log
- from ..search import query as qmod
- from ..search.query_analyzer_factory import AbstractQueryAnalyzer
+ from . import query as qmod
+ from ..query_preprocessing.config import QueryConfig
+ from .query_analyzer_factory import AbstractQueryAnalyzer
  
  
  DB_TO_TOKEN_TYPE = {
      'C': qmod.TokenType.COUNTRY
  }
  
+ PENALTY_IN_TOKEN_BREAK = {
+      qmod.BreakType.START: 0.5,
+      qmod.BreakType.END: 0.5,
+      qmod.BreakType.PHRASE: 0.5,
+      qmod.BreakType.SOFT_PHRASE: 0.5,
+      qmod.BreakType.WORD: 0.1,
+      qmod.BreakType.PART: 0.0,
+      qmod.BreakType.TOKEN: 0.0
+ }
  
- class QueryPart(NamedTuple):
+ @dataclasses.dataclass
+ class QueryPart:
      """ Normalized and transliterated form of a single term in the query.
          When the term came out of a split during the transliteration,
          the normalized string is the full word before transliteration.
          The word number keeps track of the word before transliteration
          and can be used to identify partial transliterated terms.
+         Penalty is the break penalty for the break following the token.
      """
      token: str
      normalized: str
      word_number: int
+     penalty: float
  
  
  QueryParts = List[QueryPart]
@@@ -56,10 -73,12 +73,12 @@@ def yield_words(terms: List[QueryPart]
      total = len(terms)
      for first in range(start, total):
          word = terms[first].token
-         yield word, qmod.TokenRange(first, first + 1)
+         penalty = PENALTY_IN_TOKEN_BREAK[qmod.BreakType.WORD]
+         yield word, qmod.TokenRange(first, first + 1, penalty=penalty)
          for last in range(first + 1, min(first + 20, total)):
              word = ' '.join((word, terms[last].token))
-             yield word, qmod.TokenRange(first, last + 1)
+             penalty += terms[last - 1].penalty
+             yield word, qmod.TokenRange(first, last + 1, penalty=penalty)
  
  
  @dataclasses.dataclass
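
A minimal sketch of the penalty bookkeeping that yield_words now does, using two invented QueryPart entries (token strings and penalty values are made up; QueryPart.penalty is the penalty of the break following the token):

    parts = [QueryPart('hauptstr', 'hauptstraße', 0, 0.1),   # followed by a word break
             QueryPart('12', '12', 1, 0.5)]                  # followed by the phrase end

    # yield_words(parts, 0) would then produce roughly:
    #   'hauptstr'     TokenRange(0, 1, penalty=0.1)   # base WORD break penalty
    #   'hauptstr 12'  TokenRange(0, 2, penalty=0.2)   # 0.1 + parts[0].penalty
    #   '12'           TokenRange(1, 2, penalty=0.1)
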
@@@ -92,25 -111,25 +111,25 @@@ class ICUToken(qmod.Token)
          self.penalty += (distance/len(self.lookup_word))
  
      @staticmethod
-     def from_db_row(row: SaRow) -> 'ICUToken':
+     def from_db_row(row: SaRow, base_penalty: float = 0.0) -> 'ICUToken':
          """ Create a ICUToken from the row of the word table.
          """
          count = 1 if row.info is None else row.info.get('count', 1)
          addr_count = 1 if row.info is None else row.info.get('addr_count', 1)
  
-         penalty = 0.0
+         penalty = base_penalty
          if row.type == 'w':
-             penalty = 0.3
+             penalty += 0.3
          elif row.type == 'W':
              if len(row.word_token) == 1 and row.word_token == row.word:
-                 penalty = 0.2 if row.word.isdigit() else 0.3
+                 penalty += 0.2 if row.word.isdigit() else 0.3
          elif row.type == 'H':
-             penalty = sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
+             penalty += sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
              if all(not c.isdigit() for c in row.word_token):
                  penalty += 0.2 * (len(row.word_token) - 1)
          elif row.type == 'C':
              if len(row.word_token) == 1:
-                 penalty = 0.3
+                 penalty += 0.3
  
          if row.info is None:
              lookup_word = row.word
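
The new base_penalty argument stacks with the type-specific penalties instead of replacing them; a tiny illustration with invented numbers:

    # A partial-word row (type 'w') looked up over a range whose accumulated
    # break penalty is 0.1 now starts out at 0.1 + 0.3 instead of a flat 0.3.
    base_penalty = 0.1            # passed in as trange.penalty by the caller
    penalty = base_penalty
    penalty += 0.3                # row.type == 'w'
    # penalty == 0.4
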
@@@ -151,6 -170,8 +170,8 @@@ class ICUQueryAnalyzer(AbstractQueryAna
          self.transliterator = await self.conn.get_cached_value('ICUTOK', 'transliterator',
                                                                 _make_transliterator)
  
+         await self._setup_preprocessing()
          if 'word' not in self.conn.t.meta.tables:
              sa.Table('word', self.conn.t.meta,
                       sa.Column('word_id', sa.Integer),
                       sa.Column('word', sa.Text),
                       sa.Column('info', Json))
  
+     async def _setup_preprocessing(self) -> None:
+         """ Load the rules for preprocessing and set up the handlers.
+         """
+         rules = self.conn.config.load_sub_configuration('icu_tokenizer.yaml',
+                                                         config='TOKENIZER_CONFIG')
+         preprocessing_rules = rules.get('query-preprocessing', [])
+         self.preprocessors = []
+         for func in preprocessing_rules:
+             if 'step' not in func:
+                 raise UsageError("Preprocessing rule is missing the 'step' attribute.")
+             if not isinstance(func['step'], str):
+                 raise UsageError("'step' attribute must be a simple string.")
+             module = self.conn.config.load_plugin_module(
+                         func['step'], 'nominatim_api.query_preprocessing')
+             self.preprocessors.append(
+                 module.create(QueryConfig(func).set_normalizer(self.normalizer)))
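
For context, a sketch of the shape the loaded 'query-preprocessing' rules are expected to have; the step names below are illustrative assumptions, not necessarily what ships in icu_tokenizer.yaml:

    # Each entry needs a plain-string 'step'; anything else raises UsageError.
    preprocessing_rules = [
        {'step': 'normalize'},                  # hypothetical step name
        {'step': 'split_japanese_phrases'},     # hypothetical step name
    ]
    # Every step resolves to a module under nominatim_api.query_preprocessing whose
    # create() returns a callable mapping List[Phrase] -> List[Phrase]; analyze_query
    # then simply chains these callables over the incoming phrases.
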
      async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct:
          """ Analyze the given list of phrases and return the
              tokenized query.
          """
          log().section('Analyze query (using ICU tokenizer)')
-         normalized = list(filter(lambda p: p.text,
-                                  (qmod.Phrase(p.ptype, self.normalize_text(p.text))
-                                   for p in phrases)))
-         if len(normalized) == 1 \
-                 and normalized[0].text.count(' ') > 3 \
-                 and max(len(s) for s in normalized[0].text.split()) < 3:
+         for func in self.preprocessors:
+             phrases = func(phrases)
++
++        if len(phrases) == 1 \
++                and phrases[0].text.count(' ') > 3 \
++                and max(len(s) for s in phrases[0].text.split()) < 3:
 +            normalized = []
-         query = qmod.QueryStruct(normalized)
++
+         query = qmod.QueryStruct(phrases)
          log().var_dump('Normalized query', query.source)
          if not query.source:
              return query
  
          for row in await self.lookup_in_db(list(words.keys())):
              for trange in words[row.word_token]:
-                 token = ICUToken.from_db_row(row)
+                 token = ICUToken.from_db_row(row, trange.penalty or 0.0)
                  if row.type == 'S':
                      if row.info['op'] in ('in', 'near'):
                          if trange.start == 0:
          wordnr = 0
          for phrase in query.source:
              query.nodes[-1].ptype = phrase.ptype
-             for word in phrase.text.split(' '):
+             phrase_split = re.split('([ :-])', phrase.text)
+             # The zip construct will give us the pairs of word/break from
+             # the regular expression split. As the split array ends on the
+             # final word, we simply use the fillvalue to even out the list and
+             # add the phrase break at the end.
+             for word, breakchar in zip_longest(*[iter(phrase_split)]*2, fillvalue=','):
+                 if not word:
+                     continue
                  trans = self.transliterator.transliterate(word)
                  if trans:
                      for term in trans.split(' '):
                          if term:
-                             parts.append(QueryPart(term, word, wordnr))
+                             parts.append(QueryPart(term, word, wordnr,
+                                                    PENALTY_IN_TOKEN_BREAK[qmod.BreakType.TOKEN]))
                              query.add_node(qmod.BreakType.TOKEN, phrase.ptype)
-                     query.nodes[-1].btype = qmod.BreakType.WORD
+                     query.nodes[-1].btype = qmod.BreakType(breakchar)
+                     parts[-1].penalty = PENALTY_IN_TOKEN_BREAK[qmod.BreakType(breakchar)]
                  wordnr += 1
-             query.nodes[-1].btype = qmod.BreakType.PHRASE
  
              for word, wrange in yield_words(parts, phrase_start):
                  words[word].append(wrange)
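
A small sketch of what the re.split/zip_longest pairing above produces for an invented phrase:

    import re
    from itertools import zip_longest

    phrase_split = re.split('([ :-])', 'marie-curie-str 3')
    # -> ['marie', '-', 'curie', '-', 'str', ' ', '3']

    pairs = list(zip_longest(*[iter(phrase_split)]*2, fillvalue=','))
    # -> [('marie', '-'), ('curie', '-'), ('str', ' '), ('3', ',')]
    #    the fillvalue ',' supplies the trailing phrase break;
    #    each break character is then mapped via qmod.BreakType(breakchar).
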
          """ Add tokens to query that are not saved in the database.
          """
          for part, node, i in zip(parts, query.nodes, range(1000)):
-             if len(part.token) <= 4 and part[0].isdigit()\
+             if len(part.token) <= 4 and part.token.isdigit()\
                 and not node.has_tokens(i+1, qmod.TokenType.HOUSENUMBER):
                  query.add_token(qmod.TokenRange(i, i+1), qmod.TokenType.HOUSENUMBER,
                                  ICUToken(penalty=0.5, token=0,