git.openstreetmap.org Git - nominatim.git/commitdiff
remove support for unindexed tokens
author Sarah Hoffmann <lonvia@denofr.de>
Sun, 22 Sep 2024 08:39:10 +0000 (10:39 +0200)
committer Sarah Hoffmann <lonvia@denofr.de>
Sun, 22 Sep 2024 08:39:10 +0000 (10:39 +0200)
This was a special feature of the legacy tokenizer, which would not
index very frequent tokens.

src/nominatim_api/search/db_search_builder.py
src/nominatim_api/search/icu_tokenizer.py
src/nominatim_api/search/query.py
test/python/api/search/test_api_search_query.py
test/python/api/search/test_db_search_builder.py
test/python/api/search/test_token_assignment.py

index 6453509ebce93d5ba26433742cb8263c87eb7045..1ac6db2b2a96d5ebc5095d81bd936b0002194600 100644 (file)
@@ -167,8 +167,7 @@ class SearchBuilder:
         expected_count = sum(t.count for t in hnrs)
 
         partials = {t.token: t.addr_count for trange in address
-                       for t in self.query.get_partials_list(trange)
-                       if t.is_indexed}
+                       for t in self.query.get_partials_list(trange)}
 
         if not partials:
             # can happen when none of the partials is indexed
@@ -219,11 +218,9 @@ class SearchBuilder:
         addr_partials = [t for r in address for t in self.query.get_partials_list(r)]
         addr_tokens = list({t.token for t in addr_partials})
 
-        partials_indexed = all(t.is_indexed for t in name_partials.values()) \
-                           and all(t.is_indexed for t in addr_partials)
         exp_count = min(t.count for t in name_partials.values()) / (2**(len(name_partials) - 1))
 
-        if (len(name_partials) > 3 or exp_count < 8000) and partials_indexed:
+        if (len(name_partials) > 3 or exp_count < 8000):
             yield penalty, exp_count, dbf.lookup_by_names(list(name_partials.keys()), addr_tokens)
             return
 
@@ -232,8 +229,6 @@ class SearchBuilder:
         name_fulls = self.query.get_tokens(name, TokenType.WORD)
         if name_fulls:
             fulls_count = sum(t.count for t in name_fulls)
-            if partials_indexed:
-                penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed)
 
             if fulls_count < 50000 or addr_count < 30000:
                 yield penalty,fulls_count / (2**len(addr_tokens)), \
@@ -243,8 +238,7 @@ class SearchBuilder:
         # To catch remaining results, lookup by name and address
         # We only do this if there is a reasonable number of results expected.
         exp_count = exp_count / (2**len(addr_tokens)) if addr_tokens else exp_count
-        if exp_count < 10000 and addr_count < 20000\
-           and all(t.is_indexed for t in name_partials.values()):
+        if exp_count < 10000 and addr_count < 20000:
             penalty += 0.35 * max(1 if name_fulls else 0.1,
                                   5 - len(name_partials) - len(addr_tokens))
             yield penalty, exp_count,\
@@ -260,11 +254,10 @@ class SearchBuilder:
         addr_restrict_tokens = []
         addr_lookup_tokens = []
         for t in addr_partials:
-            if t.is_indexed:
-                if t.addr_count > 20000:
-                    addr_restrict_tokens.append(t.token)
-                else:
-                    addr_lookup_tokens.append(t.token)
+            if t.addr_count > 20000:
+                addr_restrict_tokens.append(t.token)
+            else:
+                addr_lookup_tokens.append(t.token)
 
         if addr_restrict_tokens:
             lookup.append(dbf.FieldLookup('nameaddress_vector',
@@ -289,13 +282,12 @@ class SearchBuilder:
             addr_restrict_tokens = []
             addr_lookup_tokens = []
             for t in addr_partials:
-                if t.is_indexed:
-                    if t.addr_count > 20000:
-                        addr_restrict_tokens.append(t.token)
-                    else:
-                        addr_lookup_tokens.append(t.token)
+                if t.addr_count > 20000:
+                    addr_restrict_tokens.append(t.token)
+                else:
+                    addr_lookup_tokens.append(t.token)
         else:
-            addr_restrict_tokens = [t.token for t in addr_partials if t.is_indexed]
+            addr_restrict_tokens = [t.token for t in addr_partials]
             addr_lookup_tokens = []
 
         return dbf.lookup_by_any_name([t.token for t in name_fulls],
index 971e95beec1a6935b58e7c9cc4879d9797a73f1b..1aadc97e80170181fad8c35a1d70f7eb6464696d 100644 (file)
@@ -123,7 +123,7 @@ class ICUToken(qmod.Token):
             lookup_word = row.word_token
 
         return ICUToken(penalty=penalty, token=row.word_id, count=max(1, count),
-                        lookup_word=lookup_word, is_indexed=True,
+                        lookup_word=lookup_word,
                         word_token=row.word_token, info=row.info,
                         addr_count=max(1, addr_count))
 
@@ -259,7 +259,9 @@ class ICUQueryAnalyzer(AbstractQueryAnalyzer):
             if len(part.token) <= 4 and part[0].isdigit()\
                and not node.has_tokens(i+1, qmod.TokenType.HOUSENUMBER):
                 query.add_token(qmod.TokenRange(i, i+1), qmod.TokenType.HOUSENUMBER,
-                                ICUToken(0.5, 0, 1, 1, part.token, True, part.token, None))
+                                ICUToken(penalty=0.5, token=0,
+                                         count=1, addr_count=1, lookup_word=part.token,
+                                         word_token=part.token, info=None))
 
 
     def rerank_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None:
index 04b7f1b8a1e6cf695ccbd881a6f034332727036a..53482df84a11d228ec5c771b71dfc739f9258b1e 100644 (file)
@@ -101,7 +101,6 @@ class Token(ABC):
     count: int
     addr_count: int
     lookup_word: str
-    is_indexed: bool
 
 
     @abstractmethod
index 7154ae084317845075cb3efe57f40cca7f099f94..71caf5b7f282c400ccb8c6fc2a48e08b5bfa54d4 100644 (file)
@@ -19,7 +19,7 @@ class MyToken(query.Token):
 
 def mktoken(tid: int):
     return MyToken(penalty=3.0, token=tid, count=1, addr_count=1,
-                   lookup_word='foo', is_indexed=True)
+                   lookup_word='foo')
 
 
 @pytest.mark.parametrize('ptype,ttype', [('NONE', 'WORD'),
index 5d984014d18113e15f6198d246db64e5b000edad..371a6f024ca886ab17ca5f789c168b2ddd8242b9 100644 (file)
@@ -33,7 +33,7 @@ def make_query(*args):
                 q.add_token(TokenRange(start, end), ttype,
                             MyToken(penalty=0.5 if ttype == TokenType.PARTIAL else 0.0,
                                     token=tid, count=1, addr_count=1,
-                                    lookup_word=word, is_indexed=True))
+                                    lookup_word=word))
 
 
     return q
@@ -397,14 +397,14 @@ def make_counted_searches(name_part, name_full, address_part, address_full,
     q.add_node(BreakType.END, PhraseType.NONE)
 
     q.add_token(TokenRange(0, 1), TokenType.PARTIAL,
-                MyToken(0.5, 1, name_part, 1, 'name_part', True))
+                MyToken(0.5, 1, name_part, 1, 'name_part'))
     q.add_token(TokenRange(0, 1), TokenType.WORD,
-                MyToken(0, 101, name_full, 1, 'name_full', True))
+                MyToken(0, 101, name_full, 1, 'name_full'))
     for i in range(num_address_parts):
         q.add_token(TokenRange(i + 1, i + 2), TokenType.PARTIAL,
-                    MyToken(0.5, 2, address_part, 1, 'address_part', True))
+                    MyToken(0.5, 2, address_part, 1, 'address_part'))
         q.add_token(TokenRange(i + 1, i + 2), TokenType.WORD,
-                    MyToken(0, 102, address_full, 1, 'address_full', True))
+                    MyToken(0, 102, address_full, 1, 'address_full'))
 
     builder = SearchBuilder(q, SearchDetails())
 
index 884d29328380323d08749964046d951f8c37f999..0d89ed5f522cee983acc5c437ef2bc28ee2e6af3 100644 (file)
@@ -20,7 +20,7 @@ class MyToken(Token):
 def make_query(*args):
     q = QueryStruct([Phrase(args[0][1], '')])
     dummy = MyToken(penalty=3.0, token=45, count=1, addr_count=1,
-                    lookup_word='foo', is_indexed=True)
+                    lookup_word='foo')
 
     for btype, ptype, _ in args[1:]:
         q.add_node(btype, ptype)