]> git.openstreetmap.org Git - nominatim.git/blobdiff - nominatim/api/search/db_search_builder.py
Merge pull request #3260 from lonvia/improve-catgeory-search
[nominatim.git] / nominatim / api / search / db_search_builder.py
index c9e48b0f3784f1bb7f6cd6cc9934b25c757c7b33..7826925aed6ce77271e92bbef4612a3b1e5357bd 100644 (file)
@@ -7,7 +7,7 @@
 """
 Conversion from token assignment to an abstract DB search.
 """
 """
 Conversion from token assignment to an abstract DB search.
 """
-from typing import Optional, List, Tuple, Iterator
+from typing import Optional, List, Tuple, Iterator, Dict
 import heapq
 
 from nominatim.api.types import SearchDetails, DataLayer
 import heapq
 
 from nominatim.api.types import SearchDetails, DataLayer
@@ -206,22 +206,22 @@ class SearchBuilder:
 
         partials_indexed = all(t.is_indexed for t in name_partials) \
                            and all(t.is_indexed for t in addr_partials)
 
         partials_indexed = all(t.is_indexed for t in name_partials) \
                            and all(t.is_indexed for t in addr_partials)
-        exp_count = min(t.count for t in name_partials)
+        exp_count = min(t.count for t in name_partials) / (2**(len(name_partials) - 1))
 
 
-        if (len(name_partials) > 3 or exp_count < 3000) and partials_indexed:
+        if (len(name_partials) > 3 or exp_count < 8000) and partials_indexed:
             yield penalty, exp_count, dbf.lookup_by_names(name_tokens, addr_tokens)
             return
 
         # Partial term too frequent. Try looking up by rare full names first.
         name_fulls = self.query.get_tokens(name, TokenType.WORD)
             yield penalty, exp_count, dbf.lookup_by_names(name_tokens, addr_tokens)
             return
 
         # Partial term too frequent. Try looking up by rare full names first.
         name_fulls = self.query.get_tokens(name, TokenType.WORD)
-        fulls_count = sum(t.count for t in name_fulls) / (2**len(addr_partials))
+        fulls_count = sum(t.count for t in name_fulls)
         # At this point drop unindexed partials from the address.
         # This might yield wrong results, nothing we can do about that.
         if not partials_indexed:
             addr_tokens = [t.token for t in addr_partials if t.is_indexed]
             penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed)
         # Any of the full names applies with all of the partials from the address
         # At this point drop unindexed partials from the address.
         # This might yield wrong results, nothing we can do about that.
         if not partials_indexed:
             addr_tokens = [t.token for t in addr_partials if t.is_indexed]
             penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed)
         # Any of the full names applies with all of the partials from the address
-        yield penalty, fulls_count,\
+        yield penalty, fulls_count / (2**len(addr_partials)),\
               dbf.lookup_by_any_name([t.token for t in name_fulls], addr_tokens,
                                      'restrict' if fulls_count < 10000 else 'lookup_all')
 
               dbf.lookup_by_any_name([t.token for t in name_fulls], addr_tokens,
                                      'restrict' if fulls_count < 10000 else 'lookup_all')
 
@@ -339,12 +339,13 @@ class SearchBuilder:
             Returns None if no category search is requested.
         """
         if assignment.category:
             Returns None if no category search is requested.
         """
         if assignment.category:
-            tokens = [t for t in self.query.get_tokens(assignment.category,
-                                                       TokenType.CATEGORY)
-                      if not self.details.categories
-                         or t.get_category() in self.details.categories]
-            return dbf.WeightedCategories([t.get_category() for t in tokens],
-                                          [t.penalty for t in tokens])
+            tokens: Dict[Tuple[str, str], float] = {}
+            for t in self.query.get_tokens(assignment.category, TokenType.CATEGORY):
+                cat = t.get_category()
+                if (not self.details.categories or cat in self.details.categories)\
+                   and t.penalty < tokens.get(cat, 1000.0):
+                    tokens[cat] = t.penalty
+            return dbf.WeightedCategories(list(tokens.keys()), list(tokens.values()))
 
         if self.details.categories:
             return dbf.WeightedCategories(self.details.categories,
 
         if self.details.categories:
             return dbf.WeightedCategories(self.details.categories,