add tests for interaction of category parameter with category terms
[nominatim.git] / nominatim / api / search / db_search_builder.py
index 9ff8c03c90c3d6ef4b7f1ff1c038e24bdb165171..f89d8b62e827178928bcc3c8d7fd210a30ef00a8 100644 (file)
@@ -7,7 +7,7 @@
 """
 Convertion from token assignment to an abstract DB search.
 """
 """
 Convertion from token assignment to an abstract DB search.
 """
-from typing import Optional, List, Tuple, Iterator
+from typing import Optional, List, Tuple, Iterator, Dict
 import heapq
 
 from nominatim.api.types import SearchDetails, DataLayer
@@ -15,7 +15,6 @@ from nominatim.api.search.query import QueryStruct, Token, TokenType, TokenRange
 from nominatim.api.search.token_assignment import TokenAssignment
 import nominatim.api.search.db_search_fields as dbf
 import nominatim.api.search.db_searches as dbs
-from nominatim.api.logging import log
 
 
 def wrap_near_search(categories: List[Tuple[str, str]],
@@ -90,12 +89,14 @@ class SearchBuilder:
         if sdata is None:
             return
 
-        categories = self.get_search_categories(assignment)
+        near_items = self.get_near_items(assignment)
+        if near_items is not None and not near_items:
+            return # impossible combination of near items and category parameter
 
         if assignment.name is None:
-            if categories and not sdata.postcodes:
-                sdata.qualifiers = categories
-                categories = None
+            if near_items and not sdata.postcodes:
+                sdata.qualifiers = near_items
+                near_items = None
                 builder = self.build_poi_search(sdata)
             elif assignment.housenumber:
                 hnr_tokens = self.query.get_tokens(assignment.housenumber,
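
The early return added above relies on a three-way contract: get_near_items() returns None when the query contains no near-item search at all, a non-empty dbf.WeightedCategories when it does, and an empty one when the categories request parameter filters out every near-item token. A minimal sketch of the falsiness this depends on, using a hypothetical stand-in for the real WeightedCategories class:

    # Hypothetical stand-in for dbf.WeightedCategories; assumes empty -> falsy.
    class WeightedCategories:
        def __init__(self, values, penalties):
            self.values = values        # e.g. [('amenity', 'restaurant')]
            self.penalties = penalties  # one penalty per category

        def __bool__(self):
            return bool(self.values)    # no surviving category reads as False

    near_items = WeightedCategories([], [])
    assert near_items is not None and not near_items  # triggers the early return
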
@@ -103,18 +104,20 @@ class SearchBuilder:
                 builder = self.build_housenumber_search(sdata, hnr_tokens, assignment.address)
             else:
                 builder = self.build_special_search(sdata, assignment.address,
-                                                    bool(categories))
+                                                    bool(near_items))
         else:
             builder = self.build_name_search(sdata, assignment.name, assignment.address,
-                                             bool(categories))
+                                             bool(near_items))
 
-        if categories:
-            penalty = min(categories.penalties)
-            categories.penalties = [p - penalty for p in categories.penalties]
+        if near_items:
+            penalty = min(near_items.penalties)
+            near_items.penalties = [p - penalty for p in near_items.penalties]
             for search in builder:
-                yield dbs.NearSearch(penalty, categories, search)
+                yield dbs.NearSearch(penalty + assignment.penalty, near_items, search)
         else:
-            yield from builder
+            for search in builder:
+                search.penalty += assignment.penalty
+                yield search
 
 
     def build_poi_search(self, sdata: dbf.SearchData) -> Iterator[dbs.AbstractSearch]:
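
Normalising the penalties before wrapping keeps the relative ranking of the near-item categories intact, while the unavoidable minimum, together with the penalty of the token assignment itself, is charged once on the enclosing NearSearch. A worked example with assumed numbers:

    penalties = [0.3, 0.5]    # assumed per-category penalties
    assignment_penalty = 0.1  # assumed penalty of the token assignment

    base = min(penalties)                            # 0.3
    relative = [p - base for p in penalties]         # [0.0, 0.2], kept per category
    near_search_penalty = base + assignment_penalty  # 0.4, charged once per search
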
@@ -156,13 +159,22 @@ class SearchBuilder:
         """ Build a simple address search for special entries where the
             housenumber is the main name token.
         """
         """ Build a simple address search for special entries where the
             housenumber is the main name token.
         """
-        partial_tokens: List[int] = []
-        for trange in address:
-            partial_tokens.extend(t.token for t in self.query.get_partials_list(trange))
+        sdata.lookups = [dbf.FieldLookup('name_vector', [t.token for t in hnrs], 'lookup_any')]
+
+        partials = [t for trange in address
+                       for t in self.query.get_partials_list(trange)]
+
+        if len(partials) != 1 or partials[0].count < 10000:
+            sdata.lookups.append(dbf.FieldLookup('nameaddress_vector',
+                                                 [t.token for t in partials], 'lookup_all'))
+        else:
+            sdata.lookups.append(
+                dbf.FieldLookup('nameaddress_vector',
+                                [t.token for t
+                                 in self.query.get_tokens(address[0], TokenType.WORD)],
+                                'lookup_any'))
 
-        sdata.lookups = [dbf.FieldLookup('name_vector', [t.token for t in hnrs], 'lookup_any'),
-                         dbf.FieldLookup('nameaddress_vector', partial_tokens, 'lookup_all')
-                        ]
+        sdata.housenumbers = dbf.WeightedStrings([], [])
         yield dbs.PlaceSearch(0.05, sdata, sum(t.count for t in hnrs))
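
The rewritten housenumber search now always matches the housenumber tokens against name_vector, then picks one of two address lookups: all partial terms must match ('lookup_all') unless the address consists of a single very frequent partial (count of 10000 or more), in which case any of the rarer full-word tokens may match instead ('lookup_any'). A condensed sketch of that decision, using a hypothetical Token type with only the fields the heuristic reads:

    from dataclasses import dataclass

    @dataclass
    class Token:        # hypothetical: just what the heuristic needs
        token: int
        count: int      # estimated occurrences in the search index

    def choose_addr_lookup(partials, full_words):
        # Several partials, or one rare partial: require all of them.
        if len(partials) != 1 or partials[0].count < 10000:
            return [t.token for t in partials], 'lookup_all'
        # A single overly frequent partial: match any full word instead.
        return [t.token for t in full_words], 'lookup_any'
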
 
 
@@ -187,65 +199,43 @@ class SearchBuilder:
             be searched for. This takes into account how frequent the terms
             are and tries to find a lookup that optimizes index use.
         """
-        penalty = 0.0 # extra penalty currently unused
-
+        penalty = 0.0 # extra penalty
         name_partials = self.query.get_partials_list(name)
-        exp_name_count = min(t.count for t in name_partials)
-        addr_partials = []
-        for trange in address:
-            addr_partials.extend(self.query.get_partials_list(trange))
+        name_tokens = [t.token for t in name_partials]
+
+        addr_partials = [t for r in address for t in self.query.get_partials_list(r)]
         addr_tokens = [t.token for t in addr_partials]
+
         partials_indexed = all(t.is_indexed for t in name_partials) \
                            and all(t.is_indexed for t in addr_partials)
+        exp_count = min(t.count for t in name_partials) / (2**(len(name_partials) - 1))
 
-        if (len(name_partials) > 3 or exp_name_count < 1000) and partials_indexed:
-            # Lookup by name partials, use address partials to restrict results.
-            lookup = [dbf.FieldLookup('name_vector',
-                                  [t.token for t in name_partials], 'lookup_all')]
-            if addr_tokens:
-                lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, 'restrict'))
-            yield penalty, exp_name_count, lookup
-            return
-
-        exp_addr_count = min(t.count for t in addr_partials) if addr_partials else exp_name_count
-        if exp_addr_count < 1000 and partials_indexed:
-            # Lookup by address partials and restrict results through name terms.
-            yield penalty, exp_addr_count,\
-                  [dbf.FieldLookup('name_vector', [t.token for t in name_partials], 'restrict'),
-                   dbf.FieldLookup('nameaddress_vector', addr_tokens, 'lookup_all')]
+        if (len(name_partials) > 3 or exp_count < 8000) and partials_indexed:
+            yield penalty, exp_count, dbf.lookup_by_names(name_tokens, addr_tokens)
             return
 
         # Partial term too frequent. Try looking up by rare full names first.
         name_fulls = self.query.get_tokens(name, TokenType.WORD)
-        rare_names = list(filter(lambda t: t.count < 1000, name_fulls))
+        fulls_count = sum(t.count for t in name_fulls)
         # At this point drop unindexed partials from the address.
         # This might yield wrong results, nothing we can do about that.
         if not partials_indexed:
             addr_tokens = [t.token for t in addr_partials if t.is_indexed]
-            log().var_dump('before', penalty)
             penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed)
-            log().var_dump('after', penalty)
-        if rare_names:
-            # Any of the full names applies with all of the partials from the address
-            lookup = [dbf.FieldLookup('name_vector', [t.token for t in rare_names], 'lookup_any')]
-            if addr_tokens:
-                lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, 'restrict'))
-            yield penalty, sum(t.count for t in rare_names), lookup
+        # Any of the full names applies with all of the partials from the address
+        yield penalty, fulls_count / (2**len(addr_partials)),\
+              dbf.lookup_by_any_name([t.token for t in name_fulls], addr_tokens,
+                                     'restrict' if fulls_count < 10000 else 'lookup_all')
 
         # To catch remaining results, lookup by name and address
-        if all(t.is_indexed for t in name_partials):
-            lookup = [dbf.FieldLookup('name_vector',
-                                      [t.token for t in name_partials], 'lookup_all')]
-        else:
-            # we don't have the partials, try with the non-rare names
-            non_rare_names = [t.token for t in name_fulls if t.count >= 1000]
-            if not non_rare_names:
-                return
-            lookup = [dbf.FieldLookup('name_vector', non_rare_names, 'lookup_any')]
-        if addr_tokens:
-            lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, 'lookup_all'))
-        yield penalty + 0.1 * max(0, 5 - len(name_partials) - len(addr_tokens)),\
-              min(exp_name_count, exp_addr_count), lookup
+        # We only do this if there is a reasonable number of results expected.
+        exp_count = exp_count / (2**len(addr_partials)) if addr_partials else exp_count
+        if exp_count < 10000 and all(t.is_indexed for t in name_partials):
+            lookup = [dbf.FieldLookup('name_vector', name_tokens, 'lookup_all')]
+            if addr_tokens:
+                lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, 'lookup_all'))
+            penalty += 0.35 * max(0, 5 - len(name_partials) - len(addr_tokens))
+            yield penalty, exp_count, lookup
 
 
     def get_name_ranking(self, trange: TokenRange) -> dbf.FieldRanking:
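
The new exp_count estimate assumes that every additional term roughly halves the candidate set: start from the rarest name partial, divide by two for each further name partial, and later once more for each address partial. Lookups are then only emitted while the estimate stays below the hard-coded thresholds (8000 for the pure-partials lookup, 10000 for the final name-plus-address lookup). A numeric sketch under that halving assumption:

    name_counts = [50000, 12000, 9000]  # assumed counts of three name partials
    n_addr_partials = 2                 # assumed number of address partials

    exp_count = min(name_counts) / (2 ** (len(name_counts) - 1))  # 9000/4 = 2250.0
    exp_count /= 2 ** n_addr_partials                             # 2250/4 = 562.5
    assert exp_count < 10000  # the final name+address lookup is still worthwhile
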
@@ -333,8 +323,15 @@ class SearchBuilder:
                               self.query.get_tokens(assignment.postcode,
                                                     TokenType.POSTCODE))
         if assignment.qualifier:
-            sdata.set_qualifiers(self.query.get_tokens(assignment.qualifier,
-                                                       TokenType.QUALIFIER))
+            tokens = self.query.get_tokens(assignment.qualifier, TokenType.QUALIFIER)
+            if self.details.categories:
+                tokens = [t for t in tokens if t.get_category() in self.details.categories]
+                if not tokens:
+                    return None
+            sdata.set_qualifiers(tokens)
+        elif self.details.categories:
+            sdata.qualifiers = dbf.WeightedCategories(self.details.categories,
+                                                      [0.0] * len(self.details.categories))
 
         if assignment.address:
             sdata.set_ranking([self.get_addr_ranking(r) for r in assignment.address])
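
When the query contains a qualifier term and the request also carries a categories parameter, only qualifier tokens whose category lies in the parameter set survive; an empty intersection makes build_search_data() return None, dropping the whole assignment. A minimal sketch of the filter, assuming get_category() yields an (osm_key, osm_value) pair:

    def filter_qualifiers(tokens, requested_categories):
        # No category parameter given: every qualifier token stays.
        if not requested_categories:
            return tokens
        kept = [t for t in tokens if t.get_category() in requested_categories]
        return kept or None  # None signals an impossible combination
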
@@ -344,23 +341,22 @@ class SearchBuilder:
         return sdata
 
 
-    def get_search_categories(self,
-                              assignment: TokenAssignment) -> Optional[dbf.WeightedCategories]:
-        """ Collect tokens for category search or use the categories
+    def get_near_items(self, assignment: TokenAssignment) -> Optional[dbf.WeightedCategories]:
+        """ Collect tokens for near items search or use the categories
             requested per parameter.
             Returns None if no category search is requested.
         """
-        if assignment.category:
-            tokens = [t for t in self.query.get_tokens(assignment.category,
-                                                       TokenType.CATEGORY)
-                      if not self.details.categories
-                         or t.get_category() in self.details.categories]
-            return dbf.WeightedCategories([t.get_category() for t in tokens],
-                                          [t.penalty for t in tokens])
-
-        if self.details.categories:
-            return dbf.WeightedCategories(self.details.categories,
-                                          [0.0] * len(self.details.categories))
+        if assignment.near_item:
+            tokens: Dict[Tuple[str, str], float] = {}
+            for t in self.query.get_tokens(assignment.near_item, TokenType.NEAR_ITEM):
+                cat = t.get_category()
+                # The category of a near search will be that of near_item.
+                # Thus, if search is restricted to a category parameter,
+                # the two sets must intersect.
+                if (not self.details.categories or cat in self.details.categories)\
+                   and t.penalty < tokens.get(cat, 1000.0):
+                    tokens[cat] = t.penalty
+            return dbf.WeightedCategories(list(tokens.keys()), list(tokens.values()))
 
         return None
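
The Dict added to the typing import serves the rewritten get_near_items(): several near-item tokens may map to the same category, and the dictionary keeps only the smallest penalty seen for each one. The same pattern in isolation, with assumed token data:

    from typing import Dict, List, Tuple

    # Assumed (category, penalty) pairs produced by several near-item tokens.
    candidates: List[Tuple[Tuple[str, str], float]] = [
        (('amenity', 'restaurant'), 0.2),
        (('amenity', 'restaurant'), 0.1),  # same category, lower penalty wins
        (('amenity', 'cafe'), 0.3),
    ]

    best: Dict[Tuple[str, str], float] = {}
    for cat, penalty in candidates:
        if penalty < best.get(cat, 1000.0):  # 1000.0 stands in for "no entry yet"
            best[cat] = penalty

    assert best == {('amenity', 'restaurant'): 0.1, ('amenity', 'cafe'): 0.3}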