Merge pull request #3293 from lonvia/rematch-against-country-code
diff --git a/nominatim/api/search/db_search_builder.py b/nominatim/api/search/db_search_builder.py
index a0018480d2dee7cf10525e051a8e97e6b3641475..fd8cc7af90ffb3aa71581aac842e602d82cc0d39 100644
--- a/nominatim/api/search/db_search_builder.py
+++ b/nominatim/api/search/db_search_builder.py
@@ -15,6 +15,7 @@ from nominatim.api.search.query import QueryStruct, Token, TokenType, TokenRange
 from nominatim.api.search.token_assignment import TokenAssignment
 import nominatim.api.search.db_search_fields as dbf
 import nominatim.api.search.db_searches as dbs
+import nominatim.api.search.db_search_lookups as lookups
 
 
 def wrap_near_search(categories: List[Tuple[str, str]],
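The new `lookups` import replaces the string constants `'restrict'`, `'lookup_all'` and `'lookup_any'` used further down with classes from `nominatim.api.search.db_search_lookups`. A minimal sketch of the idea, assuming the strategies map to PostgreSQL array operators (the real classes build SQLAlchemy expressions; the `sql()` helpers here are illustrative stand-ins):

```python
# Illustrative only: models the move from string constants to strategy
# classes. The real classes in nominatim.api.search.db_search_lookups
# generate SQLAlchemy expressions; sql() is a made-up stand-in.
from typing import List


class LookupAll:
    """ Rows must contain ALL given tokens in the vector column. """
    @staticmethod
    def sql(column: str, tokens: List[int]) -> str:
        return f"{column} @> ARRAY[{', '.join(map(str, tokens))}]"


class LookupAny:
    """ Rows must contain AT LEAST ONE of the given tokens. """
    @staticmethod
    def sql(column: str, tokens: List[int]) -> str:
        return f"{column} && ARRAY[{', '.join(map(str, tokens))}]"


class Restrict:
    """ Tokens only narrow down rows found via another lookup;
        no index of their own is consulted. """
    @staticmethod
    def sql(column: str, tokens: List[int]) -> str:
        return f"{column} @> ARRAY[{', '.join(map(str, tokens))}]"


assert LookupAny.sql('name_vector', [1, 2]) == 'name_vector && ARRAY[1, 2]'
```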
@@ -90,6 +91,8 @@ class SearchBuilder:
             return
 
         near_items = self.get_near_items(assignment)
+        if near_items is not None and not near_items:
+            return # impossible combination of near items and category parameter
 
         if assignment.name is None:
             if near_items and not sdata.postcodes:
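The added guard relies on `get_near_items()` having a three-way contract: `None` means the assignment carries no near-item token at all, while an empty container means near-item tokens exist but none survived the category filter (see the comment added in the last hunk). A small illustration of that contract, with hypothetical names:

```python
# Hypothetical illustration of the tri-state return value the guard
# distinguishes; names do not come from the Nominatim API.
from typing import Dict, Optional, Tuple

def classify(near_items: Optional[Dict[Tuple[str, str], float]]) -> str:
    if near_items is None:
        return 'no near-item: build an ordinary search'
    if not near_items:
        return 'near-item filtered out: yield no search at all'
    return 'wrap the inner search in a NearSearch'

assert classify(None) == 'no near-item: build an ordinary search'
assert classify({}) == 'near-item filtered out: yield no search at all'
assert classify({('amenity', 'cafe'): 0.0}).startswith('wrap')
```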
@@ -111,7 +114,10 @@ class SearchBuilder:
             penalty = min(near_items.penalties)
             near_items.penalties = [p - penalty for p in near_items.penalties]
             for search in builder:
-                yield dbs.NearSearch(penalty + assignment.penalty, near_items, search)
+                search_penalty = search.penalty
+                search.penalty = 0.0
+                yield dbs.NearSearch(penalty + assignment.penalty + search_penalty,
+                                     near_items, search)
         else:
             for search in builder:
                 search.penalty += assignment.penalty
@@ -147,7 +153,7 @@ class SearchBuilder:
                 sdata.lookups = [dbf.FieldLookup('nameaddress_vector',
                                                  [t.token for r in address
                                                   for t in self.query.get_partials_list(r)],
-                                                 'restrict')]
+                                                 lookups.Restrict)]
                 penalty += 0.2
             yield dbs.PostcodeSearch(penalty, sdata)
 
@@ -157,23 +163,27 @@ class SearchBuilder:
         """ Build a simple address search for special entries where the
             housenumber is the main name token.
         """
-        sdata.lookups = [dbf.FieldLookup('name_vector', [t.token for t in hnrs], 'lookup_any')]
+        sdata.lookups = [dbf.FieldLookup('name_vector', [t.token for t in hnrs], lookups.LookupAny)]
+        expected_count = sum(t.count for t in hnrs)
 
         partials = [t for trange in address
                        for t in self.query.get_partials_list(trange)]
 
-        if len(partials) != 1 or partials[0].count < 10000:
+        if expected_count < 8000:
             sdata.lookups.append(dbf.FieldLookup('nameaddress_vector',
-                                                 [t.token for t in partials], 'lookup_all'))
+                                                 [t.token for t in partials], lookups.Restrict))
+        elif len(partials) != 1 or partials[0].count < 10000:
+            sdata.lookups.append(dbf.FieldLookup('nameaddress_vector',
+                                                 [t.token for t in partials], lookups.LookupAll))
         else:
             sdata.lookups.append(
                 dbf.FieldLookup('nameaddress_vector',
                                 [t.token for t
                                  in self.query.get_tokens(address[0], TokenType.WORD)],
-                                'lookup_any'))
+                                lookups.LookupAny))
 
         sdata.housenumbers = dbf.WeightedStrings([], [])
-        yield dbs.PlaceSearch(0.05, sdata, sum(t.count for t in hnrs))
+        yield dbs.PlaceSearch(0.05, sdata, expected_count)
 
 
     def build_name_search(self, sdata: dbf.SearchData,
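The rewritten branch picks the lookup strategy for the address terms based on `expected_count`, the number of matches expected for the housenumber tokens: few candidates only need restricting, many candidates with reasonably rare partials warrant a full `LookupAll`, and a single very frequent partial falls back to matching any full word. A runnable sketch of just that selection logic (`Token` is a simplified stand-in for the real token class):

```python
# Stand-alone sketch of the strategy selection above; thresholds mirror
# the diff, Token is a simplified stand-in.
from dataclasses import dataclass
from typing import List

@dataclass
class Token:
    token: int
    count: int

def choose_address_lookup(expected_count: int, partials: List[Token]) -> str:
    if expected_count < 8000:
        # Few housenumber candidates: address terms only restrict them.
        return 'Restrict'
    if len(partials) != 1 or partials[0].count < 10000:
        # Otherwise require all partials, as long as they are not a
        # single overly frequent term.
        return 'LookupAll'
    # Single very frequent partial: match any full word instead.
    return 'LookupAny'

assert choose_address_lookup(500, [Token(1, 20000)]) == 'Restrict'
assert choose_address_lookup(9000, [Token(1, 50)]) == 'LookupAll'
assert choose_address_lookup(9000, [Token(1, 20000)]) == 'LookupAny'
```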
@@ -214,24 +224,25 @@ class SearchBuilder:
 
         # Partial term too frequent. Try looking up by rare full names first.
         name_fulls = self.query.get_tokens(name, TokenType.WORD)
-        fulls_count = sum(t.count for t in name_fulls)
-        # At this point drop unindexed partials from the address.
-        # This might yield wrong results, nothing we can do about that.
-        if not partials_indexed:
-            addr_tokens = [t.token for t in addr_partials if t.is_indexed]
-            penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed)
-        # Any of the full names applies with all of the partials from the address
-        yield penalty, fulls_count / (2**len(addr_partials)),\
-              dbf.lookup_by_any_name([t.token for t in name_fulls], addr_tokens,
-                                     'restrict' if fulls_count < 10000 else 'lookup_all')
+        if name_fulls:
+            fulls_count = sum(t.count for t in name_fulls)
+            # At this point drop unindexed partials from the address.
+            # This might yield wrong results, nothing we can do about that.
+            if not partials_indexed:
+                addr_tokens = [t.token for t in addr_partials if t.is_indexed]
+                penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed)
+            # Any of the full names applies with all of the partials from the address
+            yield penalty, fulls_count / (2**len(addr_partials)),\
+                  dbf.lookup_by_any_name([t.token for t in name_fulls],
+                                         addr_tokens, fulls_count > 10000)
 
         # To catch remaining results, lookup by name and address
         # We only do this if there is a reasonable number of results expected.
         exp_count = exp_count / (2**len(addr_partials)) if addr_partials else exp_count
         if exp_count < 10000 and all(t.is_indexed for t in name_partials):
-            lookup = [dbf.FieldLookup('name_vector', name_tokens, 'lookup_all')]
+            lookup = [dbf.FieldLookup('name_vector', name_tokens, lookups.LookupAll)]
             if addr_tokens:
-                lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, 'lookup_all'))
+                lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, lookups.LookupAll))
             penalty += 0.35 * max(0, 5 - len(name_partials) - len(addr_tokens))
             yield penalty, exp_count, lookup
 
@@ -348,6 +359,9 @@ class SearchBuilder:
             tokens: Dict[Tuple[str, str], float] = {}
             for t in self.query.get_tokens(assignment.near_item, TokenType.NEAR_ITEM):
                 cat = t.get_category()
+                # The category of a near search will be that of near_item.
+                # Thus, if the search is restricted by a category parameter,
+                # the two sets must intersect.
                 if (not self.details.categories or cat in self.details.categories)\
                    and t.penalty < tokens.get(cat, 1000.0):
                     tokens[cat] = t.penalty