X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/a4d7cdd2ad7b1e087751e55a56454781aa6580bc..8215748ac04689102fcfb5a014ad30c2a4d8ede0:/src/nominatim_api/search/db_search_builder.py

diff --git a/src/nominatim_api/search/db_search_builder.py b/src/nominatim_api/search/db_search_builder.py
index 6453509e..0d7487a4 100644
--- a/src/nominatim_api/search/db_search_builder.py
+++ b/src/nominatim_api/search/db_search_builder.py
@@ -167,8 +167,7 @@ class SearchBuilder:
         expected_count = sum(t.count for t in hnrs)
 
         partials = {t.token: t.addr_count for trange in address
-                    for t in self.query.get_partials_list(trange)
-                    if t.is_indexed}
+                    for t in self.query.get_partials_list(trange)}
 
         if not partials:
             # can happen when none of the partials is indexed
@@ -219,23 +218,19 @@ class SearchBuilder:
         addr_partials = [t for r in address for t in self.query.get_partials_list(r)]
         addr_tokens = list({t.token for t in addr_partials})
 
-        partials_indexed = all(t.is_indexed for t in name_partials.values()) \
-            and all(t.is_indexed for t in addr_partials)
         exp_count = min(t.count for t in name_partials.values()) / (2**(len(name_partials) - 1))
 
-        if (len(name_partials) > 3 or exp_count < 8000) and partials_indexed:
+        if (len(name_partials) > 3 or exp_count < 8000):
             yield penalty, exp_count, dbf.lookup_by_names(list(name_partials.keys()), addr_tokens)
             return
 
-        addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 30000
+        addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 50000
         # Partial term to frequent. Try looking up by rare full names first.
         name_fulls = self.query.get_tokens(name, TokenType.WORD)
         if name_fulls:
             fulls_count = sum(t.count for t in name_fulls)
-            if partials_indexed:
-                penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed)
 
-            if fulls_count < 50000 or addr_count < 30000:
+            if fulls_count < 80000 or addr_count < 50000:
                 yield penalty,fulls_count / (2**len(addr_tokens)), \
                     self.get_full_name_ranking(name_fulls, addr_partials,
                                                fulls_count > 30000 / max(1, len(addr_tokens)))
@@ -243,8 +238,7 @@ class SearchBuilder:
         # To catch remaining results, lookup by name and address
         # We only do this if there is a reasonable number of results expected.
         exp_count = exp_count / (2**len(addr_tokens)) if addr_tokens else exp_count
-        if exp_count < 10000 and addr_count < 20000\
-           and all(t.is_indexed for t in name_partials.values()):
+        if exp_count < 10000 and addr_count < 20000:
             penalty += 0.35 * max(1 if name_fulls else 0.1,
                                   5 - len(name_partials) - len(addr_tokens))
             yield penalty, exp_count,\
@@ -260,11 +254,10 @@ class SearchBuilder:
             addr_restrict_tokens = []
             addr_lookup_tokens = []
             for t in addr_partials:
-                if t.is_indexed:
-                    if t.addr_count > 20000:
-                        addr_restrict_tokens.append(t.token)
-                    else:
-                        addr_lookup_tokens.append(t.token)
+                if t.addr_count > 20000:
+                    addr_restrict_tokens.append(t.token)
+                else:
+                    addr_lookup_tokens.append(t.token)
 
             if addr_restrict_tokens:
                 lookup.append(dbf.FieldLookup('nameaddress_vector',
@@ -287,15 +280,9 @@ class SearchBuilder:
         # This might yield wrong results, nothing we can do about that.
         if use_lookup:
             addr_restrict_tokens = []
-            addr_lookup_tokens = []
-            for t in addr_partials:
-                if t.is_indexed:
-                    if t.addr_count > 20000:
-                        addr_restrict_tokens.append(t.token)
-                    else:
-                        addr_lookup_tokens.append(t.token)
+            addr_lookup_tokens = [t.token for t in addr_partials]
         else:
-            addr_restrict_tokens = [t.token for t in addr_partials if t.is_indexed]
+            addr_restrict_tokens = [t.token for t in addr_partials]
             addr_lookup_tokens = []
 
         return dbf.lookup_by_any_name([t.token for t in name_fulls],
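
The last two hunks drop the per-token is_indexed guard and split address partials purely by their addr_count: frequent tokens become restriction terms applied after the index lookup, rare tokens drive the index lookup itself. Below is a minimal, self-contained sketch of that selection logic; the Token class and the function name split_address_tokens are simplified stand-ins for illustration, not the real types from nominatim_api.search.query.

    from dataclasses import dataclass
    from typing import List, Tuple


    @dataclass
    class Token:
        # Hypothetical stand-in holding only the fields used here.
        token: int       # token id used for the tsvector lookups
        addr_count: int  # frequency of the token in address vectors


    def split_address_tokens(addr_partials: List[Token],
                             threshold: int = 20000) -> Tuple[List[int], List[int]]:
        # Frequent tokens only restrict results after the index lookup;
        # rare tokens are cheap enough to be used in the lookup itself.
        restrict = [t.token for t in addr_partials if t.addr_count > threshold]
        lookup = [t.token for t in addr_partials if t.addr_count <= threshold]
        return restrict, lookup


    # Example: only the rare tokens end up in the lookup list.
    print(split_address_tokens([Token(1, 50), Token(2, 100000), Token(3, 19999)]))
    # -> ([2], [1, 3])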