partials = {t.token: t.addr_count for trange in address
for t in self.query.get_partials_list(trange)}
+ if not partials:
+ # can happen when none of the partials is indexed
+ return
+
if expected_count < 8000:
sdata.lookups.append(dbf.FieldLookup('nameaddress_vector',
list(partials), lookups.Restrict))
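# A minimal standalone sketch of the guard above (not part of the patch):
# `Token`, `SearchData` and the tuple standing in for dbf.FieldLookup with
# lookups.Restrict are simplified placeholders, and the address is assumed
# to be a plain list of token lists rather than the real query ranges.
from dataclasses import dataclass, field
from typing import Dict, List

@dataclass
class Token:
    token: int
    addr_count: int

@dataclass
class SearchData:
    lookups: List[tuple] = field(default_factory=list)

def add_address_restrict(sdata: SearchData, address: List[List[Token]],
                         expected_count: int) -> None:
    # token id -> addr_count over all partial tokens of the address
    partials: Dict[int, int] = {t.token: t.addr_count
                                for trange in address for t in trange}
    if not partials:
        # nothing usable in the address, skip the extra lookup entirely
        return
    if expected_count < 8000:
        # small expected result set: only restrict by the address vector
        sdata.lookups.append(('nameaddress_vector', list(partials), 'restrict'))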
addr_partials = [t for r in address for t in self.query.get_partials_list(r)]
addr_tokens = list({t.token for t in addr_partials})
- partials_indexed = all(t.is_indexed for t in name_partials.values()) \
- and all(t.is_indexed for t in addr_partials)
exp_count = min(t.count for t in name_partials.values()) / (2**(len(name_partials) - 1))
- if (len(name_partials) > 3 or exp_count < 8000) and partials_indexed:
+ if len(name_partials) > 3 or exp_count < 8000:
yield penalty, exp_count, dbf.lookup_by_names(list(name_partials.keys()), addr_tokens)
return
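# For illustration (hypothetical counts, not from the patch): three name
# partials with counts 12000, 9000 and 5000 give an estimate well below the
# 8000 cut-off, so the cheap lookup_by_names() path above is taken.
counts = [12000, 9000, 5000]
exp_count_example = min(counts) / 2 ** (len(counts) - 1)
assert exp_count_example == 1250.0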
- addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 30000
+ addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 50000
# Partial term too frequent. Try looking up by rare full names first.
name_fulls = self.query.get_tokens(name, TokenType.WORD)
if name_fulls:
fulls_count = sum(t.count for t in name_fulls)
- if partials_indexed:
- penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed)
- if fulls_count < 50000 or addr_count < 30000:
+ if fulls_count < 80000 or addr_count < 50000:
yield penalty,fulls_count / (2**len(addr_tokens)), \
self.get_full_name_ranking(name_fulls, addr_partials,
fulls_count > 30000 / max(1, len(addr_tokens)))
# To catch remaining results, lookup by name and address
# We only do this if there is a reasonable number of results expected.
exp_count = exp_count / (2**len(addr_tokens)) if addr_tokens else exp_count
- if exp_count < 10000 and addr_count < 20000\
- and all(t.is_indexed for t in name_partials.values()):
+ if exp_count < 10000 and addr_count < 20000:
penalty += 0.35 * max(1 if name_fulls else 0.1,
5 - len(name_partials) - len(addr_tokens))
yield penalty, exp_count,\
addr_restrict_tokens = []
addr_lookup_tokens = []
for t in addr_partials:
- if t.is_indexed:
- if t.addr_count > 20000:
- addr_restrict_tokens.append(t.token)
- else:
- addr_lookup_tokens.append(t.token)
+ if t.addr_count > 20000:
+ addr_restrict_tokens.append(t.token)
+ else:
+ addr_lookup_tokens.append(t.token)
if addr_restrict_tokens:
lookup.append(dbf.FieldLookup('nameaddress_vector',
use_lookup: bool) -> List[dbf.FieldLookup]:
""" Create a ranking expression with full name terms and
additional address lookup. When 'use_lookup' is true, then
- address lookups will use the index, when the occurences are not
+ address lookups will use the index when the occurrences are not
too many.
"""
- # At this point drop unindexed partials from the address.
- # This might yield wrong results, nothing we can do about that.
if use_lookup:
addr_restrict_tokens = []
- addr_lookup_tokens = []
- for t in addr_partials:
- if t.is_indexed:
- if t.addr_count > 20000:
- addr_restrict_tokens.append(t.token)
- else:
- addr_lookup_tokens.append(t.token)
+ addr_lookup_tokens = [t.token for t in addr_partials]
else:
- addr_restrict_tokens = [t.token for t in addr_partials if t.is_indexed]
+ addr_restrict_tokens = [t.token for t in addr_partials]
addr_lookup_tokens = []
return dbf.lookup_by_any_name([t.token for t in name_fulls],
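# A standalone sketch of the simplified token handling after this change
# (plain (token, addr_count) tuples stand in for the real Token objects;
# the function names are illustrative, not the Nominatim API):
from typing import List, Tuple

def split_address_tokens(addr_partials: List[Tuple[int, int]]) -> Tuple[List[int], List[int]]:
    """ Name+address ranking: frequent address partials (addr_count > 20000)
        only restrict the result, rare ones are looked up via the index.
    """
    restrict: List[int] = []
    lookup: List[int] = []
    for token, addr_count in addr_partials:
        (restrict if addr_count > 20000 else lookup).append(token)
    return restrict, lookup

def full_name_address_tokens(addr_partials: List[Tuple[int, int]],
                             use_lookup: bool) -> Tuple[List[int], List[int]]:
    """ Full-name ranking: either look up all address partials via the index
        or use all of them as a restriction only; the is_indexed filter is gone.
    """
    tokens = [token for token, _ in addr_partials]
    return ([], tokens) if use_lookup else (tokens, [])

# Example: tokens 1 and 2 are frequent, token 3 is rare.
assert split_address_tokens([(1, 50000), (2, 25000), (3, 10)]) == ([1, 2], [3])
assert full_name_address_tokens([(1, 50000), (3, 10)], use_lookup=True) == ([], [1, 3])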