log().table_dump('Searches for assignment',
_dump_searches(searches, query, num_searches))
num_searches = len(searches)
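+ # SEARCH_PRIO acts as a tiebreaker between searches of equal
+ # penalty, giving them a stable, type-based execution order.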
- searches.sort(key=lambda s: s.penalty)
+ searches.sort(key=lambda s: (s.penalty, s.SEARCH_PRIO))
return query, searches
end_time = dt.datetime.now() + self.timeout
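+ # Base the cut-off on the best search's penalty rather than an
+ # arbitrary large constant; searches more than 2.0 worse than the
+ # best candidate are not worth running at all.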
- min_ranking = 1000.0
+ min_ranking = searches[0].penalty + 2.0
prev_penalty = 0.0
for i, search in enumerate(searches):
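+ # Stop once searches become too expensive, but never break up a
+ # group of searches that share the same penalty.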
if search.penalty > prev_penalty and (search.penalty > min_ranking or i > 20):
break
log().table_dump(f"{i + 1}. Search", _dump_searches([search], query))
+ log().var_dump('Params', self.params)
lookup_results = await search.lookup(self.conn, self.params)
for result in lookup_results:
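+ # Fold duplicate hits for the same place into a single result,
+ # keeping the best (lowest) accuracy seen so far.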
rhash = (result.source_table, result.place_id,
         result.housenumber, result.country_code)
prevresult = results.get(rhash)
if prevresult is not None:
    prevresult.accuracy = min(prevresult.accuracy, result.accuracy)
else:
results[rhash] = result
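+ # Tighten the cut-off as results arrive: remaining searches must
+ # stay within 20% of the best accuracy found so far.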
- min_ranking = min(min_ranking, result.ranking + 0.5, search.penalty + 0.3)
+ min_ranking = min(min_ranking, result.accuracy * 1.2)
log().result_dump('Results', ((r.accuracy, r) for r in lookup_results))
prev_penalty = search.penalty
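+ # Honour the overall timeout even if candidate searches remain.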
if dt.datetime.now() >= end_time:
break
for result in results:
- if not result.display_name:
+ # Negative importance indicates ordering by distance, which is
+ # more important than word matching.
+ if not result.display_name\
+ or (result.importance is not None and result.importance < 0):
continue
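+ # 'distance' will accumulate how well the words of the result's
+ # name match the query, for use in reranking.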
distance = 0.0
norm = self.query_analyzer.normalize_text(result.display_name)