From: Sarah Hoffmann Date: Sat, 12 Aug 2023 09:26:02 +0000 (+0200) Subject: improve penalty for token-split words X-Git-Tag: v4.3.0~34 X-Git-Url: https://git.openstreetmap.org./nominatim.git/commitdiff_plain/3d0bc85b4d33c8e88321785762edb50e531aac55 improve penalty for token-split words The rematch penalty for partial words created by the transliteration needs to take into account that they are rematched against the full word. That means that a missing beginning or end should not get a significant penalty. --- diff --git a/nominatim/api/search/icu_tokenizer.py b/nominatim/api/search/icu_tokenizer.py index f259995d..7bf516e3 100644 --- a/nominatim/api/search/icu_tokenizer.py +++ b/nominatim/api/search/icu_tokenizer.py @@ -83,7 +83,7 @@ class ICUToken(qmod.Token): seq = difflib.SequenceMatcher(a=self.lookup_word, b=norm) distance = 0 for tag, afrom, ato, bfrom, bto in seq.get_opcodes(): - if tag == 'delete' and (afrom == 0 or ato == len(self.lookup_word)): + if tag in ('delete', 'insert') and (afrom == 0 or ato == len(self.lookup_word)): distance += 1 elif tag == 'replace': distance += max((ato-afrom), (bto-bfrom))