git.openstreetmap.org Git - nominatim.git / commitdiff
avoid splitting of first token when a housenumber is present
authorSarah Hoffmann <lonvia@denofr.de>
Fri, 14 Jul 2023 19:05:13 +0000 (21:05 +0200)
committerSarah Hoffmann <lonvia@denofr.de>
Mon, 17 Jul 2023 14:27:25 +0000 (16:27 +0200)
This only covers the case of <poi name> <street name> <housenumber>
which is exceedingly rare.

nominatim/api/search/db_search_builder.py
nominatim/api/search/token_assignment.py

index 794012b072f3302279faf803a8fe9565b0cceed4..d18fa96424ddf49a2e307654a719c555811ef2bb 100644 (file)
@@ -225,9 +225,7 @@ class SearchBuilder:
         # This might yield wrong results, nothing we can do about that.
         if not partials_indexed:
             addr_tokens = [t.token for t in addr_partials if t.is_indexed]
-            log().var_dump('before', penalty)
             penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed)
-            log().var_dump('after', penalty)
         if rare_names:
             # Any of the full names applies with all of the partials from the address
             lookup = [dbf.FieldLookup('name_vector', [t.token for t in rare_names], 'lookup_any')]
index 11da23594880f9f4353630e69e6e26dbee6f0f32..f1c2f8e8bf94f98c6ae6817eae19b8425fe40565 100644 (file)
@@ -309,9 +309,12 @@ class _TokenSequence:
                 first = base.address[0]
                 if (not base.housenumber or first.end >= base.housenumber.start)\
                    and (not base.qualifier or first.start >= base.qualifier.end):
+                    base_penalty = self.penalty
+                    if base.housenumber and base.housenumber.start > first.start:
+                        base_penalty += 0.25
                     for i in range(first.start + 1, first.end):
                         name, addr = first.split(i)
-                        penalty = self.penalty + PENALTY_TOKENCHANGE[query.nodes[i].btype]
+                        penalty = base_penalty + PENALTY_TOKENCHANGE[query.nodes[i].btype]
                         log().comment(f'split first word = name ({i - first.start})')
                         yield dataclasses.replace(base, name=name, penalty=penalty,
                                                   address=[addr] + base.address[1:])
@@ -321,9 +324,12 @@ class _TokenSequence:
                 last = base.address[-1]
                 if (not base.housenumber or last.start <= base.housenumber.end)\
                    and (not base.qualifier or last.end <= base.qualifier.start):
+                    base_penalty = self.penalty
+                    if base.housenumber and base.housenumber.start < last.start:
+                        base_penalty += 0.4
                     for i in range(last.start + 1, last.end):
                         addr, name = last.split(i)
-                        penalty = self.penalty + PENALTY_TOKENCHANGE[query.nodes[i].btype]
+                        penalty = base_penalty + PENALTY_TOKENCHANGE[query.nodes[i].btype]
                         log().comment(f'split last word = name ({i - last.start})')
                         yield dataclasses.replace(base, name=name, penalty=penalty,
                                                   address=base.address[:-1] + [addr])