From 927d2cc824e0437dd2ea6abc4ef47c9b3ed3d0aa Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann Date: Mon, 17 Jul 2023 16:25:39 +0200 Subject: [PATCH] do not split names from typed phrases When phrases are typed, they should only contain exactly one term. --- nominatim/api/search/query.py | 22 ++++++-- nominatim/api/search/token_assignment.py | 69 +++++++++++++++--------- test/python/api/search/test_query.py | 49 +++++++++++++++++ 3 files changed, 112 insertions(+), 28 deletions(-) create mode 100644 test/python/api/search/test_query.py diff --git a/nominatim/api/search/query.py b/nominatim/api/search/query.py index f2b18f87..5d75eb0f 100644 --- a/nominatim/api/search/query.py +++ b/nominatim/api/search/query.py @@ -7,7 +7,7 @@ """ Datastructures for a tokenized query. """ -from typing import List, Tuple, Optional, NamedTuple, Iterator +from typing import List, Tuple, Optional, Iterator from abc import ABC, abstractmethod import dataclasses import enum @@ -107,13 +107,29 @@ class Token(ABC): category objects. """ - -class TokenRange(NamedTuple): +@dataclasses.dataclass +class TokenRange: """ Indexes of query nodes over which a token spans. """ start: int end: int + def __lt__(self, other: 'TokenRange') -> bool: + return self.end <= other.start + + + def __le__(self, other: 'TokenRange') -> bool: + return NotImplemented + + + def __gt__(self, other: 'TokenRange') -> bool: + return self.start >= other.end + + + def __ge__(self, other: 'TokenRange') -> bool: + return NotImplemented + + def replace_start(self, new_start: int) -> 'TokenRange': """ Return a new token range with the new start. 
""" diff --git a/nominatim/api/search/token_assignment.py b/nominatim/api/search/token_assignment.py index 33fb7335..0ae2cd43 100644 --- a/nominatim/api/search/token_assignment.py +++ b/nominatim/api/search/token_assignment.py @@ -288,18 +288,29 @@ class _TokenSequence: yield dataclasses.replace(base, penalty=self.penalty, name=first, address=base.address[1:]) - if (not base.housenumber or first.end >= base.housenumber.start)\ - and (not base.qualifier or first.start >= base.qualifier.end): - base_penalty = self.penalty - if (base.housenumber and base.housenumber.start > first.start) \ - or len(query.source) > 1: - base_penalty += 0.25 - for i in range(first.start + 1, first.end): - name, addr = first.split(i) - penalty = base_penalty + PENALTY_TOKENCHANGE[query.nodes[i].btype] - log().comment(f'split first word = name ({i - first.start})') - yield dataclasses.replace(base, name=name, penalty=penalty, - address=[addr] + base.address[1:]) + # To paraphrase: + # * if another name term comes after the first one and before the + # housenumber + # * a qualifier comes after the name + # * the containing phrase is strictly typed + if (base.housenumber and first.end < base.housenumber.start)\ + or (base.qualifier and base.qualifier > first)\ + or (query.nodes[first.start].ptype != qmod.PhraseType.NONE): + return + + penalty = self.penalty + + # Penalty for: + # * , , , ... 
+ # * queries that are comma-separated + if (base.housenumber and base.housenumber > first) or len(query.source) > 1: + penalty += 0.25 + + for i in range(first.start + 1, first.end): + name, addr = first.split(i) + log().comment(f'split first word = name ({i - first.start})') + yield dataclasses.replace(base, name=name, address=[addr] + base.address[1:], + penalty=penalty + PENALTY_TOKENCHANGE[query.nodes[i].btype]) def _get_assignments_address_backward(self, base: TokenAssignment, @@ -314,19 +325,27 @@ class _TokenSequence: yield dataclasses.replace(base, penalty=self.penalty, name=last, address=base.address[:-1]) - if (not base.housenumber or last.start <= base.housenumber.end)\ - and (not base.qualifier or last.end <= base.qualifier.start): - base_penalty = self.penalty - if base.housenumber and base.housenumber.start < last.start: - base_penalty += 0.4 - if len(query.source) > 1: - base_penalty += 0.25 - for i in range(last.start + 1, last.end): - addr, name = last.split(i) - penalty = base_penalty + PENALTY_TOKENCHANGE[query.nodes[i].btype] - log().comment(f'split last word = name ({i - last.start})') - yield dataclasses.replace(base, name=name, penalty=penalty, - address=base.address[:-1] + [addr]) + # To paraphrase: + # * if another name term comes before the last one and after the + # housenumber + # * a qualifier comes before the name + # * the containing phrase is strictly typed + if (base.housenumber and last.start > base.housenumber.end)\ + or (base.qualifier and base.qualifier < last)\ + or (query.nodes[last.start].ptype != qmod.PhraseType.NONE): + return + + penalty = self.penalty + if base.housenumber and base.housenumber < last: + penalty += 0.4 + if len(query.source) > 1: + penalty += 0.25 + + for i in range(last.start + 1, last.end): + addr, name = last.split(i) + log().comment(f'split last word = name ({i - last.start})') + yield dataclasses.replace(base, name=name, address=base.address[:-1] + [addr], + penalty=penalty + 
PENALTY_TOKENCHANGE[query.nodes[i].btype]) def get_assignments(self, query: qmod.QueryStruct) -> Iterator[TokenAssignment]: diff --git a/test/python/api/search/test_query.py b/test/python/api/search/test_query.py new file mode 100644 index 00000000..a4b32824 --- /dev/null +++ b/test/python/api/search/test_query.py @@ -0,0 +1,49 @@ + +# SPDX-License-Identifier: GPL-3.0-or-later +# +# This file is part of Nominatim. (https://nominatim.org) +# +# Copyright (C) 2023 by the Nominatim developer community. +# For a full list of authors see the git log. +""" +Test data types for search queries. +""" +import pytest + +import nominatim.api.search.query as nq + +def test_token_range_equal(): + assert nq.TokenRange(2, 3) == nq.TokenRange(2, 3) + assert not (nq.TokenRange(2, 3) != nq.TokenRange(2, 3)) + + +@pytest.mark.parametrize('lop,rop', [((1, 2), (3, 4)), + ((3, 4), (3, 5)), + ((10, 12), (11, 12))]) +def test_token_range_unequal(lop, rop): + assert not (nq.TokenRange(*lop) == nq.TokenRange(*rop)) + assert nq.TokenRange(*lop) != nq.TokenRange(*rop) + + +def test_token_range_lt(): + assert nq.TokenRange(1, 3) < nq.TokenRange(10, 12) + assert nq.TokenRange(5, 6) < nq.TokenRange(7, 8) + assert nq.TokenRange(1, 4) < nq.TokenRange(4, 5) + assert not(nq.TokenRange(5, 6) < nq.TokenRange(5, 6)) + assert not(nq.TokenRange(10, 11) < nq.TokenRange(4, 5)) + + +def test_token_range_gt(): + assert nq.TokenRange(3, 4) > nq.TokenRange(1, 2) + assert nq.TokenRange(100, 200) > nq.TokenRange(10, 11) + assert nq.TokenRange(10, 11) > nq.TokenRange(4, 10) + assert not(nq.TokenRange(5, 6) > nq.TokenRange(5, 6)) + assert not(nq.TokenRange(1, 2) > nq.TokenRange(3, 4)) + assert not(nq.TokenRange(4, 10) > nq.TokenRange(3, 5)) + + +def test_token_range_unimplemented_ops(): + with pytest.raises(TypeError): + nq.TokenRange(1, 3) <= nq.TokenRange(10, 12) + with pytest.raises(TypeError): + nq.TokenRange(1, 3) >= nq.TokenRange(10, 12) -- 2.39.5