X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/d8ed565bce27c638074fbc6f1961dfc0d160e312..b54ff7d7664eadf3fdf018622540d6fce835502e:/test/python/api/search/test_token_assignment.py?ds=inline

diff --git a/test/python/api/search/test_token_assignment.py b/test/python/api/search/test_token_assignment.py
index 6dc25b1e..884d2932 100644
--- a/test/python/api/search/test_token_assignment.py
+++ b/test/python/api/search/test_token_assignment.py
@@ -2,15 +2,15 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2023 by the Nominatim developer community.
+# Copyright (C) 2024 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Test for creation of token assignments from tokenized queries.
 """
 import pytest
 
-from nominatim.api.search.query import QueryStruct, Phrase, PhraseType, BreakType, TokenType, TokenRange, Token
-from nominatim.api.search.token_assignment import yield_token_assignments, TokenAssignment, PENALTY_TOKENCHANGE
+from nominatim_api.search.query import QueryStruct, Phrase, PhraseType, BreakType, TokenType, TokenRange, Token
+from nominatim_api.search.token_assignment import yield_token_assignments, TokenAssignment, PENALTY_TOKENCHANGE
 
 class MyToken(Token):
     def get_category(self):
@@ -19,7 +19,8 @@ class MyToken(Token):
 
 def make_query(*args):
     q = QueryStruct([Phrase(args[0][1], '')])
-    dummy = MyToken(3.0, 45, 1, 'foo', True)
+    dummy = MyToken(penalty=3.0, token=45, count=1, addr_count=1,
+                    lookup_word='foo', is_indexed=True)
 
     for btype, ptype, _ in args[1:]:
         q.add_node(btype, ptype)
@@ -76,11 +77,11 @@ def test_single_country_name():
 
 def test_single_word_poi_search():
     q = make_query((BreakType.START, PhraseType.NONE,
-                    [(1, TokenType.CATEGORY),
+                    [(1, TokenType.NEAR_ITEM),
                      (1, TokenType.QUALIFIER)]))
 
     res = list(yield_token_assignments(q))
 
-    assert res == [TokenAssignment(category=TokenRange(0, 1))]
+    assert res == [TokenAssignment(near_item=TokenRange(0, 1))]
 
 @pytest.mark.parametrize('btype', [BreakType.WORD, BreakType.PART, BreakType.TOKEN])
@@ -182,7 +183,7 @@ def test_country_housenumber_postcode():
 
 
 @pytest.mark.parametrize('ttype', [TokenType.POSTCODE, TokenType.COUNTRY,
-                                   TokenType.CATEGORY, TokenType.QUALIFIER])
+                                   TokenType.NEAR_ITEM, TokenType.QUALIFIER])
 def test_housenumber_with_only_special_terms(ttype):
     q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.HOUSENUMBER)]),
                    (BreakType.WORD, PhraseType.NONE, [(2, ttype)]))
@@ -266,27 +267,27 @@ def test_postcode_with_designation_backwards():
                                       address=[TokenRange(0, 1)]))
 
 
-def test_category_at_beginning():
-    q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.CATEGORY)]),
+def test_near_item_at_beginning():
+    q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.NEAR_ITEM)]),
                    (BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)]))
 
     check_assignments(yield_token_assignments(q),
                       TokenAssignment(penalty=0.1, name=TokenRange(1, 2),
-                                      category=TokenRange(0, 1)))
+                                      near_item=TokenRange(0, 1)))
 
 
-def test_category_at_end():
+def test_near_item_at_end():
     q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
-                   (BreakType.WORD, PhraseType.NONE, [(2, TokenType.CATEGORY)]))
+                   (BreakType.WORD, PhraseType.NONE, [(2, TokenType.NEAR_ITEM)]))
 
     check_assignments(yield_token_assignments(q),
                       TokenAssignment(penalty=0.1, name=TokenRange(0, 1),
-                                      category=TokenRange(1, 2)))
+                                      near_item=TokenRange(1, 2)))
 
 
-def test_category_in_middle():
+def test_near_item_in_middle():
     q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
-                   (BreakType.WORD, PhraseType.NONE, [(2, TokenType.CATEGORY)]),
+                   (BreakType.WORD, PhraseType.NONE, [(2, TokenType.NEAR_ITEM)]),
                    (BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)]))
 
     check_assignments(yield_token_assignments(q))
@@ -337,3 +338,14 @@ def test_qualifier_after_housenumber():
                    (BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)]))
 
     check_assignments(yield_token_assignments(q))
+
+
+def test_qualifier_in_middle_of_phrase():
+    q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
+                   (BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.PARTIAL)]),
+                   (BreakType.WORD, PhraseType.NONE, [(3, TokenType.QUALIFIER)]),
+                   (BreakType.WORD, PhraseType.NONE, [(4, TokenType.PARTIAL)]),
+                   (BreakType.PHRASE, PhraseType.NONE, [(5, TokenType.PARTIAL)]))
+
+    check_assignments(yield_token_assignments(q))
+