git.openstreetmap.org Git - nominatim.git/commitdiff
rename use of category as POI search to near_item
author     Sarah Hoffmann <lonvia@denofr.de>
           Tue, 28 Nov 2023 15:27:05 +0000 (16:27 +0100)
committer  Sarah Hoffmann <lonvia@denofr.de>
           Tue, 28 Nov 2023 15:27:05 +0000 (16:27 +0100)
Use the term category only as a short-cut for "tuple of key and value".
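
The sketch below is illustrative only and not part of the commit: after the
rename, the POI term of a query lives in the near_item field of
TokenAssignment, while "category" remains merely the (key, value) tuple
attached to a token. Module paths are taken from the diff below.

    # Hedged sketch of the renamed API; paths as in the diff below.
    from nominatim.api.search.query import TokenRange
    from nominatim.api.search.token_assignment import TokenAssignment

    # "supermarket in Berlin": the POI term is now a near_item, the place a name.
    assignment = TokenAssignment(near_item=TokenRange(0, 1),
                                 name=TokenRange(1, 2))

    # A category, by contrast, is just a (key, value) tuple such as
    # ('shop', 'supermarket'), as returned by Token.get_category().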

nominatim/api/search/db_search_builder.py
nominatim/api/search/icu_tokenizer.py
nominatim/api/search/legacy_tokenizer.py
nominatim/api/search/query.py
nominatim/api/search/token_assignment.py
test/python/api/search/test_api_search_query.py
test/python/api/search/test_db_search_builder.py
test/python/api/search/test_icu_query_analyzer.py
test/python/api/search/test_legacy_query_analyzer.py
test/python/api/search/test_token_assignment.py

diff --git a/nominatim/api/search/db_search_builder.py b/nominatim/api/search/db_search_builder.py
index 35d10e782e3d19026de775270c329c4db8dddb2b..a0018480d2dee7cf10525e051a8e97e6b3641475 100644
@@ -89,12 +89,12 @@ class SearchBuilder:
         if sdata is None:
             return
 
-        categories = self.get_search_categories(assignment)
+        near_items = self.get_near_items(assignment)
 
         if assignment.name is None:
-            if categories and not sdata.postcodes:
-                sdata.qualifiers = categories
-                categories = None
+            if near_items and not sdata.postcodes:
+                sdata.qualifiers = near_items
+                near_items = None
                 builder = self.build_poi_search(sdata)
             elif assignment.housenumber:
                 hnr_tokens = self.query.get_tokens(assignment.housenumber,
@@ -102,16 +102,16 @@ class SearchBuilder:
                 builder = self.build_housenumber_search(sdata, hnr_tokens, assignment.address)
             else:
                 builder = self.build_special_search(sdata, assignment.address,
-                                                    bool(categories))
+                                                    bool(near_items))
         else:
             builder = self.build_name_search(sdata, assignment.name, assignment.address,
-                                             bool(categories))
+                                             bool(near_items))
 
-        if categories:
-            penalty = min(categories.penalties)
-            categories.penalties = [p - penalty for p in categories.penalties]
+        if near_items:
+            penalty = min(near_items.penalties)
+            near_items.penalties = [p - penalty for p in near_items.penalties]
             for search in builder:
-                yield dbs.NearSearch(penalty + assignment.penalty, categories, search)
+                yield dbs.NearSearch(penalty + assignment.penalty, near_items, search)
         else:
             for search in builder:
                 search.penalty += assignment.penalty
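
The near-item branch above rebases the penalties before wrapping each search
in a NearSearch: the smallest per-item penalty moves onto the NearSearch
itself and the remaining penalties become offsets relative to it. A
standalone sketch with made-up numbers (chosen to be exact in binary
floating point):

    # Standalone sketch of the penalty rebasing above; numbers are made up.
    penalties = [0.25, 0.5, 1.0]              # per near-item penalties
    base = min(penalties)                     # 0.25 moves onto the NearSearch
    penalties = [p - base for p in penalties]
    assert penalties == [0.0, 0.25, 0.75]     # relative order is preserved
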
@@ -339,15 +339,14 @@ class SearchBuilder:
         return sdata
 
 
-    def get_search_categories(self,
-                              assignment: TokenAssignment) -> Optional[dbf.WeightedCategories]:
-        """ Collect tokens for category search or use the categories
+    def get_near_items(self, assignment: TokenAssignment) -> Optional[dbf.WeightedCategories]:
+        """ Collect tokens for near items search or use the categories
             requested per parameter.
             Returns None if no category search is requested.
         """
-        if assignment.category:
+        if assignment.near_item:
             tokens: Dict[Tuple[str, str], float] = {}
-            for t in self.query.get_tokens(assignment.category, TokenType.CATEGORY):
+            for t in self.query.get_tokens(assignment.near_item, TokenType.NEAR_ITEM):
                 cat = t.get_category()
                 if (not self.details.categories or cat in self.details.categories)\
                    and t.penalty < tokens.get(cat, 1000.0):
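
get_near_items above keeps, per (key, value) category, only the cheapest
token, using 1000.0 as a "not seen yet" sentinel. A self-contained sketch of
that folding with hypothetical data:

    # Self-contained sketch of the folding in get_near_items; data is made up.
    from typing import Dict, Tuple

    candidates = [(('amenity', 'restaurant'), 0.2),
                  (('amenity', 'restaurant'), 0.5),   # duplicate, worse penalty
                  (('shop', 'bakery'), 0.3)]

    tokens: Dict[Tuple[str, str], float] = {}
    for cat, penalty in candidates:
        if penalty < tokens.get(cat, 1000.0):         # same sentinel as above
            tokens[cat] = penalty

    assert tokens == {('amenity', 'restaurant'): 0.2, ('shop', 'bakery'): 0.3}
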
diff --git a/nominatim/api/search/icu_tokenizer.py b/nominatim/api/search/icu_tokenizer.py
index 196fde2a8444e69d5d74a0a0310dd94812425d96..fceec2df522feb5105936204b099e9a8a7a2ad96 100644
@@ -184,13 +184,13 @@ class ICUQueryAnalyzer(AbstractQueryAnalyzer):
                 if row.type == 'S':
                     if row.info['op'] in ('in', 'near'):
                         if trange.start == 0:
-                            query.add_token(trange, qmod.TokenType.CATEGORY, token)
+                            query.add_token(trange, qmod.TokenType.NEAR_ITEM, token)
                     else:
                         query.add_token(trange, qmod.TokenType.QUALIFIER, token)
                         if trange.start == 0 or trange.end == query.num_token_slots():
                             token = copy(token)
                             token.penalty += 0.1 * (query.num_token_slots())
-                            query.add_token(trange, qmod.TokenType.CATEGORY, token)
+                            query.add_token(trange, qmod.TokenType.NEAR_ITEM, token)
                 else:
                     query.add_token(trange, DB_TO_TOKEN_TYPE[row.type], token)
 
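
The rule encoded above, and mirrored in the legacy tokenizer below: an
'in'/'near' operator word becomes a NEAR_ITEM token only at the very start of
the query, while other special terms act as qualifiers and are additionally
tried as a penalised near-item when they sit at either end of the query. A
simplified standalone sketch; the helper name is hypothetical, not the
analyzer's API:

    # Hypothetical helper restating the placement rule above.
    def special_token_types(op, start, end, num_slots):
        if op in ('in', 'near'):
            # e.g. "restaurants in ..." - near-item only at the query start
            return ['NEAR_ITEM'] if start == 0 else []
        types = ['QUALIFIER']
        if start == 0 or end == num_slots:
            # also tried as near-item, with penalty += 0.1 * num_slots
            types.append('NEAR_ITEM')
        return types
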
diff --git a/nominatim/api/search/legacy_tokenizer.py b/nominatim/api/search/legacy_tokenizer.py
index 26e4c126b626038f35b8fc97d447d98468d91372..e7984ee41832909fe608edd69dfc2dc6ec635a50 100644
@@ -107,15 +107,15 @@ class LegacyQueryAnalyzer(AbstractQueryAnalyzer):
         for row in await self.lookup_in_db(lookup_words):
             for trange in words[row.word_token.strip()]:
                 token, ttype = self.make_token(row)
-                if ttype == qmod.TokenType.CATEGORY:
+                if ttype == qmod.TokenType.NEAR_ITEM:
                     if trange.start == 0:
-                        query.add_token(trange, qmod.TokenType.CATEGORY, token)
+                        query.add_token(trange, qmod.TokenType.NEAR_ITEM, token)
                 elif ttype == qmod.TokenType.QUALIFIER:
                     query.add_token(trange, qmod.TokenType.QUALIFIER, token)
                     if trange.start == 0 or trange.end == query.num_token_slots():
                         token = copy(token)
                         token.penalty += 0.1 * (query.num_token_slots())
-                        query.add_token(trange, qmod.TokenType.CATEGORY, token)
+                        query.add_token(trange, qmod.TokenType.NEAR_ITEM, token)
                 elif ttype != qmod.TokenType.PARTIAL or trange.start + 1 == trange.end:
                     query.add_token(trange, ttype, token)
 
@@ -195,7 +195,7 @@ class LegacyQueryAnalyzer(AbstractQueryAnalyzer):
                 ttype = qmod.TokenType.POSTCODE
                 lookup_word = row.word_token[1:]
             else:
-                ttype = qmod.TokenType.CATEGORY if row.operator in ('in', 'near')\
+                ttype = qmod.TokenType.NEAR_ITEM if row.operator in ('in', 'near')\
                         else qmod.TokenType.QUALIFIER
                 lookup_word = row.word
         elif row.word_token.startswith(' '):
diff --git a/nominatim/api/search/query.py b/nominatim/api/search/query.py
index 4bf009a53a7add87b44cee2ac2508b72e1846f2b..ad1b69ef521dfd303ae1d5b95aa8629859feb10b 100644
@@ -46,7 +46,7 @@ class TokenType(enum.Enum):
     """ Country name or reference. """
     QUALIFIER = enum.auto()
     """ Special term used together with name (e.g. _Hotel_ Bellevue). """
     """ Country name or reference. """
     QUALIFIER = enum.auto()
     """ Special term used together with name (e.g. _Hotel_ Bellevue). """
-    CATEGORY = enum.auto()
+    NEAR_ITEM = enum.auto()
     """ Special term used as searchable object(e.g. supermarket in ...). """
 
 
     """ Special term used as searchable object(e.g. supermarket in ...). """
 
 
@@ -78,7 +78,7 @@ class PhraseType(enum.Enum):
             return not is_full_phrase or ttype != TokenType.QUALIFIER
         if self == PhraseType.AMENITY:
             return ttype in (TokenType.WORD, TokenType.PARTIAL)\
-                   or (is_full_phrase and ttype == TokenType.CATEGORY)\
+                   or (is_full_phrase and ttype == TokenType.NEAR_ITEM)\
                    or (not is_full_phrase and ttype == TokenType.QUALIFIER)
         if self == PhraseType.STREET:
             return ttype in (TokenType.WORD, TokenType.PARTIAL, TokenType.HOUSENUMBER)
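
In words: an AMENITY phrase accepts a NEAR_ITEM token only when the token
covers the whole phrase, and a QUALIFIER only when it does not. A simplified
restatement of the condition above, with plain strings standing in for the
enum members:

    # Simplified restatement of the AMENITY branch; strings stand in for enums.
    def amenity_accepts(ttype, is_full_phrase):
        return ttype in ('WORD', 'PARTIAL') \
               or (is_full_phrase and ttype == 'NEAR_ITEM') \
               or (not is_full_phrase and ttype == 'QUALIFIER')

    assert amenity_accepts('NEAR_ITEM', True)
    assert not amenity_accepts('NEAR_ITEM', False)
    assert amenity_accepts('QUALIFIER', False)
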
diff --git a/nominatim/api/search/token_assignment.py b/nominatim/api/search/token_assignment.py
index 3f0e737b004d623f5eef758a40b1a59e1d495806..d94d69039f0f602ab8c0aeda9e5c971a881353fd 100644
@@ -46,7 +46,7 @@ class TokenAssignment: # pylint: disable=too-many-instance-attributes
     housenumber: Optional[qmod.TokenRange] = None
     postcode: Optional[qmod.TokenRange] = None
     country: Optional[qmod.TokenRange] = None
-    category: Optional[qmod.TokenRange] = None
+    near_item: Optional[qmod.TokenRange] = None
     qualifier: Optional[qmod.TokenRange] = None
 
 
@@ -64,8 +64,8 @@ class TokenAssignment: # pylint: disable=too-many-instance-attributes
                 out.postcode = token.trange
             elif token.ttype == qmod.TokenType.COUNTRY:
                 out.country = token.trange
-            elif token.ttype == qmod.TokenType.CATEGORY:
-                out.category = token.trange
+            elif token.ttype == qmod.TokenType.NEAR_ITEM:
+                out.near_item = token.trange
             elif token.ttype == qmod.TokenType.QUALIFIER:
                 out.qualifier = token.trange
         return out
@@ -109,7 +109,7 @@ class _TokenSequence:
         """
         # Country and category must be the final term for left-to-right
         return len(self.seq) > 1 and \
         """
         # Country and category must be the final term for left-to-right
         return len(self.seq) > 1 and \
-               self.seq[-1].ttype in (qmod.TokenType.COUNTRY, qmod.TokenType.CATEGORY)
+               self.seq[-1].ttype in (qmod.TokenType.COUNTRY, qmod.TokenType.NEAR_ITEM)
 
 
     def appendable(self, ttype: qmod.TokenType) -> Optional[int]:
@@ -165,22 +165,22 @@ class _TokenSequence:
         if ttype == qmod.TokenType.COUNTRY:
             return None if self.direction == -1 else 1
 
-        if ttype == qmod.TokenType.CATEGORY:
+        if ttype == qmod.TokenType.NEAR_ITEM:
             return self.direction
 
         if ttype == qmod.TokenType.QUALIFIER:
             if self.direction == 1:
                 if (len(self.seq) == 1
-                    and self.seq[0].ttype in (qmod.TokenType.PARTIAL, qmod.TokenType.CATEGORY)) \
+                    and self.seq[0].ttype in (qmod.TokenType.PARTIAL, qmod.TokenType.NEAR_ITEM)) \
                    or (len(self.seq) == 2
-                       and self.seq[0].ttype == qmod.TokenType.CATEGORY
+                       and self.seq[0].ttype == qmod.TokenType.NEAR_ITEM
                        and self.seq[1].ttype == qmod.TokenType.PARTIAL):
                     return 1
                 return None
             if self.direction == -1:
                 return -1
 
-            tempseq = self.seq[1:] if self.seq[0].ttype == qmod.TokenType.CATEGORY else self.seq
+            tempseq = self.seq[1:] if self.seq[0].ttype == qmod.TokenType.NEAR_ITEM else self.seq
             if len(tempseq) == 0:
                 return 1
             if len(tempseq) == 1 and self.seq[0].ttype == qmod.TokenType.HOUSENUMBER:
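
For left-to-right sequences, the qualifier branch above only accepts the
qualifier directly after the leading term: a single PARTIAL or NEAR_ITEM, or
a NEAR_ITEM followed by exactly one PARTIAL. A simplified restatement with
plain strings standing in for the token types:

    # Simplified restatement of the left-to-right qualifier check above.
    def qualifier_fits(seq):
        return (len(seq) == 1 and seq[0] in ('PARTIAL', 'NEAR_ITEM')) \
               or (len(seq) == 2 and seq[0] == 'NEAR_ITEM'
                                 and seq[1] == 'PARTIAL')

    assert qualifier_fits(['NEAR_ITEM'])
    assert qualifier_fits(['NEAR_ITEM', 'PARTIAL'])
    assert not qualifier_fits(['PARTIAL', 'PARTIAL'])
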
@@ -253,7 +253,7 @@ class _TokenSequence:
                 priors = sum(1 for t in self.seq[hnrpos+1:] if t.ttype == qmod.TokenType.PARTIAL)
                 if not self._adapt_penalty_from_priors(priors, 1):
                     return False
-            if any(t.ttype == qmod.TokenType.CATEGORY for t in self.seq):
+            if any(t.ttype == qmod.TokenType.NEAR_ITEM for t in self.seq):
                 self.penalty += 1.0
 
         return True
@@ -368,7 +368,7 @@ class _TokenSequence:
 
         # Postcode or country-only search
         if not base.address:
-            if not base.housenumber and (base.postcode or base.country or base.category):
+            if not base.housenumber and (base.postcode or base.country or base.near_item):
                 log().comment('postcode/country search')
                 yield dataclasses.replace(base, penalty=self.penalty)
         else:
diff --git a/test/python/api/search/test_api_search_query.py b/test/python/api/search/test_api_search_query.py
index 69a17412cf14170cd0a2c6a9209dab73676dc5b1..fe850ce902930a817981bd42c6c549fc5bd91ec3 100644
@@ -106,11 +106,11 @@ def test_query_struct_amenity_single_word():
     q.add_node(query.BreakType.END, query.PhraseType.NONE)
 
     q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1))
-    q.add_token(query.TokenRange(0, 1), query.TokenType.CATEGORY, mktoken(2))
+    q.add_token(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM, mktoken(2))
     q.add_token(query.TokenRange(0, 1), query.TokenType.QUALIFIER, mktoken(3))
 
     assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL)) == 1
-    assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.CATEGORY)) == 1
+    assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM)) == 1
     assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.QUALIFIER)) == 0
 
 
@@ -121,14 +121,14 @@ def test_query_struct_amenity_two_words():
 
     for trange in [(0, 1), (1, 2)]:
         q.add_token(query.TokenRange(*trange), query.TokenType.PARTIAL, mktoken(1))
-        q.add_token(query.TokenRange(*trange), query.TokenType.CATEGORY, mktoken(2))
+        q.add_token(query.TokenRange(*trange), query.TokenType.NEAR_ITEM, mktoken(2))
         q.add_token(query.TokenRange(*trange), query.TokenType.QUALIFIER, mktoken(3))
 
     assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL)) == 1
-    assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.CATEGORY)) == 0
+    assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM)) == 0
     assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.QUALIFIER)) == 1
 
     assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.PARTIAL)) == 1
-    assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.CATEGORY)) == 0
+    assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.NEAR_ITEM)) == 0
     assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.QUALIFIER)) == 1
 
diff --git a/test/python/api/search/test_db_search_builder.py b/test/python/api/search/test_db_search_builder.py
index 9d877b1db9ee5aa575f8c508cf94d25db5f8ffbf..e3feff0d31cb7e976ad7fe81a01f99d632233a33 100644
@@ -147,11 +147,11 @@ def test_postcode_with_address_with_full_word():
 
 @pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1', 'bounded_viewbox': True},
                                     {'near': '10,10'}])
-def test_category_only(kwargs):
-    q = make_query([(1, TokenType.CATEGORY, [(2, 'foo')])])
+def test_near_item_only(kwargs):
+    q = make_query([(1, TokenType.NEAR_ITEM, [(2, 'foo')])])
     builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs))
 
-    searches = list(builder.build(TokenAssignment(category=TokenRange(0, 1))))
+    searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1))))
 
     assert len(searches) == 1
 
@@ -163,11 +163,11 @@ def test_category_only(kwargs):
 
 @pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1'},
                                     {}])
-def test_category_skipped(kwargs):
-    q = make_query([(1, TokenType.CATEGORY, [(2, 'foo')])])
+def test_near_item_skipped(kwargs):
+    q = make_query([(1, TokenType.NEAR_ITEM, [(2, 'foo')])])
     builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs))
 
-    searches = list(builder.build(TokenAssignment(category=TokenRange(0, 1))))
+    searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1))))
 
     assert len(searches) == 0
 
@@ -284,13 +284,13 @@ def test_name_and_complex_address():
 
 
 def test_name_only_near_search():
-    q = make_query([(1, TokenType.CATEGORY, [(88, 'g')])],
+    q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])],
                    [(2, TokenType.PARTIAL, [(1, 'a')]),
                     (2, TokenType.WORD, [(100, 'a')])])
     builder = SearchBuilder(q, SearchDetails())
 
     searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
-                                                  category=TokenRange(0, 1))))
+                                                  near_item=TokenRange(0, 1))))
 
     assert len(searches) == 1
     search = searches[0]
diff --git a/test/python/api/search/test_icu_query_analyzer.py b/test/python/api/search/test_icu_query_analyzer.py
index faf8137526106a0e5db77b325c0fa73f30b94a05..a88ca8b82e4facc800aa23c507f309f57c4c8311 100644
@@ -134,7 +134,7 @@ async def test_category_words_only_at_beginning(conn):
 
     assert query.num_token_slots() == 3
     assert len(query.nodes[0].starting) == 1
-    assert query.nodes[0].starting[0].ttype == TokenType.CATEGORY
+    assert query.nodes[0].starting[0].ttype == TokenType.NEAR_ITEM
     assert not query.nodes[2].starting
 
 
@@ -148,9 +148,9 @@ async def test_qualifier_words(conn):
     query = await ana.analyze_query(make_phrase('foo BAR foo BAR foo'))
 
     assert query.num_token_slots() == 5
-    assert set(t.ttype for t in query.nodes[0].starting) == {TokenType.CATEGORY, TokenType.QUALIFIER}
+    assert set(t.ttype for t in query.nodes[0].starting) == {TokenType.NEAR_ITEM, TokenType.QUALIFIER}
     assert set(t.ttype for t in query.nodes[2].starting) == {TokenType.QUALIFIER}
-    assert set(t.ttype for t in query.nodes[4].starting) == {TokenType.CATEGORY, TokenType.QUALIFIER}
+    assert set(t.ttype for t in query.nodes[4].starting) == {TokenType.NEAR_ITEM, TokenType.QUALIFIER}
 
 
 @pytest.mark.asyncio
diff --git a/test/python/api/search/test_legacy_query_analyzer.py b/test/python/api/search/test_legacy_query_analyzer.py
index cdea6ede7cd842fa0c0f25b6915b8c0fcb2f0fe9..507afaeceee8023eade86f5c2f1db7d382d44842 100644
@@ -212,7 +212,7 @@ async def test_category_words_only_at_beginning(conn):
 
     assert query.num_token_slots() == 3
     assert len(query.nodes[0].starting) == 1
-    assert query.nodes[0].starting[0].ttype == TokenType.CATEGORY
+    assert query.nodes[0].starting[0].ttype == TokenType.NEAR_ITEM
     assert not query.nodes[2].starting
 
 
@@ -226,9 +226,9 @@ async def test_qualifier_words(conn):
     query = await ana.analyze_query(make_phrase('foo BAR foo BAR foo'))
 
     assert query.num_token_slots() == 5
-    assert set(t.ttype for t in query.nodes[0].starting) == {TokenType.CATEGORY, TokenType.QUALIFIER}
+    assert set(t.ttype for t in query.nodes[0].starting) == {TokenType.NEAR_ITEM, TokenType.QUALIFIER}
     assert set(t.ttype for t in query.nodes[2].starting) == {TokenType.QUALIFIER}
-    assert set(t.ttype for t in query.nodes[4].starting) == {TokenType.CATEGORY, TokenType.QUALIFIER}
+    assert set(t.ttype for t in query.nodes[4].starting) == {TokenType.NEAR_ITEM, TokenType.QUALIFIER}
 
 
 @pytest.mark.asyncio
diff --git a/test/python/api/search/test_token_assignment.py b/test/python/api/search/test_token_assignment.py
index 6dc25b1e7507159dc2163d03ac13451d817559fe..2ed55a0f80afb06372ef27f6a49fcf840e3854cc 100644
@@ -76,11 +76,11 @@ def test_single_country_name():
 
 def test_single_word_poi_search():
     q = make_query((BreakType.START, PhraseType.NONE,
-                    [(1, TokenType.CATEGORY),
+                    [(1, TokenType.NEAR_ITEM),
                      (1, TokenType.QUALIFIER)]))
 
     res = list(yield_token_assignments(q))
-    assert res == [TokenAssignment(category=TokenRange(0, 1))]
+    assert res == [TokenAssignment(near_item=TokenRange(0, 1))]
 
 
 @pytest.mark.parametrize('btype', [BreakType.WORD, BreakType.PART, BreakType.TOKEN])
@@ -182,7 +182,7 @@ def test_country_housenumber_postcode():
 
 
 @pytest.mark.parametrize('ttype', [TokenType.POSTCODE, TokenType.COUNTRY,
-                                   TokenType.CATEGORY, TokenType.QUALIFIER])
+                                   TokenType.NEAR_ITEM, TokenType.QUALIFIER])
 def test_housenumber_with_only_special_terms(ttype):
     q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.HOUSENUMBER)]),
                    (BreakType.WORD, PhraseType.NONE, [(2, ttype)]))
@@ -266,27 +266,27 @@ def test_postcode_with_designation_backwards():
                                       address=[TokenRange(0, 1)]))
 
 
-def test_category_at_beginning():
-    q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.CATEGORY)]),
+def test_near_item_at_beginning():
+    q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.NEAR_ITEM)]),
                    (BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)]))
 
     check_assignments(yield_token_assignments(q),
                       TokenAssignment(penalty=0.1, name=TokenRange(1, 2),
-                                      category=TokenRange(0, 1)))
+                                      near_item=TokenRange(0, 1)))
 
 
-def test_category_at_end():
+def test_near_item_at_end():
     q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
-                   (BreakType.WORD, PhraseType.NONE, [(2, TokenType.CATEGORY)]))
+                   (BreakType.WORD, PhraseType.NONE, [(2, TokenType.NEAR_ITEM)]))
 
     check_assignments(yield_token_assignments(q),
                       TokenAssignment(penalty=0.1, name=TokenRange(0, 1),
-                                      category=TokenRange(1, 2)))
+                                      near_item=TokenRange(1, 2)))
 
 
-def test_category_in_middle():
+def test_near_item_in_middle():
     q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
-                   (BreakType.WORD, PhraseType.NONE, [(2, TokenType.CATEGORY)]),
+                   (BreakType.WORD, PhraseType.NONE, [(2, TokenType.NEAR_ITEM)]),
                    (BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)]))
 
     check_assignments(yield_token_assignments(q))