From 9ff4f66f5555cc812c0e8f10ef007cdeae3b9836 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann Date: Thu, 10 Jun 2021 17:18:23 +0200 Subject: [PATCH] complete tests for icu tokenizer --- nominatim/tokenizer/icu_name_processor.py | 13 +- nominatim/tokenizer/legacy_icu_tokenizer.py | 24 ++- test/python/conftest.py | 1 + test/python/mocks.py | 7 + .../test_tokenizer_icu_name_processor.py | 13 ++ test/python/test_tokenizer_legacy_icu.py | 191 ++++++++++++++---- 6 files changed, 203 insertions(+), 46 deletions(-) diff --git a/nominatim/tokenizer/icu_name_processor.py b/nominatim/tokenizer/icu_name_processor.py index 4d5975c3..ed0a20d9 100644 --- a/nominatim/tokenizer/icu_name_processor.py +++ b/nominatim/tokenizer/icu_name_processor.py @@ -101,10 +101,19 @@ class ICUNameProcessor: else: pos += 1 + results = [] + if startpos == 0: - return [self.to_ascii.transliterate(norm_name)] + trans_name = self.to_ascii.transliterate(norm_name).strip() + if trans_name: + results.append(trans_name) + else: + for variant in variants: + trans_name = self.to_ascii.transliterate(variant + baseform[startpos:pos]).strip() + if trans_name: + results.append(trans_name) - return [self.to_ascii.transliterate(v + baseform[startpos:pos]).strip() for v in variants] + return results def get_search_normalized(self, name): diff --git a/nominatim/tokenizer/legacy_icu_tokenizer.py b/nominatim/tokenizer/legacy_icu_tokenizer.py index 6148e459..5f83b73d 100644 --- a/nominatim/tokenizer/legacy_icu_tokenizer.py +++ b/nominatim/tokenizer/legacy_icu_tokenizer.py @@ -123,7 +123,7 @@ class LegacyICUTokenizer: """ return LegacyICUNameAnalyzer(self.dsn, ICUNameProcessor(self.naming_rules)) - + # pylint: disable=missing-format-attribute def _install_php(self, phpdir): """ Install the php script for the tokenizer. """ @@ -134,7 +134,7 @@ class LegacyICUTokenizer: @define('CONST_Term_Normalization_Rules', "{0.term_normalization}"); @define('CONST_Transliteration', "{0.naming_rules.search_rules}"); require_once('{1}/tokenizer/legacy_icu_tokenizer.php'); - """.format(self, phpdir))) # pylint: disable=missing-format-attribute + """.format(self, phpdir))) def _save_config(self, config): @@ -166,9 +166,11 @@ class LegacyICUTokenizer: cur.execute("SELECT svals(name) as v, count(*) FROM place GROUP BY v") for name, cnt in cur: + terms = set() for word in name_proc.get_variants_ascii(name_proc.get_normalized(name)): - for term in word.split(): - words[term] += cnt + terms.update(word.split()) + for term in terms: + words[term] += cnt # copy them back into the word table with CopyBuffer() as copystr: @@ -446,6 +448,9 @@ class LegacyICUNameAnalyzer: full, part = self._cache.names.get(norm_name, (None, None)) if full is None: variants = self.name_processor.get_variants_ascii(norm_name) + if not variants: + continue + with self.conn.cursor() as cur: cur.execute("SELECT (getorcreate_full_word(%s, %s)).*", (norm_name, variants)) @@ -465,12 +470,13 @@ class LegacyICUNameAnalyzer: given dictionary of names. 
""" full_names = set() - for name in (n for ns in names.values() for n in re.split('[;,]', ns)): - full_names.add(name.strip()) + for name in (n.strip() for ns in names.values() for n in re.split('[;,]', ns)): + if name: + full_names.add(name) - brace_idx = name.find('(') - if brace_idx >= 0: - full_names.add(name[:brace_idx].strip()) + brace_idx = name.find('(') + if brace_idx >= 0: + full_names.add(name[:brace_idx].strip()) return full_names diff --git a/test/python/conftest.py b/test/python/conftest.py index 9a43a67e..1fca4b62 100644 --- a/test/python/conftest.py +++ b/test/python/conftest.py @@ -173,6 +173,7 @@ def place_row(place_table, temp_db_cursor): """ A factory for rows in the place table. The table is created as a prerequisite to the fixture. """ + psycopg2.extras.register_hstore(temp_db_cursor) idseq = itertools.count(1001) def _insert(osm_type='N', osm_id=None, cls='amenity', typ='cafe', names=None, admin_level=None, address=None, extratags=None, geom=None): diff --git a/test/python/mocks.py b/test/python/mocks.py index e95d5772..f9faaa93 100644 --- a/test/python/mocks.py +++ b/test/python/mocks.py @@ -98,6 +98,13 @@ class MockWordTable: WHERE class = 'place' and type = 'postcode'""") return set((row[0] for row in cur)) + def get_partial_words(self): + with self.conn.cursor() as cur: + cur.execute("""SELECT word_token, search_name_count FROM word + WHERE class is null and country_code is null + and not word_token like ' %'""") + return set((tuple(row) for row in cur)) + class MockPlacexTable: """ A placex table for testing. diff --git a/test/python/test_tokenizer_icu_name_processor.py b/test/python/test_tokenizer_icu_name_processor.py index 817f9072..73636f93 100644 --- a/test/python/test_tokenizer_icu_name_processor.py +++ b/test/python/test_tokenizer_icu_name_processor.py @@ -16,12 +16,14 @@ def cfgfile(tmp_path, suffix='.yaml'): content = dedent("""\ normalization: - ":: NFD ()" + - "'🜳' > ' '" - "[[:Nonspacing Mark:] [:Cf:]] >" - ":: lower ()" - "[[:Punctuation:][:Space:]]+ > ' '" - ":: NFC ()" transliteration: - ":: Latin ()" + - "'🜵' > ' '" """) content += "compound_suffixes:\n" content += '\n'.join((" - " + s for s in suffixes)) + '\n' @@ -52,6 +54,17 @@ def test_simple_variants(cfgfile): assert get_normalized_variants(proc, "hallo") == ['hallo'] +def test_variants_empty(cfgfile): + fpath = cfgfile([], ['saint => 🜵', 'street => st']) + + rules = ICUNameProcessorRules(loader=ICURuleLoader(fpath)) + proc = ICUNameProcessor(rules) + + assert get_normalized_variants(proc, '🜵') == [] + assert get_normalized_variants(proc, '🜳') == [] + assert get_normalized_variants(proc, 'saint') == ['saint'] + + def test_multiple_replacements(cfgfile): fpath = cfgfile([], ['saint => s,st', 'street => st']) diff --git a/test/python/test_tokenizer_legacy_icu.py b/test/python/test_tokenizer_legacy_icu.py index 0f9230ac..b687d0e4 100644 --- a/test/python/test_tokenizer_legacy_icu.py +++ b/test/python/test_tokenizer_legacy_icu.py @@ -143,6 +143,20 @@ def test_init_new(tokenizer_factory, test_config, monkeypatch, db_prop): assert db_prop(legacy_icu_tokenizer.DBCFG_MAXWORDFREQ) is not None +def test_init_word_table(tokenizer_factory, test_config, place_row, word_table): + place_row(names={'name' : 'Test Area', 'ref' : '52'}) + place_row(names={'name' : 'No Area'}) + place_row(names={'name' : 'Holzstrasse'}) + + tok = tokenizer_factory() + tok.init_new_db(test_config) + + assert word_table.get_partial_words() == {('te', 1), ('st', 1), ('52', 1), + ('no', 1), ('area', 2), + ('holz', 1), ('strasse', 
1),
+                                              ('str', 1)}
+
+
 def test_init_from_project(monkeypatch, test_config, tokenizer_factory):
     monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')
     monkeypatch.setenv('NOMINATIM_MAX_WORD_FREQUENCY', '90300')
@@ -179,10 +193,11 @@ def test_update_sql_functions(db_prop, temp_db_cursor,
     assert test_content == set((('1133', ), ))
 
 
-def test_make_standard_hnr(analyzer):
-    with analyzer(abbr=('IV => 4',)) as anl:
-        assert anl._make_standard_hnr('345') == '345'
-        assert anl._make_standard_hnr('iv') == 'IV'
+def test_normalize_postcode(analyzer):
+    with analyzer() as anl:
+        assert anl.normalize_postcode('123') == '123'
+        assert anl.normalize_postcode('ab-34 ') == 'AB-34'
+        assert anl.normalize_postcode('38 Б') == '38 Б'
 
 
 def test_update_postcodes_from_db_empty(analyzer, table_factory, word_table):
@@ -266,6 +281,22 @@ def test_update_special_phrase_modify(analyzer, word_table):
                                (' GARDEN', 'garden', 'leisure', 'garden', 'near')}
 
 
+def test_add_country_names_new(analyzer, word_table):
+    with analyzer() as anl:
+        anl.add_country_names('es', {'name': 'Espagña', 'name:en': 'Spain'})
+
+    assert word_table.get_country() == {('es', ' ESPAGÑA'), ('es', ' SPAIN')}
+
+
+def test_add_country_names_extend(analyzer, word_table):
+    word_table.add_country('ch', ' SCHWEIZ')
+
+    with analyzer() as anl:
+        anl.add_country_names('ch', {'name': 'Schweiz', 'name:fr': 'Suisse'})
+
+    assert word_table.get_country() == {('ch', ' SCHWEIZ'), ('ch', ' SUISSE')}
+
+
 class TestPlaceNames:
 
     @pytest.fixture(autouse=True)
@@ -284,64 +315,154 @@ class TestPlaceNames:
 
 
     def test_simple_names(self):
-        info = self.analyzer.process_place({'name' : {'name' : 'Soft bAr', 'ref': '34'}})
+        info = self.analyzer.process_place({'name': {'name': 'Soft bAr', 'ref': '34'}})
 
         self.expect_name_terms(info, '#Soft bAr', '#34','Soft', 'bAr', '34')
 
 
     @pytest.mark.parametrize('sep', [',' , ';'])
     def test_names_with_separator(self, sep):
-        info = self.analyzer.process_place({'name' : {'name' : sep.join(('New York', 'Big Apple'))}})
+        info = self.analyzer.process_place({'name': {'name': sep.join(('New York', 'Big Apple'))}})
 
         self.expect_name_terms(info, '#New York', '#Big Apple',
                                'new', 'york', 'big', 'apple')
 
 
     def test_full_names_with_bracket(self):
-        info = self.analyzer.process_place({'name' : {'name' : 'Houseboat (left)'}})
+        info = self.analyzer.process_place({'name': {'name': 'Houseboat (left)'}})
 
         self.expect_name_terms(info, '#Houseboat (left)', '#Houseboat',
                                'houseboat', 'left')
 
 
-@pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345'])
-def test_process_place_postcode(analyzer, word_table, pcode):
-    with analyzer() as anl:
-        anl.process_place({'address': {'postcode' : pcode}})
+    def test_country_name(self, word_table):
+        info = self.analyzer.process_place({'name': {'name': 'Norge'},
+                                            'country_feature': 'no'})
 
-    assert word_table.get_postcodes() == {pcode, }
+        self.expect_name_terms(info, '#norge', 'norge')
+        assert word_table.get_country() == {('no', ' NORGE')}
 
 
-@pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836'])
-def test_process_place_bad_postcode(analyzer, word_table, pcode):
-    with analyzer() as anl:
-        anl.process_place({'address': {'postcode' : pcode}})
+class TestPlaceAddress:
+
+    @pytest.fixture(autouse=True)
+    def setup(self, analyzer, getorcreate_full_word):
+        with analyzer(trans=(":: upper()", "'🜵' > ' '")) as anl:
+            self.analyzer = anl
+            yield anl
 
-    assert not word_table.get_postcodes()
 
+    def process_address(self, **kwargs):
+        return self.analyzer.process_place({'address': kwargs})
 
-@pytest.mark.parametrize('hnr', ['123a', '1', 
'101']) -def test_process_place_housenumbers_simple(analyzer, hnr, getorcreate_hnr_id): - with analyzer() as anl: - info = anl.process_place({'address': {'housenumber' : hnr}}) - assert info['hnr'] == hnr.upper() - assert info['hnr_tokens'] == "{-1}" + def name_token_set(self, *expected_terms): + tokens = self.analyzer.get_word_token_info(expected_terms) + for token in tokens: + assert token[2] is not None, "No token for {0}".format(token) + + return set((t[2] for t in tokens)) -def test_process_place_housenumbers_lists(analyzer, getorcreate_hnr_id): - with analyzer() as anl: - info = anl.process_place({'address': {'conscriptionnumber' : '1; 2;3'}}) + @pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345']) + def test_process_place_postcode(self, word_table, pcode): + self.process_address(postcode=pcode) - assert set(info['hnr'].split(';')) == set(('1', '2', '3')) - assert info['hnr_tokens'] == "{-1,-2,-3}" + assert word_table.get_postcodes() == {pcode, } -def test_process_place_housenumbers_duplicates(analyzer, getorcreate_hnr_id): - with analyzer() as anl: - info = anl.process_place({'address': {'housenumber' : '134', - 'conscriptionnumber' : '134', - 'streetnumber' : '99a'}}) + @pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836']) + def test_process_place_bad_postcode(self, word_table, pcode): + self.process_address(postcode=pcode) + + assert not word_table.get_postcodes() + + + @pytest.mark.parametrize('hnr', ['123a', '1', '101']) + def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id): + info = self.process_address(housenumber=hnr) + + assert info['hnr'] == hnr.upper() + assert info['hnr_tokens'] == "{-1}" + + + def test_process_place_housenumbers_lists(self, getorcreate_hnr_id): + info = self.process_address(conscriptionnumber='1; 2;3') + + assert set(info['hnr'].split(';')) == set(('1', '2', '3')) + assert info['hnr_tokens'] == "{-1,-2,-3}" + + + def test_process_place_housenumbers_duplicates(self, getorcreate_hnr_id): + info = self.process_address(housenumber='134', + conscriptionnumber='134', + streetnumber='99a') + + assert set(info['hnr'].split(';')) == set(('134', '99A')) + assert info['hnr_tokens'] == "{-1,-2}" + + + def test_process_place_housenumbers_cached(self, getorcreate_hnr_id): + info = self.process_address(housenumber="45") + assert info['hnr_tokens'] == "{-1}" + + info = self.process_address(housenumber="46") + assert info['hnr_tokens'] == "{-2}" + + info = self.process_address(housenumber="41;45") + assert eval(info['hnr_tokens']) == {-1, -3} + + info = self.process_address(housenumber="41") + assert eval(info['hnr_tokens']) == {-3} + + + def test_process_place_street(self): + info = self.process_address(street='Grand Road') + + assert eval(info['street']) == self.name_token_set('#GRAND ROAD') + + + def test_process_place_street_empty(self): + info = self.process_address(street='🜵') + + assert 'street' not in info + + + def test_process_place_place(self): + info = self.process_address(place='Honu Lulu') + + assert eval(info['place_search']) == self.name_token_set('#HONU LULU', + 'HONU', 'LULU') + assert eval(info['place_match']) == self.name_token_set('#HONU LULU') + + + def test_process_place_place_empty(self): + info = self.process_address(place='🜵') + + assert 'place_search' not in info + assert 'place_match' not in info + + + def test_process_place_address_terms(self): + info = self.process_address(country='de', city='Zwickau', state='Sachsen', + suburb='Zwickau', street='Hauptstr', + full='right behind the church') + + 
city_full = self.name_token_set('#ZWICKAU')
+        city_all = self.name_token_set('#ZWICKAU', 'ZWICKAU')
+        state_full = self.name_token_set('#SACHSEN')
+        state_all = self.name_token_set('#SACHSEN', 'SACHSEN')
+
+        result = {k: [eval(v[0]), eval(v[1])] for k, v in info['addr'].items()}
+
+        assert result == {'city': [city_all, city_full],
+                          'suburb': [city_all, city_full],
+                          'state': [state_all, state_full]}
+
+
+    def test_process_place_address_terms_empty(self):
+        info = self.process_address(country='de', city=' ', street='Hauptstr',
+                                    full='right behind the church')
+
+        assert 'addr' not in info
 
-    assert set(info['hnr'].split(';')) == set(('134', '99A'))
-    assert info['hnr_tokens'] == "{-1,-2}"
-- 
2.39.5
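
The empty-name handling that several of the new tests rely on is easiest to
see in isolation. Below is a minimal standalone sketch of the reworked
_compute_full_names() logic; the free-standing function name and the sample
input are illustrative only, not part of the patch:

    import re

    def compute_full_names(names):
        # Split multi-value name tags on ';' and ',', drop entries that
        # are empty after stripping, and additionally index the part
        # before an opening bracket, as the patched helper now does.
        full_names = set()
        for name in (n.strip() for ns in names.values()
                     for n in re.split('[;,]', ns)):
            if name:
                full_names.add(name)

                brace_idx = name.find('(')
                if brace_idx >= 0:
                    full_names.add(name[:brace_idx].strip())

        return full_names

    assert compute_full_names({'name': 'Houseboat (left);;Big Apple'}) == \
            {'Houseboat (left)', 'Houseboat', 'Big Apple'}

Note how the empty entry between the two semicolons is silently dropped and
the bracketed name is indexed both with and without its bracket part.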