X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/29b02f9e56952bc30bbd71d7094181dae6817e69..0fb8eade136ea03e7853aca0795ca69833c33661:/test/python/test_tokenizer_legacy_icu.py diff --git a/test/python/test_tokenizer_legacy_icu.py b/test/python/test_tokenizer_legacy_icu.py index d8ca2f22..ed489662 100644 --- a/test/python/test_tokenizer_legacy_icu.py +++ b/test/python/test_tokenizer_legacy_icu.py @@ -2,12 +2,21 @@ Tests for Legacy ICU tokenizer. """ import shutil +import yaml import pytest from nominatim.tokenizer import legacy_icu_tokenizer +from nominatim.tokenizer.icu_name_processor import ICUNameProcessorRules +from nominatim.tokenizer.icu_rule_loader import ICURuleLoader from nominatim.db import properties +from mock_icu_word_table import MockIcuWordTable + +@pytest.fixture +def word_table(temp_db_conn): + return MockIcuWordTable(temp_db_conn) + @pytest.fixture def test_config(def_config, tmp_path): @@ -18,8 +27,8 @@ def test_config(def_config, tmp_path): sqldir.mkdir() (sqldir / 'tokenizer').mkdir() (sqldir / 'tokenizer' / 'legacy_icu_tokenizer.sql').write_text("SELECT 'a'") - shutil.copy(str(def_config.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_tables.sql'), - str(sqldir / 'tokenizer' / 'legacy_tokenizer_tables.sql')) + shutil.copy(str(def_config.lib_dir.sql / 'tokenizer' / 'icu_tokenizer_tables.sql'), + str(sqldir / 'tokenizer' / 'icu_tokenizer_tables.sql')) def_config.lib_dir.sql = sqldir @@ -40,16 +49,10 @@ def tokenizer_factory(dsn, tmp_path, property_table, @pytest.fixture def db_prop(temp_db_conn): def _get_db_property(name): - return properties.get_property(temp_db_conn, - getattr(legacy_icu_tokenizer, name)) + return properties.get_property(temp_db_conn, name) return _get_db_property -@pytest.fixture -def tokenizer_setup(tokenizer_factory, test_config): - tok = tokenizer_factory() - tok.init_new_db(test_config) - @pytest.fixture def analyzer(tokenizer_factory, test_config, monkeypatch, @@ -62,9 +65,15 @@ def analyzer(tokenizer_factory, test_config, monkeypatch, tok.init_new_db(test_config) monkeypatch.undo() - def _mk_analyser(trans=':: upper();', abbr=(('STREET', 'ST'), )): - tok.transliteration = trans - tok.abbreviations = abbr + def _mk_analyser(norm=("[[:Punctuation:][:Space:]]+ > ' '",), trans=(':: upper()',), + variants=('~gasse -> gasse', 'street => st', )): + cfgfile = tmp_path / 'analyser_test_config.yaml' + with cfgfile.open('w') as stream: + cfgstr = {'normalization' : list(norm), + 'transliteration' : list(trans), + 'variants' : [ {'words': list(variants)}]} + yaml.dump(cfgstr, stream) + tok.naming_rules = ICUNameProcessorRules(loader=ICURuleLoader(cfgfile)) return tok.name_analyzer() @@ -72,10 +81,56 @@ def analyzer(tokenizer_factory, test_config, monkeypatch, @pytest.fixture -def getorcreate_term_id(temp_db_cursor): - temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_term_id(lookup_term TEXT) - RETURNS INTEGER AS $$ - SELECT nextval('seq_word')::INTEGER; $$ LANGUAGE SQL""") +def getorcreate_full_word(temp_db_cursor): + temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_full_word( + norm_term TEXT, lookup_terms TEXT[], + OUT full_token INT, + OUT partial_tokens INT[]) + AS $$ +DECLARE + partial_terms TEXT[] = '{}'::TEXT[]; + term TEXT; + term_id INTEGER; + term_count INTEGER; +BEGIN + SELECT min(word_id) INTO full_token + FROM word WHERE info->>'word' = norm_term and type = 'W'; + + IF full_token IS NULL THEN + full_token := nextval('seq_word'); + INSERT INTO word (word_id, word_token, type, info) + SELECT 
full_token, lookup_term, 'W', + json_build_object('word', norm_term, 'count', 0) + FROM unnest(lookup_terms) as lookup_term; + END IF; + + FOR term IN SELECT unnest(string_to_array(unnest(lookup_terms), ' ')) LOOP + term := trim(term); + IF NOT (ARRAY[term] <@ partial_terms) THEN + partial_terms := partial_terms || term; + END IF; + END LOOP; + + partial_tokens := '{}'::INT[]; + FOR term IN SELECT unnest(partial_terms) LOOP + SELECT min(word_id), max(info->>'count') INTO term_id, term_count + FROM word WHERE word_token = term and type = 'w'; + + IF term_id IS NULL THEN + term_id := nextval('seq_word'); + term_count := 0; + INSERT INTO word (word_id, word_token, type, info) + VALUES (term_id, term, 'w', json_build_object('count', term_count)); + END IF; + + IF NOT (ARRAY[term_id] <@ partial_tokens) THEN + partial_tokens := partial_tokens || term_id; + END IF; + END LOOP; +END; +$$ +LANGUAGE plpgsql; + """) @pytest.fixture @@ -91,19 +146,37 @@ def test_init_new(tokenizer_factory, test_config, monkeypatch, db_prop): tok = tokenizer_factory() tok.init_new_db(test_config) - assert db_prop('DBCFG_NORMALIZATION') == ':: lower();' - assert db_prop('DBCFG_TRANSLITERATION') is not None - assert db_prop('DBCFG_ABBREVIATIONS') is not None + assert db_prop(legacy_icu_tokenizer.DBCFG_TERM_NORMALIZATION) == ':: lower();' + assert db_prop(legacy_icu_tokenizer.DBCFG_MAXWORDFREQ) is not None + + +def test_init_word_table(tokenizer_factory, test_config, place_row, word_table): + place_row(names={'name' : 'Test Area', 'ref' : '52'}) + place_row(names={'name' : 'No Area'}) + place_row(names={'name' : 'Holzstrasse'}) + + tok = tokenizer_factory() + tok.init_new_db(test_config) + + assert word_table.get_partial_words() == {('test', 1), + ('no', 1), ('area', 2), + ('holz', 1), ('strasse', 1), + ('str', 1)} -def test_init_from_project(tokenizer_setup, tokenizer_factory): +def test_init_from_project(monkeypatch, test_config, tokenizer_factory): + monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();') + monkeypatch.setenv('NOMINATIM_MAX_WORD_FREQUENCY', '90300') tok = tokenizer_factory() + tok.init_new_db(test_config) + monkeypatch.undo() + tok = tokenizer_factory() tok.init_from_project() - assert tok.normalization is not None - assert tok.transliteration is not None - assert tok.abbreviations is not None + assert tok.naming_rules is not None + assert tok.term_normalization == ':: lower();' + assert tok.max_word_frequency == '90300' def test_update_sql_functions(db_prop, temp_db_cursor, @@ -114,7 +187,7 @@ def test_update_sql_functions(db_prop, temp_db_cursor, tok.init_new_db(test_config) monkeypatch.undo() - assert db_prop('DBCFG_MAXWORDFREQ') == '1133' + assert db_prop(legacy_icu_tokenizer.DBCFG_MAXWORDFREQ) == '1133' table_factory('test', 'txt TEXT') @@ -127,18 +200,11 @@ def test_update_sql_functions(db_prop, temp_db_cursor, assert test_content == set((('1133', ), )) -def test_make_standard_word(analyzer): - with analyzer(abbr=(('STREET', 'ST'), ('tiny', 't'))) as anl: - assert anl.make_standard_word('tiny street') == 'TINY ST' - - with analyzer(abbr=(('STRASSE', 'STR'), ('STR', 'ST'))) as anl: - assert anl.make_standard_word('Hauptstrasse') == 'HAUPTST' - - -def test_make_standard_hnr(analyzer): - with analyzer(abbr=(('IV', '4'),)) as anl: - assert anl._make_standard_hnr('345') == '345' - assert anl._make_standard_hnr('iv') == 'IV' +def test_normalize_postcode(analyzer): + with analyzer() as anl: + anl.normalize_postcode('123') == '123' + anl.normalize_postcode('ab-34 ') == 'AB-34' + 
anl.normalize_postcode('38 Б') == '38 Б' def test_update_postcodes_from_db_empty(analyzer, table_factory, word_table): @@ -168,20 +234,20 @@ def test_update_postcodes_from_db_add_and_remove(analyzer, table_factory, word_t def test_update_special_phrase_empty_table(analyzer, word_table): with analyzer() as anl: anl.update_special_phrases([ - ("König bei", "amenity", "royal", "near"), - ("Könige", "amenity", "royal", "-"), + ("König bei", "amenity", "royal", "near"), + ("Könige ", "amenity", "royal", "-"), ("street", "highway", "primary", "in") ], True) assert word_table.get_special() \ - == {(' KÖNIG BEI', 'könig bei', 'amenity', 'royal', 'near'), - (' KÖNIGE', 'könige', 'amenity', 'royal', None), - (' ST', 'street', 'highway', 'primary', 'in')} + == {('KÖNIG BEI', 'König bei', 'amenity', 'royal', 'near'), + ('KÖNIGE', 'Könige', 'amenity', 'royal', None), + ('STREET', 'street', 'highway', 'primary', 'in')} def test_update_special_phrase_delete_all(analyzer, word_table): - word_table.add_special(' FOO', 'foo', 'amenity', 'prison', 'in') - word_table.add_special(' BAR', 'bar', 'highway', 'road', None) + word_table.add_special('FOO', 'foo', 'amenity', 'prison', 'in') + word_table.add_special('BAR', 'bar', 'highway', 'road', None) assert word_table.count_special() == 2 @@ -192,8 +258,8 @@ def test_update_special_phrase_delete_all(analyzer, word_table): def test_update_special_phrases_no_replace(analyzer, word_table): - word_table.add_special(' FOO', 'foo', 'amenity', 'prison', 'in') - word_table.add_special(' BAR', 'bar', 'highway', 'road', None) + word_table.add_special('FOO', 'foo', 'amenity', 'prison', 'in') + word_table.add_special('BAR', 'bar', 'highway', 'road', None) assert word_table.count_special() == 2 @@ -204,8 +270,8 @@ def test_update_special_phrases_no_replace(analyzer, word_table): def test_update_special_phrase_modify(analyzer, word_table): - word_table.add_special(' FOO', 'foo', 'amenity', 'prison', 'in') - word_table.add_special(' BAR', 'bar', 'highway', 'road', None) + word_table.add_special('FOO', 'foo', 'amenity', 'prison', 'in') + word_table.add_special('BAR', 'bar', 'highway', 'road', None) assert word_table.count_special() == 2 @@ -217,71 +283,194 @@ def test_update_special_phrase_modify(analyzer, word_table): ], True) assert word_table.get_special() \ - == {(' PRISON', 'prison', 'amenity', 'prison', 'in'), - (' BAR', 'bar', 'highway', 'road', None), - (' GARDEN', 'garden', 'leisure', 'garden', 'near')} + == {('PRISON', 'prison', 'amenity', 'prison', 'in'), + ('BAR', 'bar', 'highway', 'road', None), + ('GARDEN', 'garden', 'leisure', 'garden', 'near')} -def test_process_place_names(analyzer, getorcreate_term_id): +def test_add_country_names_new(analyzer, word_table): with analyzer() as anl: - info = anl.process_place({'name' : {'name' : 'Soft bAr', 'ref': '34'}}) + anl.add_country_names('es', {'name': 'Espagña', 'name:en': 'Spain'}) + + assert word_table.get_country() == {('es', 'ESPAGÑA'), ('es', 'SPAIN')} - assert info['names'] == '{1,2,3,4,5}' +def test_add_country_names_extend(analyzer, word_table): + word_table.add_country('ch', 'SCHWEIZ') -@pytest.mark.parametrize('sep', [',' , ';']) -def test_full_names_with_separator(analyzer, getorcreate_term_id, sep): with analyzer() as anl: - names = anl._compute_full_names({'name' : sep.join(('New York', 'Big Apple'))}) + anl.add_country_names('ch', {'name': 'Schweiz', 'name:fr': 'Suisse'}) - assert names == set(('NEW YORK', 'BIG APPLE')) + assert word_table.get_country() == {('ch', 'SCHWEIZ'), ('ch', 'SUISSE')} -def 
test_full_names_with_bracket(analyzer, getorcreate_term_id): - with analyzer() as anl: - names = anl._compute_full_names({'name' : 'Houseboat (left)'}) +class TestPlaceNames: - assert names == set(('HOUSEBOAT (LEFT)', 'HOUSEBOAT')) + @pytest.fixture(autouse=True) + def setup(self, analyzer, getorcreate_full_word): + with analyzer() as anl: + self.analyzer = anl + yield anl -@pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345']) -def test_process_place_postcode(analyzer, word_table, pcode): - with analyzer() as anl: - anl.process_place({'address': {'postcode' : pcode}}) + def expect_name_terms(self, info, *expected_terms): + tokens = self.analyzer.get_word_token_info(expected_terms) + print (tokens) + for token in tokens: + assert token[2] is not None, "No token for {0}".format(token) - assert word_table.get_postcodes() == {pcode, } + assert eval(info['names']) == set((t[2] for t in tokens)) -@pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836']) -def test_process_place_bad_postcode(analyzer, word_table, pcode): - with analyzer() as anl: - anl.process_place({'address': {'postcode' : pcode}}) + def test_simple_names(self): + info = self.analyzer.process_place({'name': {'name': 'Soft bAr', 'ref': '34'}}) - assert not word_table.get_postcodes() + self.expect_name_terms(info, '#Soft bAr', '#34', 'Soft', 'bAr', '34') -@pytest.mark.parametrize('hnr', ['123a', '1', '101']) -def test_process_place_housenumbers_simple(analyzer, hnr, getorcreate_hnr_id): - with analyzer() as anl: - info = anl.process_place({'address': {'housenumber' : hnr}}) + @pytest.mark.parametrize('sep', [',' , ';']) + def test_names_with_separator(self, sep): + info = self.analyzer.process_place({'name': {'name': sep.join(('New York', 'Big Apple'))}}) - assert info['hnr'] == hnr.upper() - assert info['hnr_tokens'] == "{-1}" + self.expect_name_terms(info, '#New York', '#Big Apple', + 'new', 'york', 'big', 'apple') -def test_process_place_housenumbers_lists(analyzer, getorcreate_hnr_id): - with analyzer() as anl: - info = anl.process_place({'address': {'conscriptionnumber' : '1; 2;3'}}) + def test_full_names_with_bracket(self): + info = self.analyzer.process_place({'name': {'name': 'Houseboat (left)'}}) - assert set(info['hnr'].split(';')) == set(('1', '2', '3')) - assert info['hnr_tokens'] == "{-1,-2,-3}" + self.expect_name_terms(info, '#Houseboat (left)', '#Houseboat', + 'houseboat', 'left') -def test_process_place_housenumbers_duplicates(analyzer, getorcreate_hnr_id): - with analyzer() as anl: - info = anl.process_place({'address': {'housenumber' : '134', - 'conscriptionnumber' : '134', - 'streetnumber' : '99a'}}) + def test_country_name(self, word_table): + info = self.analyzer.process_place({'name': {'name': 'Norge'}, + 'country_feature': 'no'}) + + self.expect_name_terms(info, '#norge', 'norge') + assert word_table.get_country() == {('no', 'NORGE')} + + +class TestPlaceAddress: + + @pytest.fixture(autouse=True) + def setup(self, analyzer, getorcreate_full_word): + with analyzer(trans=(":: upper()", "'🜵' > ' '")) as anl: + self.analyzer = anl + yield anl + + + def process_address(self, **kwargs): + return self.analyzer.process_place({'address': kwargs}) + + + def name_token_set(self, *expected_terms): + tokens = self.analyzer.get_word_token_info(expected_terms) + for token in tokens: + assert token[2] is not None, "No token for {0}".format(token) + + return set((t[2] for t in tokens)) + + + @pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345']) + def test_process_place_postcode(self, 
word_table, pcode): + self.process_address(postcode=pcode) + + assert word_table.get_postcodes() == {pcode, } + + + @pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836']) + def test_process_place_bad_postcode(self, word_table, pcode): + self.process_address(postcode=pcode) + + assert not word_table.get_postcodes() + + + @pytest.mark.parametrize('hnr', ['123a', '1', '101']) + def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id): + info = self.process_address(housenumber=hnr) + + assert info['hnr'] == hnr.upper() + assert info['hnr_tokens'] == "{-1}" + + + def test_process_place_housenumbers_lists(self, getorcreate_hnr_id): + info = self.process_address(conscriptionnumber='1; 2;3') + + assert set(info['hnr'].split(';')) == set(('1', '2', '3')) + assert info['hnr_tokens'] == "{-1,-2,-3}" + + + def test_process_place_housenumbers_duplicates(self, getorcreate_hnr_id): + info = self.process_address(housenumber='134', + conscriptionnumber='134', + streetnumber='99a') + + assert set(info['hnr'].split(';')) == set(('134', '99A')) + assert info['hnr_tokens'] == "{-1,-2}" + + + def test_process_place_housenumbers_cached(self, getorcreate_hnr_id): + info = self.process_address(housenumber="45") + assert info['hnr_tokens'] == "{-1}" + + info = self.process_address(housenumber="46") + assert info['hnr_tokens'] == "{-2}" + + info = self.process_address(housenumber="41;45") + assert eval(info['hnr_tokens']) == {-1, -3} + + info = self.process_address(housenumber="41") + assert eval(info['hnr_tokens']) == {-3} + + + def test_process_place_street(self): + info = self.process_address(street='Grand Road') + + assert eval(info['street']) == self.name_token_set('#GRAND ROAD') + + + def test_process_place_street_empty(self): + info = self.process_address(street='🜵') + + assert 'street' not in info + + + def test_process_place_place(self): + info = self.process_address(place='Honu Lulu') + + assert eval(info['place_search']) == self.name_token_set('#HONU LULU', + 'HONU', 'LULU') + assert eval(info['place_match']) == self.name_token_set('#HONU LULU') + + + def test_process_place_place_empty(self): + info = self.process_address(place='🜵') + + assert 'place_search' not in info + assert 'place_match' not in info + + + def test_process_place_address_terms(self): + info = self.process_address(country='de', city='Zwickau', state='Sachsen', + suburb='Zwickau', street='Hauptstr', + full='right behind the church') + + city_full = self.name_token_set('#ZWICKAU') + city_all = self.name_token_set('#ZWICKAU', 'ZWICKAU') + state_full = self.name_token_set('#SACHSEN') + state_all = self.name_token_set('#SACHSEN', 'SACHSEN') + + result = {k: [eval(v[0]), eval(v[1])] for k,v in info['addr'].items()} + + assert result == {'city': [city_all, city_full], + 'suburb': [city_all, city_full], + 'state': [state_all, state_full]} + + + def test_process_place_address_terms_empty(self): + info = self.process_address(country='de', city=' ', street='Hauptstr', + full='right behind the church') + + assert 'addr' not in info - assert set(info['hnr'].split(';')) == set(('134', '99A')) - assert info['hnr_tokens'] == "{-1,-2}"
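
Note on the rewritten analyzer fixture: the old _mk_analyser() set tok.transliteration and tok.abbreviations directly, while the new version writes an ICU rule file and loads it through ICURuleLoader and ICUNameProcessorRules before handing out tok.name_analyzer(). The standalone sketch below mirrors the fixture defaults from this diff; the file name and rule values are illustrative only, and any loader behaviour beyond what the fixture shows is an assumption.

from pathlib import Path

import yaml

from nominatim.tokenizer.icu_rule_loader import ICURuleLoader
from nominatim.tokenizer.icu_name_processor import ICUNameProcessorRules

# The three sections an ICU rule file needs: ICU normalization rules,
# ICU transliteration rules, and search-variant definitions.
config = {
    'normalization': ["[[:Punctuation:][:Space:]]+ > ' '"],
    'transliteration': [':: upper()'],
    'variants': [{'words': ['~gasse -> gasse', 'street => st']}],
}

# Illustrative path; the fixture writes a fresh file under tmp_path per test.
cfgfile = Path('analyser_test_config.yaml')
with cfgfile.open('w') as stream:
    yaml.dump(config, stream)

# The tokenizer's naming rules are then rebuilt from that file, exactly as the
# fixture does before calling tok.name_analyzer().
naming_rules = ICUNameProcessorRules(loader=ICURuleLoader(cfgfile))

Because the fixture regenerates the file on every call, each test can vary the normalization, transliteration and variant rules independently through the norm, trans and variants keyword arguments.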
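
Note on the getorcreate_full_word() mock: it replaces the old getorcreate_term_id() stub, which simply returned the next value of seq_word, and mimics the word-table bookkeeping of the ICU tokenizer: one shared word_id of type 'W' for the full normalised name (one row per lookup form) and one word_id of type 'w' per distinct space-separated partial term. The pure-Python paraphrase below is only an illustration of that logic; the dictionaries and the counter stand in for the word table and the seq_word sequence.

from itertools import count

seq_word = count(1)   # stand-in for the seq_word sequence
full_words = {}       # norm_term -> word_id  (rows of type 'W')
partial_words = {}    # partial term -> word_id  (rows of type 'w')

def _get_or_create(table, key):
    # Allocate a new id only when the key is not known yet.
    if key not in table:
        table[key] = next(seq_word)
    return table[key]

def getorcreate_full_word(norm_term, lookup_terms):
    full_token = _get_or_create(full_words, norm_term)

    # Every distinct space-separated term of the lookup forms gets a partial token.
    partial_tokens = []
    for term in (t for lookup in lookup_terms for t in lookup.split()):
        term_id = _get_or_create(partial_words, term)
        if term_id not in partial_tokens:
            partial_tokens.append(term_id)

    return full_token, partial_tokens

# getorcreate_full_word('Grand Road', ['GRAND ROAD'])  -> (1, [2, 3])
# getorcreate_full_word('Grand Road', ['GRAND ROAD'])  -> (1, [2, 3])   ids are reused

The SQL version additionally stores a per-row 'count' in the info column; that detail is omitted here because the tests above only check which token ids end up in the returned arrays.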