# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the legacy tokenizer.
"""
import shutil
import re

import pytest

from nominatim_db.data.place_info import PlaceInfo
from nominatim_db.tokenizer import legacy_tokenizer
from nominatim_db.db import properties
from nominatim_db.errors import UsageError

from mock_legacy_word_table import MockLegacyWordTable


# Force use of the legacy word table.
@pytest.fixture
def word_table(temp_db_conn):
    return MockLegacyWordTable(temp_db_conn)
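

# Project setup with a dummy nominatim.so and simplified SQL functions,
# so the tokenizer can be initialised without the legacy C module being built.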
@pytest.fixture
def test_config(project_env, tmp_path):
    module_dir = tmp_path / 'module_src'
    module_dir.mkdir()
    (module_dir / 'nominatim.so').write_text('TEST nominatim.so')

    project_env.lib_dir.module = module_dir

    sqldir = tmp_path / 'sql'
    sqldir.mkdir()
    (sqldir / 'tokenizer').mkdir()

    # Get the original SQL but replace make_standard_name to avoid module use.
    init_sql = (project_env.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer.sql').read_text()
    for fn in ('transliteration', 'gettokenstring'):
        init_sql = re.sub(f'CREATE OR REPLACE FUNCTION {fn}[^;]*;',
                          '', init_sql, flags=re.DOTALL)
    init_sql += """
        CREATE OR REPLACE FUNCTION make_standard_name(name TEXT)
        RETURNS TEXT AS $$ SELECT lower(name); $$ LANGUAGE SQL;
    """

    # Also load util functions. Some are needed by the tokenizer.
    init_sql += (project_env.lib_dir.sql / 'functions' / 'utils.sql').read_text()
    (sqldir / 'tokenizer' / 'legacy_tokenizer.sql').write_text(init_sql)

    (sqldir / 'words.sql').write_text("SELECT 'a'")

    shutil.copy(str(project_env.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_tables.sql'),
                str(sqldir / 'tokenizer' / 'legacy_tokenizer_tables.sql'))

    project_env.lib_dir.sql = sqldir
    project_env.lib_dir.data = sqldir

    return project_env
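

# Returns a factory that creates fresh tokenizer instances bound to the
# test database and a temporary tokenizer directory.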
@pytest.fixture
def tokenizer_factory(dsn, tmp_path, property_table):
    (tmp_path / 'tokenizer').mkdir()

    def _maker():
        return legacy_tokenizer.create(dsn, tmp_path / 'tokenizer')

    return _maker


@pytest.fixture
def tokenizer_setup(tokenizer_factory, test_config, monkeypatch, sql_preprocessor):
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
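

# Provides a ready-to-use name analyzer on a freshly initialised database.
# Term normalization is reduced to simple lower-casing.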
@pytest.fixture
def analyzer(tokenizer_factory, test_config, monkeypatch, sql_preprocessor,
             word_table, temp_db_with_extensions, tmp_path):
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
    monkeypatch.undo()

    with tok.name_analyzer() as analyzer:
        yield analyzer
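

# Stand-in for the C-module version of make_standard_name(): wraps the
# lower-cased term in '#' so normalized output is recognizable in assertions.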
@pytest.fixture
def make_standard_name(temp_db_cursor):
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION make_standard_name(name TEXT)
                              RETURNS TEXT AS $$ SELECT '#' || lower(name) || '#'; $$ LANGUAGE SQL""")
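

# Minimal replacement for the module-provided create_postcode_id().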
@pytest.fixture
def create_postcode_id(temp_db_cursor):
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION create_postcode_id(postcode TEXT)
                              RETURNS BOOLEAN AS $$
                              INSERT INTO word (word_token, word, class, type)
                                VALUES (' ' || postcode, postcode, 'place', 'postcode')
                              RETURNING True;
                              $$ LANGUAGE SQL""")


def test_init_new(tokenizer_factory, test_config, monkeypatch,
                  temp_db_conn, sql_preprocessor):
    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', 'xxvv')
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert properties.get_property(temp_db_conn, legacy_tokenizer.DBCFG_NORMALIZATION) == 'xxvv'

    outfile = test_config.project_dir / 'module' / 'nominatim.so'

    assert outfile.exists()
    assert outfile.read_text() == 'TEST nominatim.so'
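    # 33261 == 0o100755, i.e. a regular file with permissions rwxr-xr-x.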
    assert outfile.stat().st_mode == 33261
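

# Without patching _check_module, initialisation must fail with a UsageError.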
def test_init_module_load_failed(tokenizer_factory, test_config):
    tok = tokenizer_factory()

    with pytest.raises(UsageError):
        tok.init_new_db(test_config)


def test_init_module_custom(tokenizer_factory, test_config,
                            monkeypatch, tmp_path, sql_preprocessor):
    module_dir = (tmp_path / 'custom').resolve()
    module_dir.mkdir()
    (module_dir / 'nominatim.so').write_text('CUSTOM nominatim.so')

    monkeypatch.setenv('NOMINATIM_DATABASE_MODULE_PATH', str(module_dir))
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert not (test_config.project_dir / 'module').exists()


def test_init_from_project(tokenizer_setup, tokenizer_factory, test_config):
    tok = tokenizer_factory()

    tok.init_from_project(test_config)

    assert tok.normalization is not None


def test_update_sql_functions(sql_preprocessor, temp_db_conn,
                              tokenizer_factory, test_config, table_factory,
                              monkeypatch, temp_db_cursor):
    monkeypatch.setenv('NOMINATIM_MAX_WORD_FREQUENCY', '1133')
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert properties.get_property(temp_db_conn, legacy_tokenizer.DBCFG_MAXWORDFREQ) == '1133'

    table_factory('test', 'txt TEXT')

    func_file = test_config.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer.sql'
    func_file.write_text("""INSERT INTO test VALUES ('{{max_word_freq}}'),
                                                    ('{{modulepath}}')""")

    tok.update_sql_functions(test_config)

    test_content = temp_db_cursor.row_set('SELECT * FROM test')
    assert test_content == set((('1133', ), (str(test_config.project_dir / 'module'), )))


def test_finalize_import(tokenizer_factory, temp_db_conn,
                         temp_db_cursor, test_config, monkeypatch,
                         sql_preprocessor_cfg):
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)

    func_file = test_config.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_indices.sql'
    func_file.write_text("""CREATE FUNCTION test() RETURNS TEXT
                            AS $$ SELECT 'b'::text $$ LANGUAGE SQL""")

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    tok.finalize_import(test_config)

    assert temp_db_cursor.scalar('SELECT test()') == 'b'


def test_migrate_database(tokenizer_factory, test_config, temp_db_conn, monkeypatch):
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
    tok = tokenizer_factory()
    tok.migrate_database(test_config)

    assert properties.get_property(temp_db_conn, legacy_tokenizer.DBCFG_MAXWORDFREQ) is not None
    assert properties.get_property(temp_db_conn, legacy_tokenizer.DBCFG_NORMALIZATION) is not None

    outfile = test_config.project_dir / 'module' / 'nominatim.so'

    assert outfile.exists()
    assert outfile.read_text() == 'TEST nominatim.so'
    assert outfile.stat().st_mode == 33261


def test_check_database(test_config, tokenizer_factory, monkeypatch,
                        temp_db_cursor, sql_preprocessor_cfg):
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert tok.check_database(False) is None


def test_check_database_no_tokenizer(test_config, tokenizer_factory):
    tok = tokenizer_factory()

    assert tok.check_database(False) is not None


def test_check_database_bad_setup(test_config, tokenizer_factory, monkeypatch,
                                  temp_db_cursor, sql_preprocessor_cfg):
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    # Inject a bad transliteration.
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION make_standard_name(name TEXT)
                              RETURNS TEXT AS $$ SELECT 'garbage'::text; $$ LANGUAGE SQL""")

    assert tok.check_database(False) is not None
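

# Reverse-only imports have no search_name table; update_statistics()
# is expected to cope with that and simply do nothing.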
def test_update_statistics_reverse_only(word_table, tokenizer_factory, test_config):
    tok = tokenizer_factory()
    tok.update_statistics(test_config)


def test_update_statistics(word_table, table_factory, temp_db_cursor,
                           tokenizer_factory, test_config):
    word_table.add_full_word(1000, 'hello')
    # A single search_name row referencing the word above; the place_id is arbitrary.
    table_factory('search_name',
                  'place_id BIGINT, name_vector INT[]',
                  [(12, [1000])])
    tok = tokenizer_factory()

    tok.update_statistics(test_config)

    assert temp_db_cursor.scalar("""SELECT count(*) FROM word
                                    WHERE word_token like ' %' and
                                          search_name_count > 0""") > 0


def test_update_word_tokens(tokenizer_factory):
    tok = tokenizer_factory()

    # This is a noop and should just pass.
    tok.update_word_tokens()


def test_normalize(analyzer):
    assert analyzer.normalize('TEsT') == 'test'


def test_update_postcodes_from_db_empty(analyzer, table_factory, word_table,
                                        create_postcode_id):
    table_factory('location_postcode', 'postcode TEXT',
                  content=(('1234',), ('12 34',), ('AB23',), ('1234',)))

    analyzer.update_postcodes_from_db()

    assert word_table.get_postcodes() == {'1234', '12 34', 'AB23'}


def test_update_postcodes_from_db_add_and_remove(analyzer, table_factory, word_table,
                                                 create_postcode_id):
    table_factory('location_postcode', 'postcode TEXT',
                  content=(('1234',), ('45BC', ), ('XX45', )))
    word_table.add_postcode(' 1234', '1234')
    word_table.add_postcode(' 5678', '5678')

    analyzer.update_postcodes_from_db()

    assert word_table.get_postcodes() == {'1234', '45BC', 'XX45'}


def test_update_special_phrase_empty_table(analyzer, word_table, make_standard_name):
    analyzer.update_special_phrases([
        ("König bei", "amenity", "royal", "near"),
        ("Könige", "amenity", "royal", "-"),
        ("könige", "amenity", "royal", "-"),
        ("strasse", "highway", "primary", "in")
    ], True)

    assert word_table.get_special() \
           == set(((' #könig bei#', 'könig bei', 'amenity', 'royal', 'near'),
                   (' #könige#', 'könige', 'amenity', 'royal', None),
                   (' #strasse#', 'strasse', 'highway', 'primary', 'in')))


def test_update_special_phrase_delete_all(analyzer, word_table, make_standard_name):
    word_table.add_special(' #foo#', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special(' #bar#', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    analyzer.update_special_phrases([], True)

    assert word_table.count_special() == 0


def test_update_special_phrases_no_replace(analyzer, word_table, make_standard_name):
    word_table.add_special(' #foo#', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special(' #bar#', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    analyzer.update_special_phrases([], False)

    assert word_table.count_special() == 2


def test_update_special_phrase_modify(analyzer, word_table, make_standard_name):
    word_table.add_special(' #foo#', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special(' #bar#', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    analyzer.update_special_phrases([
        ('prison', 'amenity', 'prison', 'in'),
        ('bar', 'highway', 'road', '-'),
        ('garden', 'leisure', 'garden', 'near')
    ], True)

    assert word_table.get_special() \
           == set(((' #prison#', 'prison', 'amenity', 'prison', 'in'),
                   (' #bar#', 'bar', 'highway', 'road', None),
                   (' #garden#', 'garden', 'leisure', 'garden', 'near')))


def test_add_country_names(analyzer, word_table, make_standard_name):
    analyzer.add_country_names('de', {'name': 'Germany',
                                      'name:de': 'Deutschland',
                                      'short_name': 'germany'})

    assert word_table.get_country() \
           == {('de', ' #germany#'),
               ('de', ' #deutschland#')}


def test_add_more_country_names(analyzer, word_table, make_standard_name):
    word_table.add_country('fr', ' #france#')
    word_table.add_country('it', ' #italy#')
    word_table.add_country('it', ' #itala#')

    analyzer.add_country_names('it', {'name': 'Italy', 'ref': 'IT'})

    assert word_table.get_country() \
           == {('fr', ' #france#'),
               ('it', ' #italy#'),
               ('it', ' #itala#'),
               ('it', ' #it#')}


@pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345'])
def test_process_place_postcode(analyzer, create_postcode_id, word_table, pcode):
    analyzer.process_place(PlaceInfo({'address': {'postcode' : pcode}}))

    assert word_table.get_postcodes() == {pcode, }


@pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836'])
def test_process_place_bad_postcode(analyzer, create_postcode_id, word_table, pcode):
    analyzer.process_place(PlaceInfo({'address': {'postcode' : pcode}}))

    assert not word_table.get_postcodes()
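

# Housenumber handling. create_housenumbers() is stubbed out to simply
# echo its input, so these tests only check the plumbing around it.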
class TestHousenumberName:

    @staticmethod
    @pytest.fixture(autouse=True)
    def setup_create_housenumbers(temp_db_cursor):
        temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION create_housenumbers(
                                      housenumbers TEXT[],
                                      OUT tokens TEXT, OUT normtext TEXT)
                                  AS $$
                                  SELECT housenumbers::TEXT, array_to_string(housenumbers, ';')
                                  $$ LANGUAGE SQL""")

    @staticmethod
    @pytest.mark.parametrize('hnr', ['123a', '1', '101'])
    def test_process_place_housenumbers_simple(analyzer, hnr):
        info = analyzer.process_place(PlaceInfo({'address': {'housenumber' : hnr}}))

        assert info['hnr'] == hnr
        assert info['hnr_tokens'].startswith("{")

    @staticmethod
    def test_process_place_housenumbers_lists(analyzer):
        info = analyzer.process_place(PlaceInfo({'address': {'conscriptionnumber' : '1; 2;3'}}))

        assert set(info['hnr'].split(';')) == set(('1', '2', '3'))

    @staticmethod
    def test_process_place_housenumbers_duplicates(analyzer):
        info = analyzer.process_place(PlaceInfo({'address': {'housenumber' : '134',
                                                             'conscriptionnumber' : '134',
                                                             'streetnumber' : '99a'}}))

        assert set(info['hnr'].split(';')) == set(('134', '99a'))
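

# Name token assignment. Expected tokens are looked up via
# get_word_token_info(), so the assertions are independent of token ids.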
class TestPlaceNames:

    @pytest.fixture(autouse=True)
    def setup(self, analyzer):
        self.analyzer = analyzer

    def expect_name_terms(self, info, *expected_terms):
        tokens = self.analyzer.get_word_token_info(list(expected_terms))
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        assert eval(info['names']) == set((t[2] for t in tokens)), \
               f"Expected: {tokens}\nGot: {info['names']}"

    def process_named_place(self, names):
        return self.analyzer.process_place(PlaceInfo({'name': names}))

    def test_simple_names(self):
        info = self.process_named_place({'name': 'Soft bAr', 'ref': '34'})

        self.expect_name_terms(info, '#Soft bAr', '#34', 'Soft', 'bAr', '34')

    @pytest.mark.parametrize('sep', [',', ';'])
    def test_names_with_separator(self, sep):
        info = self.process_named_place({'name': sep.join(('New York', 'Big Apple'))})

        self.expect_name_terms(info, '#New York', '#Big Apple',
                               'new', 'york', 'big', 'apple')

    def test_full_names_with_bracket(self):
        info = self.process_named_place({'name': 'Houseboat (left)'})

        self.expect_name_terms(info, '#Houseboat (left)', '#Houseboat',
                               'houseboat', '(left)')

    def test_country_name(self, word_table):
        place = PlaceInfo({'name' : {'name': 'Norge'},
                           'country_code': 'no',
                           'rank_address': 4,
                           'class': 'boundary',
                           'type': 'administrative'})

        info = self.analyzer.process_place(place)

        self.expect_name_terms(info, '#norge', 'norge')
        assert word_table.get_country() == {('no', ' norge')}
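

# Address term handling. The stubbed getorcreate_housenumber_id() hands out
# negative ids from a sequence, which makes housenumber tokens predictable.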
class TestPlaceAddress:

    @pytest.fixture(autouse=True)
    def setup(self, analyzer):
        self.analyzer = analyzer

    @pytest.fixture
    def getorcreate_hnr_id(self, temp_db_cursor):
        temp_db_cursor.execute("""CREATE SEQUENCE seq_hnr start 1;
                                  CREATE OR REPLACE FUNCTION getorcreate_housenumber_id(lookup_word TEXT)
                                  RETURNS INTEGER AS $$
                                      SELECT -nextval('seq_hnr')::INTEGER; $$ LANGUAGE SQL""")

    def process_address(self, **kwargs):
        return self.analyzer.process_place(PlaceInfo({'address': kwargs}))

    def name_token_set(self, *expected_terms):
        tokens = self.analyzer.get_word_token_info(list(expected_terms))
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        return set((t[2] for t in tokens))

    @pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345'])
    def test_process_place_postcode(self, word_table, pcode):
        self.process_address(postcode=pcode)

        assert word_table.get_postcodes() == {pcode, }

    @pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836'])
    def test_process_place_bad_postcode(self, word_table, pcode):
        self.process_address(postcode=pcode)

        assert not word_table.get_postcodes()

    @pytest.mark.parametrize('hnr', ['123a', '0', '101'])
    def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id):
        info = self.process_address(housenumber=hnr)

        assert info['hnr'] == hnr.lower()
        assert info['hnr_tokens'] == "{-1}"

    def test_process_place_housenumbers_lists(self, getorcreate_hnr_id):
        info = self.process_address(conscriptionnumber='1; 2;3')

        assert set(info['hnr'].split(';')) == set(('1', '2', '3'))
        assert info['hnr_tokens'] == "{-1,-2,-3}"

    def test_process_place_housenumbers_duplicates(self, getorcreate_hnr_id):
        info = self.process_address(housenumber='134',
                                    conscriptionnumber='134',
                                    streetnumber='99a')

        assert set(info['hnr'].split(';')) == set(('134', '99a'))
        assert info['hnr_tokens'] == "{-1,-2}"

    def test_process_place_street(self):
        # The legacy tokenizer only indexes known names.
        self.analyzer.process_place(PlaceInfo({'name': {'name' : 'Grand Road'}}))
        info = self.process_address(street='Grand Road')

        assert eval(info['street']) == self.name_token_set('#Grand Road')

    def test_process_place_street_empty(self):
        info = self.process_address(street='🜵')

        assert info['street'] == '{}'

    def test_process_place_place(self):
        self.analyzer.process_place(PlaceInfo({'name': {'name' : 'Honu Lulu'}}))
        info = self.process_address(place='Honu Lulu')

        assert eval(info['place_search']) == self.name_token_set('#Honu Lulu',
                                                                 'Honu', 'Lulu')
        assert eval(info['place_match']) == self.name_token_set('#Honu Lulu')

    def test_process_place_place_empty(self):
        info = self.process_address(place='🜵')

        assert 'place' not in info

    def test_process_place_address_terms(self):
        for name in ('Zwickau', 'Hauptstraße', 'Sachsen'):
            self.analyzer.process_place(PlaceInfo({'name': {'name' : name}}))
        info = self.process_address(country='de', city='Zwickau', state='Sachsen',
                                    suburb='Zwickau', street='Hauptstr',
                                    full='right behind the church')

        city = self.name_token_set('ZWICKAU')
        state = self.name_token_set('SACHSEN')

        result = {k: eval(v[0]) for k, v in info['addr'].items()}

        assert result == {'city': city, 'suburb': city, 'state': state}

    def test_process_place_address_terms_empty(self):
        info = self.process_address(country='de', city=' ', street='Hauptstr',
                                    full='right behind the church')

        assert 'addr' not in info