"""
Tests for ICU tokenizer.
"""
import shutil
import yaml

import pytest

from nominatim.tokenizer import icu_tokenizer
from nominatim.tokenizer.icu_rule_loader import ICURuleLoader
from nominatim.db import properties
from nominatim.db.sql_preprocessor import SQLPreprocessor
from nominatim.indexer.place_info import PlaceInfo

from mock_icu_word_table import MockIcuWordTable
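
# The database fixtures referenced below (temp_db_conn, def_config, place_row,
# table_factory, ...) are expected to be provided by the suite's conftest.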


@pytest.fixture
def word_table(temp_db_conn):
    return MockIcuWordTable(temp_db_conn)


@pytest.fixture
def test_config(def_config, tmp_path):
    def_config.project_dir = tmp_path / 'project'
    def_config.project_dir.mkdir()

    sqldir = tmp_path / 'sql'
    sqldir.mkdir()
    (sqldir / 'tokenizer').mkdir()
    (sqldir / 'tokenizer' / 'icu_tokenizer.sql').write_text("SELECT 'a'")
    shutil.copy(str(def_config.lib_dir.sql / 'tokenizer' / 'icu_tokenizer_tables.sql'),
                str(sqldir / 'tokenizer' / 'icu_tokenizer_tables.sql'))

    def_config.lib_dir.sql = sqldir

    return def_config


@pytest.fixture
def tokenizer_factory(dsn, tmp_path, property_table,
                      sql_preprocessor, place_table, word_table):
    (tmp_path / 'tokenizer').mkdir()

    def _maker():
        return icu_tokenizer.create(dsn, tmp_path / 'tokenizer')

    return _maker


@pytest.fixture
def db_prop(temp_db_conn):
    def _get_db_property(name):
        return properties.get_property(temp_db_conn, name)

    return _get_db_property


@pytest.fixture
def analyzer(tokenizer_factory, test_config, monkeypatch,
             temp_db_with_extensions, tmp_path):
    sql = tmp_path / 'sql' / 'tokenizer' / 'icu_tokenizer.sql'
    sql.write_text("SELECT 'a';")

    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
    monkeypatch.undo()

    def _mk_analyser(norm=("[[:Punctuation:][:Space:]]+ > ' '",), trans=(':: upper()',),
                     variants=('~gasse -> gasse', 'street => st'),
                     sanitizers=[]):
        cfgstr = {'normalization': list(norm),
                  'sanitizers': sanitizers,
                  'transliteration': list(trans),
                  'token-analysis': [{'analyzer': 'generic',
                                      'variants': [{'words': list(variants)}]}]}
        (test_config.project_dir / 'icu_tokenizer.yaml').write_text(yaml.dump(cfgstr))
        tok.loader = ICURuleLoader(test_config)

        return tok.name_analyzer()

    return _mk_analyser
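

# Note that the fixture above yields a factory, not an analyzer: tests call it
# with optional rule overrides and use the result as a context manager, e.g.
#
#     with analyzer(variants=('~strasse -> str',)) as anl:
#         anl.update_special_phrases([], True)
#
# (the variant rule in this example is illustrative only).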


@pytest.fixture
def sql_functions(temp_db_conn, def_config, src_dir):
    orig_sql = def_config.lib_dir.sql
    def_config.lib_dir.sql = src_dir / 'lib-sql'
    sqlproc = SQLPreprocessor(temp_db_conn, def_config)
    sqlproc.run_sql_file(temp_db_conn, 'functions/utils.sql')
    sqlproc.run_sql_file(temp_db_conn, 'tokenizer/icu_tokenizer.sql')
    def_config.lib_dir.sql = orig_sql


@pytest.fixture
def getorcreate_full_word(temp_db_cursor):
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_full_word(
                                  norm_term TEXT, lookup_terms TEXT[],
                                  OUT full_token INT,
                                  OUT partial_tokens INT[])
AS $$
DECLARE
  partial_terms TEXT[] = '{}'::TEXT[];
  term TEXT;
  term_id INTEGER;
  term_count INTEGER;
BEGIN
  -- Reuse an existing full-word token or create one for each lookup form.
  SELECT min(word_id) INTO full_token
    FROM word WHERE info->>'word' = norm_term and type = 'W';
  IF full_token IS NULL THEN
    full_token := nextval('seq_word');
    INSERT INTO word (word_id, word_token, type, info)
      SELECT full_token, lookup_term, 'W',
             json_build_object('word', norm_term, 'count', 0)
        FROM unnest(lookup_terms) as lookup_term;
  END IF;
  -- Collect the distinct space-separated parts of all lookup forms.
  FOR term IN SELECT unnest(string_to_array(unnest(lookup_terms), ' ')) LOOP
    term := trim(term);
    IF NOT (ARRAY[term] <@ partial_terms) THEN
      partial_terms := partial_terms || term;
    END IF;
  END LOOP;
  -- Look up or create a partial-word token for each part.
  partial_tokens := '{}'::INT[];
  FOR term IN SELECT unnest(partial_terms) LOOP
    SELECT min(word_id), max(info->>'count') INTO term_id, term_count
      FROM word WHERE word_token = term and type = 'w';
    IF term_id IS NULL THEN
      term_id := nextval('seq_word');
      term_count := 0;
      INSERT INTO word (word_id, word_token, type, info)
        VALUES (term_id, term, 'w', json_build_object('count', term_count));
    END IF;
    IF NOT (ARRAY[term_id] <@ partial_tokens) THEN
      partial_tokens := partial_tokens || term_id;
    END IF;
  END LOOP;
END;
$$ LANGUAGE plpgsql""")
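

# The pl/pgSQL function above is a simplified stand-in for the tokenizer's real
# getorcreate_full_word(): full names are stored with type 'W', their
# space-separated parts with type 'w', and new token ids come from seq_word.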


@pytest.fixture
def getorcreate_hnr_id(temp_db_cursor):
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_hnr_id(lookup_term TEXT)
                              RETURNS INTEGER AS $$
                                SELECT -nextval('seq_word')::INTEGER; $$ LANGUAGE SQL""")
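

# The housenumber mock hands out a fresh negative id on every call, so the
# first housenumber processed in a test gets token -1, the next -2, and so on.
# Stable ids for repeated numbers come from the analyzer's own cache (see
# test_process_place_housenumbers_cached below).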


def test_init_new(tokenizer_factory, test_config, monkeypatch, db_prop):
    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert db_prop(icu_tokenizer.DBCFG_TERM_NORMALIZATION) == ':: lower();'


def test_init_word_table(tokenizer_factory, test_config, place_row, temp_db_cursor):
    place_row(names={'name': 'Test Area', 'ref': '52'})
    place_row(names={'name': 'No Area'})
    place_row(names={'name': 'Holzstrasse'})

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert temp_db_cursor.table_exists('word')


def test_init_from_project(monkeypatch, test_config, tokenizer_factory):
    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
    monkeypatch.undo()

    tok = tokenizer_factory()
    tok.init_from_project(test_config)

    assert tok.loader is not None
    assert tok.term_normalization == ':: lower();'


def test_update_sql_functions(db_prop, temp_db_cursor,
                              tokenizer_factory, test_config, table_factory,
                              monkeypatch):
    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    table_factory('test', 'txt TEXT')

    # Replace the tokenizer SQL file with a plain INSERT so the test can
    # observe that update_sql_functions() actually re-runs the file.
    func_file = test_config.lib_dir.sql / 'tokenizer' / 'icu_tokenizer.sql'
    func_file.write_text("""INSERT INTO test VALUES (1133)""")

    tok.update_sql_functions(test_config)

    test_content = temp_db_cursor.row_set('SELECT * FROM test')
    assert test_content == set((('1133', ), ))


def test_normalize_postcode(analyzer):
    with analyzer() as anl:
        assert anl.normalize_postcode('123') == '123'
        assert anl.normalize_postcode('ab-34 ') == 'AB-34'
        assert anl.normalize_postcode('38 Б') == '38 Б'


def test_update_postcodes_from_db_empty(analyzer, table_factory, word_table):
    table_factory('location_postcode', 'postcode TEXT',
                  content=(('1234',), ('12 34',), ('AB23',), ('1234',)))

    with analyzer() as anl:
        anl.update_postcodes_from_db()

    assert word_table.count() == 3
    assert word_table.get_postcodes() == {'1234', '12 34', 'AB23'}


def test_update_postcodes_from_db_add_and_remove(analyzer, table_factory, word_table):
    table_factory('location_postcode', 'postcode TEXT',
                  content=(('1234',), ('45BC',), ('XX45',)))
    word_table.add_postcode(' 1234', '1234')
    word_table.add_postcode(' 5678', '5678')

    with analyzer() as anl:
        anl.update_postcodes_from_db()

    assert word_table.count() == 3
    assert word_table.get_postcodes() == {'1234', '45BC', 'XX45'}


def test_update_special_phrase_empty_table(analyzer, word_table):
    with analyzer() as anl:
        anl.update_special_phrases([
            ("König bei", "amenity", "royal", "near"),
            ("Könige ", "amenity", "royal", "-"),
            ("street", "highway", "primary", "in")
        ], True)

    assert word_table.get_special() \
           == {('KÖNIG BEI', 'König bei', 'amenity', 'royal', 'near'),
               ('KÖNIGE', 'Könige', 'amenity', 'royal', None),
               ('STREET', 'street', 'highway', 'primary', 'in')}


def test_update_special_phrase_delete_all(analyzer, word_table):
    word_table.add_special('FOO', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special('BAR', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    with analyzer() as anl:
        anl.update_special_phrases([], True)

    assert word_table.count_special() == 0


def test_update_special_phrases_no_replace(analyzer, word_table):
    word_table.add_special('FOO', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special('BAR', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    with analyzer() as anl:
        anl.update_special_phrases([], False)

    assert word_table.count_special() == 2


def test_update_special_phrase_modify(analyzer, word_table):
    word_table.add_special('FOO', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special('BAR', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    with analyzer() as anl:
        anl.update_special_phrases([
            ('prison', 'amenity', 'prison', 'in'),
            ('bar', 'highway', 'road', '-'),
            ('garden', 'leisure', 'garden', 'near')
        ], True)

    assert word_table.get_special() \
           == {('PRISON', 'prison', 'amenity', 'prison', 'in'),
               ('BAR', 'bar', 'highway', 'road', None),
               ('GARDEN', 'garden', 'leisure', 'garden', 'near')}


def test_add_country_names_new(analyzer, word_table):
    with analyzer() as anl:
        anl.add_country_names('es', {'name': 'Espagña', 'name:en': 'Spain'})

    assert word_table.get_country() == {('es', 'ESPAGÑA'), ('es', 'SPAIN')}


def test_add_country_names_extend(analyzer, word_table):
    word_table.add_country('ch', 'SCHWEIZ')

    with analyzer() as anl:
        anl.add_country_names('ch', {'name': 'Schweiz', 'name:fr': 'Suisse'})

    assert word_table.get_country() == {('ch', 'SCHWEIZ'), ('ch', 'SUISSE')}


class TestPlaceNames:

    @pytest.fixture(autouse=True)
    def setup(self, analyzer, sql_functions):
        sanitizers = [{'step': 'split-name-list'},
                      {'step': 'strip-brace-terms'}]
        with analyzer(sanitizers=sanitizers) as anl:
            self.analyzer = anl
            yield anl
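
    # Conventions for the helpers below: expected terms prefixed with '#' are
    # looked up as full names, unprefixed terms as partial words. Token ids are
    # returned in PostgreSQL array-literal form ('{1,2}'), which eval() happens
    # to parse as a Python set.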

    def expect_name_terms(self, info, *expected_terms):
        tokens = self.analyzer.get_word_token_info(expected_terms)
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        assert eval(info['names']) == set((t[2] for t in tokens))

    def process_named_place(self, names):
        return self.analyzer.process_place(PlaceInfo({'name': names}))

    def test_simple_names(self):
        info = self.process_named_place({'name': 'Soft bAr', 'ref': '34'})

        self.expect_name_terms(info, '#Soft bAr', '#34', 'Soft', 'bAr', '34')

    @pytest.mark.parametrize('sep', [',', ';'])
    def test_names_with_separator(self, sep):
        info = self.process_named_place({'name': sep.join(('New York', 'Big Apple'))})

        self.expect_name_terms(info, '#New York', '#Big Apple',
                               'new', 'york', 'big', 'apple')

    def test_full_names_with_bracket(self):
        info = self.process_named_place({'name': 'Houseboat (left)'})

        self.expect_name_terms(info, '#Houseboat (left)', '#Houseboat',
                               'houseboat', 'left')

    def test_country_name(self, word_table):
        place = PlaceInfo({'name': {'name': 'Norge'},
                           'country_code': 'no',
                           'rank_address': 4,
                           'class': 'boundary',
                           'type': 'administrative'})

        info = self.analyzer.process_place(place)

        self.expect_name_terms(info, '#norge', 'norge')
        assert word_table.get_country() == {('no', 'NORGE')}


class TestPlaceAddress:

    @pytest.fixture(autouse=True)
    def setup(self, analyzer, sql_functions):
        with analyzer(trans=(":: upper()", "'🜵' > ' '")) as anl:
            self.analyzer = anl
            yield anl
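
    # The extra transliteration rule '🜵' > ' ' turns the alchemical symbol
    # into a space, so values consisting only of it normalize to nothing;
    # the *_empty tests below rely on that.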

    def process_address(self, **kwargs):
        return self.analyzer.process_place(PlaceInfo({'address': kwargs}))

    def name_token_set(self, *expected_terms):
        tokens = self.analyzer.get_word_token_info(expected_terms)
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        return set((t[2] for t in tokens))

    @pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345'])
    def test_process_place_postcode(self, word_table, pcode):
        self.process_address(postcode=pcode)

        assert word_table.get_postcodes() == {pcode, }

    @pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836'])
    def test_process_place_bad_postcode(self, word_table, pcode):
        self.process_address(postcode=pcode)

        assert not word_table.get_postcodes()

    @pytest.mark.parametrize('hnr', ['123a', '1', '101'])
    def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id):
        info = self.process_address(housenumber=hnr)

        assert info['hnr'] == hnr.upper()
        assert info['hnr_tokens'] == "{-1}"

    def test_process_place_housenumbers_lists(self, getorcreate_hnr_id):
        info = self.process_address(conscriptionnumber='1; 2;3')

        assert set(info['hnr'].split(';')) == set(('1', '2', '3'))
        assert info['hnr_tokens'] == "{-1,-2,-3}"

    def test_process_place_housenumbers_duplicates(self, getorcreate_hnr_id):
        info = self.process_address(housenumber='134',
                                    conscriptionnumber='134',
                                    streetnumber='99a')

        assert set(info['hnr'].split(';')) == set(('134', '99A'))
        assert info['hnr_tokens'] == "{-1,-2}"

    def test_process_place_housenumbers_cached(self, getorcreate_hnr_id):
        info = self.process_address(housenumber="45")
        assert info['hnr_tokens'] == "{-1}"

        info = self.process_address(housenumber="46")
        assert info['hnr_tokens'] == "{-2}"

        # '41' is new and gets the next id (-3), while '45' is served from the
        # analyzer's cache and keeps its original token (-1).
        info = self.process_address(housenumber="41;45")
        assert eval(info['hnr_tokens']) == {-1, -3}

        info = self.process_address(housenumber="41")
        assert eval(info['hnr_tokens']) == {-3}

    def test_process_place_street(self):
        info = self.process_address(street='Grand Road')

        assert eval(info['street']) == self.name_token_set('GRAND', 'ROAD')

    def test_process_place_street_empty(self):
        info = self.process_address(street='🜵')

        assert 'street' not in info

    def test_process_place_place(self):
        info = self.process_address(place='Honu Lulu')

        assert eval(info['place']) == self.name_token_set('HONU', 'LULU')

    def test_process_place_place_empty(self):
        info = self.process_address(place='🜵')

        assert 'place' not in info

    def test_process_place_address_terms(self):
        info = self.process_address(country='de', city='Zwickau', state='Sachsen',
                                    suburb='Zwickau', street='Hauptstr',
                                    full='right behind the church')

        city = self.name_token_set('ZWICKAU')
        state = self.name_token_set('SACHSEN')

        result = {k: eval(v) for k, v in info['addr'].items()}

        assert result == {'city': city, 'suburb': city, 'state': state}

    def test_process_place_address_terms_empty(self):
        info = self.process_address(country='de', city=' ', street='Hauptstr',
                                    full='right behind the church')

        assert 'addr' not in info