"""
Tests for Legacy ICU tokenizer.
"""
import shutil

import pytest

from nominatim.tokenizer import icu_tokenizer
from nominatim.tokenizer.icu_name_processor import ICUNameProcessorRules
from nominatim.tokenizer.icu_rule_loader import ICURuleLoader
from nominatim.db import properties

from mock_icu_word_table import MockIcuWordTable

@pytest.fixture
def word_table(temp_db_conn):
    return MockIcuWordTable(temp_db_conn)

@pytest.fixture
def test_config(def_config, tmp_path):
    def_config.project_dir = tmp_path / 'project'
    def_config.project_dir.mkdir()

    # Provide a stripped-down SQL directory: a dummy tokenizer function
    # plus the real table definitions copied from the source tree.
    sqldir = tmp_path / 'sql'
    sqldir.mkdir()
    (sqldir / 'tokenizer').mkdir()
    (sqldir / 'tokenizer' / 'icu_tokenizer.sql').write_text("SELECT 'a'")
    shutil.copy(str(def_config.lib_dir.sql / 'tokenizer' / 'icu_tokenizer_tables.sql'),
                str(sqldir / 'tokenizer' / 'icu_tokenizer_tables.sql'))

    def_config.lib_dir.sql = sqldir

    return def_config

@pytest.fixture
def tokenizer_factory(dsn, tmp_path, property_table,
                      sql_preprocessor, place_table, word_table):
    (tmp_path / 'tokenizer').mkdir()

    def _maker():
        return icu_tokenizer.create(dsn, tmp_path / 'tokenizer')

    return _maker

@pytest.fixture
def db_prop(temp_db_conn):
    def _get_db_property(name):
        return properties.get_property(temp_db_conn, name)

    return _get_db_property

@pytest.fixture
def analyzer(tokenizer_factory, test_config, monkeypatch,
             temp_db_with_extensions, tmp_path):
    sql = tmp_path / 'sql' / 'tokenizer' / 'icu_tokenizer.sql'
    sql.write_text("SELECT 'a';")

    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
    monkeypatch.undo()

    def _mk_analyser(norm=("[[:Punctuation:][:Space:]]+ > ' '",), trans=(':: upper()',),
                     variants=('~gasse -> gasse', 'street => st', )):
        cfgstr = {'normalization': list(norm),
                  'transliteration': list(trans),
                  'variants': [{'words': list(variants)}]}
        tok.naming_rules = ICUNameProcessorRules(loader=ICURuleLoader(cfgstr))

        return tok.name_analyzer()

    return _mk_analyser

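# Test stand-in for the getorcreate_full_word() function that the tokenizer
# normally installs from its SQL file. It fills the mock word table with
# 'W' (full name) and 'w' (partial term) entries so that process_place()
# can be exercised without the production SQL.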
@pytest.fixture
def getorcreate_full_word(temp_db_cursor):
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_full_word(
                                                 norm_term TEXT, lookup_terms TEXT[],
                                                 OUT full_token INT,
                                                 OUT partial_tokens INT[])
  AS $$
DECLARE
  partial_terms TEXT[] = '{}'::TEXT[];
  term TEXT;
  term_id INTEGER;
  term_count INTEGER;
BEGIN
  SELECT min(word_id) INTO full_token
    FROM word WHERE info->>'word' = norm_term and type = 'W';

  IF full_token IS NULL THEN
    full_token := nextval('seq_word');
    INSERT INTO word (word_id, word_token, type, info)
      SELECT full_token, lookup_term, 'W',
             json_build_object('word', norm_term, 'count', 0)
        FROM unnest(lookup_terms) as lookup_term;
  END IF;

  FOR term IN SELECT unnest(string_to_array(unnest(lookup_terms), ' ')) LOOP
    term := trim(term);
    IF NOT (ARRAY[term] <@ partial_terms) THEN
      partial_terms := partial_terms || term;
    END IF;
  END LOOP;

  partial_tokens := '{}'::INT[];
  FOR term IN SELECT unnest(partial_terms) LOOP
    SELECT min(word_id), max(info->>'count') INTO term_id, term_count
      FROM word WHERE word_token = term and type = 'w';

    IF term_id IS NULL THEN
      term_id := nextval('seq_word');
      term_count := 0;
      INSERT INTO word (word_id, word_token, type, info)
        VALUES (term_id, term, 'w', json_build_object('count', term_count));
    END IF;

    IF NOT (ARRAY[term_id] <@ partial_tokens) THEN
      partial_tokens := partial_tokens || term_id;
    END IF;
  END LOOP;
END;
$$
LANGUAGE plpgsql;
                              """)

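# Housenumber lookups are stubbed with negative ids drawn from seq_word;
# the housenumber tests below rely on these deterministic values.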
@pytest.fixture
def getorcreate_hnr_id(temp_db_cursor):
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_hnr_id(lookup_term TEXT)
                              RETURNS INTEGER AS $$
                                SELECT -nextval('seq_word')::INTEGER; $$ LANGUAGE SQL""")

def test_init_new(tokenizer_factory, test_config, monkeypatch, db_prop):
    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert db_prop(icu_tokenizer.DBCFG_TERM_NORMALIZATION) == ':: lower();'
    assert db_prop(icu_tokenizer.DBCFG_MAXWORDFREQ) is not None

def test_init_word_table(tokenizer_factory, test_config, place_row, word_table):
    place_row(names={'name' : 'Test Area', 'ref' : '52'})
    place_row(names={'name' : 'No Area'})
    place_row(names={'name' : 'Holzstrasse'})

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert word_table.get_partial_words() == {('test', 1),
                                              ('no', 1), ('area', 2),
                                              ('holz', 1), ('strasse', 1),
                                              ('str', 1)}

def test_init_from_project(monkeypatch, test_config, tokenizer_factory):
    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')
    monkeypatch.setenv('NOMINATIM_MAX_WORD_FREQUENCY', '90300')
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
    monkeypatch.undo()

    tok = tokenizer_factory()
    tok.init_from_project()

    assert tok.naming_rules is not None
    assert tok.term_normalization == ':: lower();'
    assert tok.max_word_frequency == '90300'

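# update_sql_functions() reinstalls the tokenizer functions from lib_dir.sql,
# expanding placeholders like '{{max_word_freq}}'. The test swaps in a probe
# file that simply records the expanded value in a table.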
def test_update_sql_functions(db_prop, temp_db_cursor,
                              tokenizer_factory, test_config, table_factory,
                              monkeypatch):
    monkeypatch.setenv('NOMINATIM_MAX_WORD_FREQUENCY', '1133')
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
    monkeypatch.undo()

    assert db_prop(icu_tokenizer.DBCFG_MAXWORDFREQ) == '1133'

    table_factory('test', 'txt TEXT')

    func_file = test_config.lib_dir.sql / 'tokenizer' / 'icu_tokenizer.sql'
    func_file.write_text("""INSERT INTO test VALUES ('{{max_word_freq}}')""")

    tok.update_sql_functions(test_config)

    test_content = temp_db_cursor.row_set('SELECT * FROM test')
    assert test_content == set((('1133', ), ))

def test_normalize_postcode(analyzer):
    with analyzer() as anl:
        assert anl.normalize_postcode('123') == '123'
        assert anl.normalize_postcode('ab-34 ') == 'AB-34'
        assert anl.normalize_postcode('38 Б') == '38 Б'

def test_update_postcodes_from_db_empty(analyzer, table_factory, word_table):
    table_factory('location_postcode', 'postcode TEXT',
                  content=(('1234',), ('12 34',), ('AB23',), ('1234',)))

    with analyzer() as anl:
        anl.update_postcodes_from_db()

    assert word_table.count() == 3
    assert word_table.get_postcodes() == {'1234', '12 34', 'AB23'}

def test_update_postcodes_from_db_add_and_remove(analyzer, table_factory, word_table):
    table_factory('location_postcode', 'postcode TEXT',
                  content=(('1234',), ('45BC', ), ('XX45', )))
    word_table.add_postcode(' 1234', '1234')
    word_table.add_postcode(' 5678', '5678')

    with analyzer() as anl:
        anl.update_postcodes_from_db()

    assert word_table.count() == 3
    assert word_table.get_postcodes() == {'1234', '45BC', 'XX45'}

def test_update_special_phrase_empty_table(analyzer, word_table):
    with analyzer() as anl:
        anl.update_special_phrases([
            ("König bei", "amenity", "royal", "near"),
            ("Könige ", "amenity", "royal", "-"),
            ("street", "highway", "primary", "in")
        ], True)

    assert word_table.get_special() \
           == {('KÖNIG BEI', 'König bei', 'amenity', 'royal', 'near'),
               ('KÖNIGE', 'Könige', 'amenity', 'royal', None),
               ('STREET', 'street', 'highway', 'primary', 'in')}

def test_update_special_phrase_delete_all(analyzer, word_table):
    word_table.add_special('FOO', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special('BAR', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    with analyzer() as anl:
        anl.update_special_phrases([], True)

    assert word_table.count_special() == 0

def test_update_special_phrases_no_replace(analyzer, word_table):
    word_table.add_special('FOO', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special('BAR', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    with analyzer() as anl:
        anl.update_special_phrases([], False)

    assert word_table.count_special() == 2

def test_update_special_phrase_modify(analyzer, word_table):
    word_table.add_special('FOO', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special('BAR', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    with analyzer() as anl:
        anl.update_special_phrases([
            ('prison', 'amenity', 'prison', 'in'),
            ('bar', 'highway', 'road', '-'),
            ('garden', 'leisure', 'garden', 'near')
        ], True)

    assert word_table.get_special() \
           == {('PRISON', 'prison', 'amenity', 'prison', 'in'),
               ('BAR', 'bar', 'highway', 'road', None),
               ('GARDEN', 'garden', 'leisure', 'garden', 'near')}

def test_add_country_names_new(analyzer, word_table):
    with analyzer() as anl:
        anl.add_country_names('es', {'name': 'Espagña', 'name:en': 'Spain'})

    assert word_table.get_country() == {('es', 'ESPAGÑA'), ('es', 'SPAIN')}


def test_add_country_names_extend(analyzer, word_table):
    word_table.add_country('ch', 'SCHWEIZ')

    with analyzer() as anl:
        anl.add_country_names('ch', {'name': 'Schweiz', 'name:fr': 'Suisse'})

    assert word_table.get_country() == {('ch', 'SCHWEIZ'), ('ch', 'SUISSE')}

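# process_place() returns the search-relevant token information for a place
# as a dict of stringified token sets, which is why the assertions below
# eval() the returned values. A rough sketch of the round trip:
#
#   info = anl.process_place({'name': {'name': 'Soft bAr'}})
#   name_tokens = eval(info['names'])   # set of word ids from the word table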
class TestPlaceNames:

    @pytest.fixture(autouse=True)
    def setup(self, analyzer, getorcreate_full_word):
        with analyzer() as anl:
            self.analyzer = anl
            yield anl


    def expect_name_terms(self, info, *expected_terms):
        tokens = self.analyzer.get_word_token_info(expected_terms)
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        assert eval(info['names']) == set((t[2] for t in tokens))

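    # Convention for the expected terms below: a leading '#' marks a term
    # that must be present as a full name; terms without it are expected as
    # partial terms.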
    def test_simple_names(self):
        info = self.analyzer.process_place({'name': {'name': 'Soft bAr', 'ref': '34'}})

        self.expect_name_terms(info, '#Soft bAr', '#34', 'Soft', 'bAr', '34')

    @pytest.mark.parametrize('sep', [',', ';'])
    def test_names_with_separator(self, sep):
        info = self.analyzer.process_place({'name': {'name': sep.join(('New York', 'Big Apple'))}})

        self.expect_name_terms(info, '#New York', '#Big Apple',
                               'new', 'york', 'big', 'apple')

    def test_full_names_with_bracket(self):
        info = self.analyzer.process_place({'name': {'name': 'Houseboat (left)'}})

        self.expect_name_terms(info, '#Houseboat (left)', '#Houseboat',
                               'houseboat', 'left')

    def test_country_name(self, word_table):
        info = self.analyzer.process_place({'name': {'name': 'Norge'},
                                            'country_feature': 'no'})

        self.expect_name_terms(info, '#norge', 'norge')
        assert word_table.get_country() == {('no', 'NORGE')}

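# The extra transliteration rule mapping '🜵' to a space (see setup below)
# makes it easy to build address parts that normalize to an empty string,
# as exercised by the *_empty tests.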
class TestPlaceAddress:

    @pytest.fixture(autouse=True)
    def setup(self, analyzer, getorcreate_full_word):
        with analyzer(trans=(":: upper()", "'🜵' > ' '")) as anl:
            self.analyzer = anl
            yield anl


    def process_address(self, **kwargs):
        return self.analyzer.process_place({'address': kwargs})


    def name_token_set(self, *expected_terms):
        tokens = self.analyzer.get_word_token_info(expected_terms)
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        return set((t[2] for t in tokens))

    @pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345'])
    def test_process_place_postcode(self, word_table, pcode):
        self.process_address(postcode=pcode)

        assert word_table.get_postcodes() == {pcode, }


    @pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836'])
    def test_process_place_bad_postcode(self, word_table, pcode):
        self.process_address(postcode=pcode)

        assert not word_table.get_postcodes()

    @pytest.mark.parametrize('hnr', ['123a', '1', '101'])
    def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id):
        info = self.process_address(housenumber=hnr)

        assert info['hnr'] == hnr.upper()
        assert info['hnr_tokens'] == "{-1}"


    def test_process_place_housenumbers_lists(self, getorcreate_hnr_id):
        info = self.process_address(conscriptionnumber='1; 2;3')

        assert set(info['hnr'].split(';')) == set(('1', '2', '3'))
        assert info['hnr_tokens'] == "{-1,-2,-3}"

    def test_process_place_housenumbers_duplicates(self, getorcreate_hnr_id):
        info = self.process_address(housenumber='134',
                                    conscriptionnumber='134',
                                    streetnumber='99a')

        assert set(info['hnr'].split(';')) == set(('134', '99A'))
        assert info['hnr_tokens'] == "{-1,-2}"

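    # Housenumber tokens are cached per analyzer: '45' below resolves to the
    # token created by the first call, while the unseen '41' gets a fresh id.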
    def test_process_place_housenumbers_cached(self, getorcreate_hnr_id):
        info = self.process_address(housenumber="45")
        assert info['hnr_tokens'] == "{-1}"

        info = self.process_address(housenumber="46")
        assert info['hnr_tokens'] == "{-2}"

        info = self.process_address(housenumber="41;45")
        assert eval(info['hnr_tokens']) == {-1, -3}

        info = self.process_address(housenumber="41")
        assert eval(info['hnr_tokens']) == {-3}

    def test_process_place_street(self):
        info = self.process_address(street='Grand Road')

        assert eval(info['street']) == self.name_token_set('#GRAND ROAD')


    def test_process_place_street_empty(self):
        info = self.process_address(street='🜵')

        assert 'street' not in info

    def test_process_place_place(self):
        info = self.process_address(place='Honu Lulu')

        assert eval(info['place_search']) == self.name_token_set('#HONU LULU',
                                                                 'HONU', 'LULU')
        assert eval(info['place_match']) == self.name_token_set('#HONU LULU')


    def test_process_place_place_empty(self):
        info = self.process_address(place='🜵')

        assert 'place_search' not in info
        assert 'place_match' not in info

    def test_process_place_address_terms(self):
        info = self.process_address(country='de', city='Zwickau', state='Sachsen',
                                    suburb='Zwickau', street='Hauptstr',
                                    full='right behind the church')

        city_full = self.name_token_set('#ZWICKAU')
        city_all = self.name_token_set('#ZWICKAU', 'ZWICKAU')
        state_full = self.name_token_set('#SACHSEN')
        state_all = self.name_token_set('#SACHSEN', 'SACHSEN')

        result = {k: [eval(v[0]), eval(v[1])] for k, v in info['addr'].items()}

        assert result == {'city': [city_all, city_full],
                          'suburb': [city_all, city_full],
                          'state': [state_all, state_full]}

    def test_process_place_address_terms_empty(self):
        info = self.process_address(country='de', city=' ', street='Hauptstr',
                                    full='right behind the church')

        assert 'addr' not in info