From 23fd1d032a96ee0821a103493d1a5d41a03546cb Mon Sep 17 00:00:00 2001
From: Sarah Hoffmann
Date: Wed, 28 Apr 2021 09:14:32 +0200
Subject: [PATCH] tests for legacy tokenizer

---
 nominatim/tokenizer/legacy_tokenizer.py |  1 +
 test/python/test_tokenizer_legacy.py    | 73 ++++++++++++++++++++++---
 2 files changed, 67 insertions(+), 7 deletions(-)

diff --git a/nominatim/tokenizer/legacy_tokenizer.py b/nominatim/tokenizer/legacy_tokenizer.py
index 00b9f50e..ac26c5cd 100644
--- a/nominatim/tokenizer/legacy_tokenizer.py
+++ b/nominatim/tokenizer/legacy_tokenizer.py
@@ -135,6 +135,7 @@ class LegacyTokenizer:
             This is a special migration function for updating existing databases
             to new software versions.
         """
+        self.normalization = config.TERM_NORMALIZATION
         module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                      config.lib_dir.module,
                                      config.project_dir / 'module')
diff --git a/test/python/test_tokenizer_legacy.py b/test/python/test_tokenizer_legacy.py
index 95b49be3..0d1169ad 100644
--- a/test/python/test_tokenizer_legacy.py
+++ b/test/python/test_tokenizer_legacy.py
@@ -31,12 +31,11 @@ def test_config(def_config, tmp_path):
     def_config.lib_dir.sql = sqldir
     def_config.lib_dir.data = sqldir
 
-
     return def_config
 
 
 @pytest.fixture
-def tokenizer_factory(dsn, tmp_path, monkeypatch):
+def tokenizer_factory(dsn, tmp_path, monkeypatch, property_table):
     def _maker():
         return legacy_tokenizer.create(dsn, tmp_path / 'tokenizer')
 
@@ -44,14 +43,32 @@ def tokenizer_factory(dsn, tmp_path, monkeypatch):
     return _maker
 
 
 @pytest.fixture
-def tokenizer_setup(tokenizer_factory, test_config, property_table,
-                    monkeypatch, sql_preprocessor):
+def tokenizer_setup(tokenizer_factory, test_config, monkeypatch, sql_preprocessor):
+    monkeypatch.setattr(legacy_tokenizer, '_check_module' , lambda m, c: None)
+    tok = tokenizer_factory()
+    tok.init_new_db(test_config)
+
+
+@pytest.fixture
+def analyzer(tokenizer_factory, test_config, monkeypatch, sql_preprocessor,
+             word_table, temp_db_with_extensions, tmp_path):
+    sql = tmp_path / 'sql' / 'tokenizer' / 'legacy_tokenizer.sql'
+    sql.write_text("""
+        CREATE OR REPLACE FUNCTION getorcreate_housenumber_id(lookup_word TEXT)
+        RETURNS INTEGER AS $$ SELECT 342; $$ LANGUAGE SQL;
+        """)
+
     monkeypatch.setattr(legacy_tokenizer, '_check_module' , lambda m, c: None)
+    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')
     tok = tokenizer_factory()
     tok.init_new_db(test_config)
+    monkeypatch.undo()
+
+    with tok.name_analyzer() as analyzer:
+        yield analyzer
 
-def test_init_new(tokenizer_factory, test_config, property_table, monkeypatch,
+def test_init_new(tokenizer_factory, test_config, monkeypatch,
                   temp_db_conn, sql_preprocessor):
     monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', 'xxvv')
     monkeypatch.setattr(legacy_tokenizer, '_check_module' , lambda m, c: None)
@@ -68,7 +85,7 @@ def test_init_new(tokenizer_factory, test_config, property_table, monkeypatch,
     assert outfile.stat().st_mode == 33261
 
 
-def test_init_module_load_failed(tokenizer_factory, test_config, property_table,
+def test_init_module_load_failed(tokenizer_factory, test_config,
                                  monkeypatch, temp_db_conn):
     tok = tokenizer_factory()
 
@@ -76,7 +93,7 @@ def test_init_module_load_failed(tokenizer_factory, test_config, property_table,
         tok.init_new_db(test_config)
 
 
-def test_init_module_custom(tokenizer_factory, test_config, property_table,
+def test_init_module_custom(tokenizer_factory, test_config,
                             monkeypatch, tmp_path, sql_preprocessor):
     module_dir = (tmp_path / 'custom').resolve()
     module_dir.mkdir()
@@ -97,3 +114,45 @@ def test_init_from_project(tokenizer_setup, tokenizer_factory):
     tok.init_from_project()
 
     assert tok.normalization is not None
+
+
+def test_update_sql_functions(sql_preprocessor, temp_db_conn,
+                              tokenizer_factory, test_config, table_factory,
+                              monkeypatch, temp_db_cursor):
+    monkeypatch.setenv('NOMINATIM_MAX_WORD_FREQUENCY', '1133')
+    monkeypatch.setattr(legacy_tokenizer, '_check_module' , lambda m, c: None)
+    tok = tokenizer_factory()
+    tok.init_new_db(test_config)
+    monkeypatch.undo()
+
+    assert properties.get_property(temp_db_conn, legacy_tokenizer.DBCFG_MAXWORDFREQ) == '1133'
+
+    table_factory('test', 'txt TEXT')
+
+    func_file = test_config.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer.sql'
+    func_file.write_text("""INSERT INTO test VALUES ('{{max_word_freq}}'),
+                                                    ('{{modulepath}}')""")
+
+    tok.update_sql_functions(test_config)
+
+    test_content = temp_db_cursor.row_set('SELECT * FROM test')
+    assert test_content == set((('1133', ), (str(test_config.project_dir / 'module'), )))
+
+
+def test_migrate_database(tokenizer_factory, test_config, temp_db_conn, monkeypatch):
+    monkeypatch.setattr(legacy_tokenizer, '_check_module' , lambda m, c: None)
+    tok = tokenizer_factory()
+    tok.migrate_database(test_config)
+
+    assert properties.get_property(temp_db_conn, legacy_tokenizer.DBCFG_MAXWORDFREQ) is not None
+    assert properties.get_property(temp_db_conn, legacy_tokenizer.DBCFG_NORMALIZATION) is not None
+
+    outfile = test_config.project_dir / 'module' / 'nominatim.so'
+
+    assert outfile.exists()
+    assert outfile.read_text() == 'TEST nomiantim.so'
+    assert outfile.stat().st_mode == 33261
+
+
+def test_normalize(analyzer):
+    assert analyzer.normalize('TEsT') == 'test'
-- 
2.39.5
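
A note on the pattern used throughout these tests: each fixture stubs legacy_tokenizer._check_module via monkeypatch.setattr so that no compiled nominatim.so is required, drives configuration through NOMINATIM_* environment variables, and calls monkeypatch.undo() once setup is complete so the overrides do not leak into the test body. The sketch below reproduces the same pattern in isolation; the ModuleLoader class and the MY_SETTING variable are invented for illustration and are not part of Nominatim.

    import os

    import pytest


    class ModuleLoader:
        """Hypothetical stand-in for the tokenizer's module check."""

        @staticmethod
        def check_module(module_path, conn):
            raise RuntimeError('would need a compiled shared library')


    @pytest.fixture
    def prepared(monkeypatch):
        # Stub out the expensive check for the duration of the setup,
        # just like the fixtures above stub legacy_tokenizer._check_module.
        monkeypatch.setattr(ModuleLoader, 'check_module', lambda m, c: None)
        # Configure the setup phase through the environment.
        monkeypatch.setenv('MY_SETTING', 'setup-value')

        ModuleLoader.check_module('module', None)   # stubbed: does not raise
        assert os.environ['MY_SETTING'] == 'setup-value'

        # Revert both overrides before the test body runs, mirroring the
        # monkeypatch.undo() calls in the analyzer fixture above.
        monkeypatch.undo()
        yield


    def test_overrides_reverted(prepared):
        # By the time the test executes, the environment override is gone
        # (assuming MY_SETTING is not set outside the test).
        assert 'MY_SETTING' not in os.environ

The undo() call is what lets init_new_db() run against the overridden settings while the assertions later in each test observe the real configuration again.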