-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
-# Copyright (C) 2022 by the Nominatim developer community.
+# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Test for legacy tokenizer.
"""
import pytest
-from nominatim.indexer.place_info import PlaceInfo
-from nominatim.tokenizer import legacy_tokenizer
-from nominatim.db import properties
-from nominatim.errors import UsageError
+from nominatim_db.data.place_info import PlaceInfo
+from nominatim_db.tokenizer import legacy_tokenizer
+from nominatim_db.db import properties
+from nominatim_db.errors import UsageError
from mock_legacy_word_table import MockLegacyWordTable
@pytest.fixture
def test_config(project_env, tmp_path):
module_dir = tmp_path / 'module_src'
module_dir.mkdir()
- (module_dir / 'nominatim.so').write_text('TEST nomiantim.so')
+ (module_dir / 'nominatim.so').write_text('TEST nominatim.so')
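+ # Point the library dir at the fake module so the tests can check that it gets installed into the project directory.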
project_env.lib_dir.module = module_dir
outfile = test_config.project_dir / 'module' / 'nominatim.so'
assert outfile.exists()
- assert outfile.read_text() == 'TEST nomiantim.so'
+ assert outfile.read_text() == 'TEST nominatim.so'
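+ # 33261 == 0o100755: a regular file with the execute bits set.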
assert outfile.stat().st_mode == 33261
outfile = test_config.project_dir / 'module' / 'nominatim.so'
assert outfile.exists()
- assert outfile.read_text() == 'TEST nomiantim.so'
+ assert outfile.read_text() == 'TEST nominatim.so'
assert outfile.stat().st_mode == 33261
assert tok.check_database(False) is not None
-def test_update_statistics_reverse_only(word_table, tokenizer_factory):
+def test_update_statistics_reverse_only(word_table, tokenizer_factory, test_config):
tok = tokenizer_factory()
- tok.update_statistics()
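+ # A reverse-only database has no search_name table; the call should still run without error.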
+ tok.update_statistics(test_config)
-def test_update_statistics(word_table, table_factory, temp_db_cursor, tokenizer_factory):
+def test_update_statistics(word_table, table_factory, temp_db_cursor, tokenizer_factory, test_config):
word_table.add_full_word(1000, 'hello')
table_factory('search_name',
'place_id BIGINT, name_vector INT[]',
[(12, [1000])])
tok = tokenizer_factory()
- tok.update_statistics()
+ tok.update_statistics(test_config)
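+ # The full word added above is referenced from search_name, so its token (stored with a leading space) should end up with a positive count.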
assert temp_db_cursor.scalar("""SELECT count(*) FROM word
WHERE word_token like ' %' and
search_name_count > 0""") > 0
+def test_update_word_tokens(tokenizer_factory):
+ tok = tokenizer_factory()
+
+ # This is a noop and should just pass.
+ tok.update_word_tokens()
+
+
def test_normalize(analyzer):
assert analyzer.normalize('TEsT') == 'test'
def test_process_place_street_empty(self):
info = self.process_address(street='🜵')
- assert 'street' not in info
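+ # The character has no normalized form, so the street now yields an empty set ('{}') instead of the key being omitted.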
+ assert info['street'] == '{}'
def test_process_place_place(self):