git.openstreetmap.org Git - nominatim.git/commitdiff
add tests for cleaning housenumbers
author: Sarah Hoffmann <lonvia@denofr.de>
Thu, 20 Jan 2022 22:47:20 +0000 (23:47 +0100)
committer: Sarah Hoffmann <lonvia@denofr.de>
Thu, 20 Jan 2022 22:47:20 +0000 (23:47 +0100)
.github/workflows/ci-tests.yml
nominatim/clicmd/refresh.py
test/python/cli/conftest.py
test/python/cli/test_cmd_refresh.py
test/python/mock_icu_word_table.py
test/python/tokenizer/test_icu.py
test/python/tokenizer/test_legacy.py

index 23d640d7f084a9347c963db14ab1c9b5bacd8edd..f326c3caed7897e2f625d31631e116d909c7ddb3 100644 (file)
@@ -309,12 +309,20 @@ jobs:
                   NOMINATIM_REPLICATION_MAX_DIFF=1 nominatim replication --once
               working-directory: /home/nominatim/nominatim-project
 
+            - name: Clean up database
+              run: nominatim refresh --postcodes --word-tokens
+              working-directory: /home/nominatim/nominatim-project
+
             - name: Run reverse-only import
               run : |
                   echo 'NOMINATIM_DATABASE_DSN="pgsql:dbname=reverse"' >> .env
                   nominatim import --osm-file ../test.pbf --reverse-only --no-updates
               working-directory: /home/nominatim/data-env-reverse
 
-            - name: Check reverse import
+            - name: Check reverse-only import
               run: nominatim admin --check-database
               working-directory: /home/nominatim/data-env-reverse
+
+            - name: Clean up database (reverse-only import)
+              run: nominatim refresh --postcodes --word-tokens
+              working-directory: /home/nominatim/nominatim-project
index c741dcf63632fc0c01d8592a66f46d3be0c8bdbd..b8a88b6d615b5b5c04445f393a71a81c1b6cc112 100644 (file)
@@ -79,6 +79,7 @@ class UpdateRefresh:
                           "Postcode updates on a frozen database is not possible.")
 
         if args.word_tokens:
+            LOG.warning('Updating word tokens')
             tokenizer = self._get_tokenizer(args.config)
             tokenizer.update_word_tokens()
 
index ea45f2a105ddbfb57ad46afc86a0cbe155e15805..420740cfc65568abf9b2dae1a53e3245ae6a434d 100644 (file)
@@ -30,6 +30,7 @@ class DummyTokenizer:
         self.update_sql_functions_called = False
         self.finalize_import_called = False
         self.update_statistics_called = False
+        self.update_word_tokens_called = False
 
     def update_sql_functions(self, *args):
         self.update_sql_functions_called = True
@@ -40,6 +41,9 @@ class DummyTokenizer:
     def update_statistics(self):
         self.update_statistics_called = True
 
+    def update_word_tokens(self):
+        self.update_word_tokens_called = True
+
 
 @pytest.fixture
 def cli_call(src_dir):
index e6dce8b382bfb9f5270adeb3a83104a90d7ddd4e..b6281c7a82034b24bda2d8d610d819410860f754 100644 (file)
@@ -39,6 +39,11 @@ class TestRefresh:
         assert self.tokenizer_mock.update_statistics_called
 
 
+    def test_refresh_word_tokens(self):
+        assert self.call_nominatim('refresh', '--word-tokens') == 0
+        assert self.tokenizer_mock.update_word_tokens_called
+
+
     def test_refresh_postcodes(self, mock_func_factory, place_table):
         func_mock = mock_func_factory(nominatim.tools.postcodes, 'update_postcodes')
         idx_mock = mock_func_factory(nominatim.indexer.indexer.Indexer, 'index_postcodes')
index f5d89e4f277d5934fbf0ed6caacdcc3d02aa3c9f..a7363958859ac0cece72ffd9dc0da443093f788c 100644 (file)
@@ -58,6 +58,14 @@ class MockIcuWordTable:
         self.conn.commit()
 
 
+    def add_housenumber(self, word_id, word_token):
+        with self.conn.cursor() as cur:
+            cur.execute("""INSERT INTO word (word_id, word_token, type)
+                              VALUES (%s, %s, 'H')
+                        """, (word_id, word_token))
+        self.conn.commit()
+
+
     def count(self):
         with self.conn.cursor() as cur:
             return cur.scalar("SELECT count(*) FROM word")
@@ -68,6 +76,11 @@ class MockIcuWordTable:
             return cur.scalar("SELECT count(*) FROM word WHERE type = 'S'")
 
 
+    def count_housenumbers(self):
+        with self.conn.cursor() as cur:
+            return cur.scalar("SELECT count(*) FROM word WHERE type = 'H'")
+
+
     def get_special(self):
         with self.conn.cursor() as cur:
             cur.execute("SELECT word_token, info, word FROM word WHERE type = 'S'")
index a3839365a750baa9c39ad8555acea1416fee9c79..372df9d2d4d56672f8478c2bb2372cbcf9821475 100644 (file)
@@ -9,6 +9,7 @@ Tests for ICU tokenizer.
 """
 import shutil
 import yaml
+import itertools
 
 import pytest
 
@@ -554,3 +555,69 @@ class TestPlaceAddress:
 
         assert 'addr' not in info
 
+
+class TestUpdateWordTokens:
+
+    @pytest.fixture(autouse=True)
+    def setup(self, tokenizer_factory, table_factory, placex_table, word_table):
+        table_factory('search_name', 'place_id BIGINT, name_vector INT[]')
+        self.tok = tokenizer_factory()
+
+
+    @pytest.fixture
+    def search_entry(self, temp_db_cursor):
+        place_id = itertools.count(1000)
+
+        def _insert(*args):
+            temp_db_cursor.execute("INSERT INTO search_name VALUES (%s, %s)",
+                                   (next(place_id), list(args)))
+
+        return _insert
+
+
+    @pytest.mark.parametrize('hnr', ('1a', '1234567', '34 5'))
+    def test_remove_unused_housenumbers(self, word_table, hnr):
+        word_table.add_housenumber(1000, hnr)
+
+        assert word_table.count_housenumbers() == 1
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 0
+
+
+    def test_keep_unused_numeral_housenumbers(self, word_table):
+        word_table.add_housenumber(1000, '5432')
+
+        assert word_table.count_housenumbers() == 1
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 1
+
+
+    def test_keep_housenumbers_from_search_name_table(self, word_table, search_entry):
+        word_table.add_housenumber(9999, '5432a')
+        word_table.add_housenumber(9991, '9 a')
+        search_entry(123, 9999, 34)
+
+        assert word_table.count_housenumbers() == 2
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 1
+
+
+    def test_keep_housenumbers_from_placex_table(self, word_table, placex_table):
+        word_table.add_housenumber(9999, '5432a')
+        word_table.add_housenumber(9990, '34z')
+        placex_table.add(housenumber='34z')
+        placex_table.add(housenumber='25432a')
+
+        assert word_table.count_housenumbers() == 2
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 1
+
+
+    def test_keep_housenumbers_from_placex_table_hnr_list(self, word_table, placex_table):
+        word_table.add_housenumber(9991, '9 b')
+        word_table.add_housenumber(9990, '34z')
+        placex_table.add(housenumber='9 a;9 b;9 c')
+
+        assert word_table.count_housenumbers() == 2
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 1
index 4addb2820582ddeba8b0fde2f5b102fded63d27a..0e46f1dc8928ebeede61714cc6f52dd495faa61c 100644 (file)
@@ -257,6 +257,13 @@ def test_update_statistics(word_table, table_factory, temp_db_cursor, tokenizer_
                                           search_name_count > 0""") > 0
 
 
+def test_update_word_tokens(tokenizer_factory):
+    tok = tokenizer_factory()
+
+    # This is a noop and should just pass.
+    tok.update_word_tokens()
+
+
 def test_normalize(analyzer):
     assert analyzer.normalize('TEsT') == 'test'