Merge pull request #3466 from mtmail/apple-silicon-parallels

diff --git a/test/python/tools/test_tiger_data.py b/test/python/tools/test_tiger_data.py
index f027c4ffef19d1eb877038a57153a982fe4db61a..5d65fafb3b5c4d272472269ece832ea7bb220be6 100644
--- a/test/python/tools/test_tiger_data.py
+++ b/test/python/tools/test_tiger_data.py
@@ -1,8 +1,8 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: GPL-3.0-or-later
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2022 by the Nominatim developer community.
+# Copyright (C) 2024 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Test for tiger data function
@@ -11,9 +11,11 @@ import tarfile
 from textwrap import dedent
 
 import pytest
+import pytest_asyncio
 
-from nominatim.tools import tiger_data
-from nominatim.errors import UsageError
+from nominatim_db.db.connection import execute_scalar
+from nominatim_db.tools import tiger_data, freeze
+from nominatim_db.errors import UsageError
 
 class MockTigerTable:
 
@@ -27,9 +29,11 @@ class MockTigerTable:
                                                token_info JSONB,
                                                postcode TEXT)""")
 
+            # We need this table to determine if the database is frozen or not
+            cur.execute("CREATE TABLE place (number INTEGER)")
+
     def count(self):
-        with self.conn.cursor() as cur:
-            return cur.scalar("SELECT count(*) FROM tiger")
+        return execute_scalar(self.conn, "SELECT count(*) FROM tiger")
 
     def row(self):
         with self.conn.cursor() as cur:
@@ -73,71 +77,91 @@ def csv_factory(tmp_path):
 
 
 @pytest.mark.parametrize("threads", (1, 5))
-def test_add_tiger_data(def_config, src_dir, tiger_table, tokenizer_mock, threads):
-    tiger_data.add_tiger_data(str(src_dir / 'test' / 'testdb' / 'tiger'),
-                              def_config, threads, tokenizer_mock())
+@pytest.mark.asyncio
+async def test_add_tiger_data(def_config, src_dir, tiger_table, tokenizer_mock, threads):
+    await tiger_data.add_tiger_data(str(src_dir / 'test' / 'testdb' / 'tiger'),
+                                    def_config, threads, tokenizer_mock())
 
     assert tiger_table.count() == 6213
 
 
-def test_add_tiger_data_no_files(def_config, tiger_table, tokenizer_mock,
+@pytest.mark.asyncio
+async def test_add_tiger_data_database_frozen(def_config, temp_db_conn, tiger_table, tokenizer_mock,
+                                 tmp_path):
+    freeze.drop_update_tables(temp_db_conn)
+
+    with pytest.raises(UsageError) as excinfo:
+        await tiger_data.add_tiger_data(str(tmp_path), def_config, 1, tokenizer_mock())
+
+    assert "database frozen" in str(excinfo.value)
+
+    assert tiger_table.count() == 0
+
+
+@pytest.mark.asyncio
+async def test_add_tiger_data_no_files(def_config, tiger_table, tokenizer_mock,
                                  tmp_path):
-    tiger_data.add_tiger_data(str(tmp_path), def_config, 1, tokenizer_mock())
+    await tiger_data.add_tiger_data(str(tmp_path), def_config, 1, tokenizer_mock())
 
     assert tiger_table.count() == 0
 
 
-def test_add_tiger_data_bad_file(def_config, tiger_table, tokenizer_mock,
+@pytest.mark.asyncio
+async def test_add_tiger_data_bad_file(def_config, tiger_table, tokenizer_mock,
                                  tmp_path):
     sqlfile = tmp_path / '1010.csv'
     sqlfile.write_text("""Random text""")
 
-    tiger_data.add_tiger_data(str(tmp_path), def_config, 1, tokenizer_mock())
+    await tiger_data.add_tiger_data(str(tmp_path), def_config, 1, tokenizer_mock())
 
     assert tiger_table.count() == 0
 
 
-def test_add_tiger_data_hnr_nan(def_config, tiger_table, tokenizer_mock,
+@pytest.mark.asyncio
+async def test_add_tiger_data_hnr_nan(def_config, tiger_table, tokenizer_mock,
                                 csv_factory, tmp_path):
     csv_factory('file1', hnr_from=99)
     csv_factory('file2', hnr_from='L12')
     csv_factory('file3', hnr_to='12.4')
 
-    tiger_data.add_tiger_data(str(tmp_path), def_config, 1, tokenizer_mock())
+    await tiger_data.add_tiger_data(str(tmp_path), def_config, 1, tokenizer_mock())
 
     assert tiger_table.count() == 1
-    assert tiger_table.row()['start'] == 99
+    assert tiger_table.row().start == 99
 
 
 @pytest.mark.parametrize("threads", (1, 5))
-def test_add_tiger_data_tarfile(def_config, tiger_table, tokenizer_mock,
+@pytest.mark.asyncio
+async def test_add_tiger_data_tarfile(def_config, tiger_table, tokenizer_mock,
                                 tmp_path, src_dir, threads):
     tar = tarfile.open(str(tmp_path / 'sample.tar.gz'), "w:gz")
     tar.add(str(src_dir / 'test' / 'testdb' / 'tiger' / '01001.csv'))
     tar.close()
 
-    tiger_data.add_tiger_data(str(tmp_path / 'sample.tar.gz'), def_config, threads,
-                              tokenizer_mock())
+    await tiger_data.add_tiger_data(str(tmp_path / 'sample.tar.gz'), def_config, threads,
+                                    tokenizer_mock())
 
     assert tiger_table.count() == 6213
 
 
-def test_add_tiger_data_bad_tarfile(def_config, tiger_table, tokenizer_mock,
+@pytest.mark.asyncio
+async def test_add_tiger_data_bad_tarfile(def_config, tiger_table, tokenizer_mock,
                                     tmp_path):
     tarfile = tmp_path / 'sample.tar.gz'
     tarfile.write_text("""Random text""")
 
     with pytest.raises(UsageError):
-        tiger_data.add_tiger_data(str(tarfile), def_config, 1, tokenizer_mock())
+        await tiger_data.add_tiger_data(str(tarfile), def_config, 1, tokenizer_mock())

 
-def test_add_tiger_data_empty_tarfile(def_config, tiger_table, tokenizer_mock,
+@pytest.mark.asyncio
+async def test_add_tiger_data_empty_tarfile(def_config, tiger_table, tokenizer_mock,
                                       tmp_path):
     tar = tarfile.open(str(tmp_path / 'sample.tar.gz'), "w:gz")
     tar.add(__file__)
     tar.close()
 
-    tiger_data.add_tiger_data(str(tmp_path / 'sample.tar.gz'), def_config, 1,
-                              tokenizer_mock())
+    await tiger_data.add_tiger_data(str(tmp_path / 'sample.tar.gz'), def_config, 1,
+                                    tokenizer_mock())
 
     assert tiger_table.count() == 0