X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/8bf15fa691e286c27c05619ae102fb4db16eda0d..0fb8eade136ea03e7853aca0795ca69833c33661:/test/python/test_tools_database_import.py

diff --git a/test/python/test_tools_database_import.py b/test/python/test_tools_database_import.py
index 621610cf..aa90f8db 100644
--- a/test/python/test_tools_database_import.py
+++ b/test/python/test_tools_database_import.py
@@ -98,14 +98,25 @@ def test_import_base_data_ignore_partitions(dsn, src_dir, temp_db_with_extension
 def test_import_osm_data_simple(table_factory, osm2pgsql_options):
     table_factory('place', content=((1, ), ))
 
-    database_import.import_osm_data('file.pdf', osm2pgsql_options)
+    database_import.import_osm_data(Path('file.pbf'), osm2pgsql_options)
+
+
+def test_import_osm_data_multifile(table_factory, tmp_path, osm2pgsql_options):
+    table_factory('place', content=((1, ), ))
+    osm2pgsql_options['osm2pgsql_cache'] = 0
+
+    files = [tmp_path / 'file1.osm', tmp_path / 'file2.osm']
+    for f in files:
+        f.write_text('test')
+
+    database_import.import_osm_data(files, osm2pgsql_options)
 
 
 def test_import_osm_data_simple_no_data(table_factory, osm2pgsql_options):
     table_factory('place')
 
     with pytest.raises(UsageError, match='No data.*'):
-        database_import.import_osm_data('file.pdf', osm2pgsql_options)
+        database_import.import_osm_data(Path('file.pbf'), osm2pgsql_options)
 
 
 def test_import_osm_data_drop(table_factory, temp_db_conn, tmp_path, osm2pgsql_options):
@@ -117,7 +128,7 @@ def test_import_osm_data_drop(table_factory, temp_db_conn, tmp_path, osm2pgsql_o
 
     osm2pgsql_options['flatnode_file'] = str(flatfile.resolve())
 
-    database_import.import_osm_data('file.pdf', osm2pgsql_options, drop=True)
+    database_import.import_osm_data(Path('file.pbf'), osm2pgsql_options, drop=True)
 
     assert not flatfile.exists()
     assert not temp_db_conn.table_exists('planet_osm_nodes')
@@ -180,7 +191,7 @@ def test_create_country_names(temp_db_with_extensions, temp_db_conn, temp_db_cur
 
     assert len(tokenizer.analyser_cache['countries']) == 2
 
-    result_set = {k: set(v) for k, v in tokenizer.analyser_cache['countries']}
+    result_set = {k: set(v.values()) for k, v in tokenizer.analyser_cache['countries']}
 
     if languages:
         assert result_set == {'us' : set(('us', 'us1', 'United States')),