test/python/test_tools_country_info.py
1 """
2 Tests for function that handle country properties.
3 """
4
5 import pytest
6
7 from nominatim.tools import country_info
8
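# country_info (imported above) manages Nominatim's static country data:
# it loads the country configuration and fills the country_name and
# country_osm_grid tables. (Summary inferred from the tests below.)
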
@pytest.fixture(autouse=True)
def read_config(def_config):
    country_info.setup_country_config(def_config)

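# The autouse fixture above runs before every test in this module, loading
# the default country configuration; def_config is assumed to be supplied
# by the suite's conftest.py.
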
@pytest.mark.parametrize("no_partitions", (True, False))
def test_setup_country_tables(src_dir, temp_db_with_extensions, dsn, temp_db_cursor,
                              def_config, no_partitions):
    country_info.setup_country_tables(dsn, src_dir / 'data', no_partitions)

    assert temp_db_cursor.table_exists('country_name')
    # Every country code must appear exactly once in country_name.
    assert temp_db_cursor.table_rows('country_name') == \
             temp_db_cursor.scalar('SELECT count(DISTINCT country_code) FROM country_name')

    partitions = temp_db_cursor.row_set("SELECT DISTINCT partition FROM country_name")
    if no_partitions:
        # Without partitioning, all countries end up in partition 0.
        assert partitions == {(0, )}
    else:
        assert len(partitions) > 10

    assert temp_db_cursor.table_exists('country_osm_grid')
    assert temp_db_cursor.table_rows('country_osm_grid') > 100


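# For reference only: a minimal sketch of the temp_db_cursor helpers used
# above, assuming they wrap a plain psycopg2 cursor. The real versions live
# in the test suite's conftest.py; the names match, the bodies are guesses.
class _ExampleCursorHelpers:
    def __init__(self, cursor):
        self.cursor = cursor

    def scalar(self, sql, params=None):
        # Run a query expected to return exactly one value and unwrap it.
        self.cursor.execute(sql, params)
        return self.cursor.fetchone()[0]

    def table_exists(self, table):
        # Look the table up in the PostgreSQL catalog.
        return self.scalar('SELECT count(*) FROM pg_tables WHERE tablename = %s',
                           (table, )) == 1

    def table_rows(self, table):
        # Total number of rows in the table.
        return self.scalar('SELECT count(*) FROM ' + table)

    def row_set(self, sql):
        # All result rows as a set of tuples, for order-independent checks.
        self.cursor.execute(sql)
        return set(self.cursor.fetchall())

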
@pytest.mark.parametrize("languages", (None, ' fr,en'))
def test_create_country_names(temp_db_with_extensions, temp_db_conn, temp_db_cursor,
                              table_factory, tokenizer_mock, languages):

    table_factory('country_name', 'country_code varchar(2), name hstore',
                  content=(('us', '"name"=>"us1","name:af"=>"us2"'),
                           ('fr', '"name"=>"Fra", "name:en"=>"Fren"')))

    assert temp_db_cursor.scalar("SELECT count(*) FROM country_name") == 2

    tokenizer = tokenizer_mock()

    country_info.create_country_names(temp_db_conn, tokenizer, languages)

    assert len(tokenizer.analyser_cache['countries']) == 2

    # The mock records (country_code, names) pairs, so each entry unpacks
    # into a code and a dict of name variants.
    result_set = {k: set(v.values()) for k, v in tokenizer.analyser_cache['countries']}

    if languages:
        # With an explicit language list, the Afrikaans name 'us2' is dropped.
        assert result_set == {'us' : set(('us', 'us1', 'United States')),
                              'fr' : set(('fr', 'Fra', 'Fren'))}
    else:
        # Without a language filter, all name variants are kept.
        assert result_set == {'us' : set(('us', 'us1', 'us2', 'United States')),
                              'fr' : set(('fr', 'Fra', 'Fren'))}
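

# For reference only: a hedged sketch of the tokenizer mock assumed above
# (the real fixture lives in the suite's conftest.py). The dict comprehension
# over analyser_cache['countries'] only unpacks cleanly if the cache holds
# (country_code, names) pairs, so the mock presumably records calls like this:
class _ExampleTokenizerMock:
    def __init__(self):
        self.analyser_cache = {'countries': []}

    def name_analyzer(self):
        # create_country_names() is expected to request an analyzer; the
        # mock can simply hand back itself as a context manager.
        return self

    def __enter__(self):
        return self

    def __exit__(self, *args):
        pass

    def add_country_names(self, code, names):
        # Record each call as a (code, names) pair for later inspection.
        self.analyser_cache['countries'].append((code, names))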