"""
Implementation of the 'import' subcommand.
"""
import logging
from pathlib import Path

import psutil

from nominatim.db.connection import connect
from nominatim.db import status, properties
from nominatim.version import NOMINATIM_VERSION
from nominatim.errors import UsageError

# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid loading modules that are not always needed.
# pylint: disable=E0012,C0415

LOG = logging.getLogger()


class SetupAll:
    """
    Create a new Nominatim database from an OSM file.
    """

    @staticmethod
    def add_args(parser):
        group_name = parser.add_argument_group('Required arguments')
        group = group_name.add_mutually_exclusive_group(required=True)
        group.add_argument('--osm-file', metavar='FILE',
                           help='OSM file to be imported.')
        group.add_argument('--continue', dest='continue_at',
                           choices=['load-data', 'indexing', 'db-postprocess'],
                           help='Continue an import that was interrupted')
        group = parser.add_argument_group('Optional arguments')
        group.add_argument('--osm2pgsql-cache', metavar='SIZE', type=int,
                           help='Size of cache to be used by osm2pgsql (in MB)')
        group.add_argument('--reverse-only', action='store_true',
                           help='Do not create tables and indexes for searching')
        group.add_argument('--no-partitions', action='store_true',
                           help=("Do not partition search indices "
                                 "(speeds up import of single country extracts)"))
        group.add_argument('--no-updates', action='store_true',
                           help="Do not keep tables that are only needed for "
                                "updating the database later")
        group = parser.add_argument_group('Expert options')
        group.add_argument('--ignore-errors', action='store_true',
                           help='Continue import even when errors in SQL are present')
        group.add_argument('--index-noanalyse', action='store_true',
                           help='Do not perform analyse operations during index')

    @staticmethod
    def run(args):  # pylint: disable=too-many-statements
        from ..tools import database_import
        from ..tools import refresh
        from ..indexer.indexer import Indexer
        from ..tools import postcodes
        from ..tokenizer import factory as tokenizer_factory

        if args.osm_file and not Path(args.osm_file).is_file():
            LOG.fatal("OSM file '%s' does not exist.", args.osm_file)
            raise UsageError('Cannot access file.')
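
        # A fresh import (no --continue) starts by setting up a bare database
        # skeleton and importing the OSM file with osm2pgsql.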
        if args.continue_at is None:
            database_import.setup_database_skeleton(args.config.get_libpq_dsn(),
                                                    args.data_dir,
                                                    args.no_partitions,
                                                    rouser=args.config.DATABASE_WEBUSER)

            LOG.warning('Importing OSM data file')
            database_import.import_osm_data(Path(args.osm_file),
                                            args.osm2pgsql_options(0, 1),
                                            drop=args.no_updates,
                                            ignore_errors=args.ignore_errors)
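
            # The SQL functions are created in several passes, interleaved
            # with the tables and triggers they refer to.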
            with connect(args.config.get_libpq_dsn()) as conn:
                LOG.warning('Create functions (1st pass)')
                refresh.create_functions(conn, args.config, False, False)
                LOG.warning('Create tables')
                database_import.create_tables(conn, args.config,
                                              reverse_only=args.reverse_only)
                refresh.load_address_levels_from_file(conn, Path(args.config.ADDRESS_LEVEL_CONFIG))
                LOG.warning('Create functions (2nd pass)')
                refresh.create_functions(conn, args.config, False, False)
                LOG.warning('Create table triggers')
                database_import.create_table_triggers(conn, args.config)
                LOG.warning('Create partition tables')
                database_import.create_partition_tables(conn, args.config)
                LOG.warning('Create functions (3rd pass)')
                refresh.create_functions(conn, args.config, False, False)

            LOG.warning('Importing wikipedia importance data')
            data_path = Path(args.config.WIKIPEDIA_DATA_PATH or args.project_dir)
            if refresh.import_wikipedia_articles(args.config.get_libpq_dsn(),
                                                 data_path) > 0:
                LOG.error('Wikipedia importance dump file not found. '
                          'Will be using default importances.')
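
        # Stage 'load-data': move the prepared OSM data into the placex table.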
        if args.continue_at is None or args.continue_at == 'load-data':
            LOG.warning('Initialise tables')
            with connect(args.config.get_libpq_dsn()) as conn:
                database_import.truncate_data_tables(conn)

            LOG.warning('Load data into placex table')
            database_import.load_data(args.config.get_libpq_dsn(),
                                      args.threads or psutil.cpu_count() or 1)

        LOG.warning("Setting up tokenizer")
        if args.continue_at is None or args.continue_at == 'load-data':
            # (re)initialise the tokenizer data
            tokenizer = tokenizer_factory.create_tokenizer(args.config)
        else:
            # just load the tokenizer
            tokenizer = tokenizer_factory.get_tokenizer_for_db(args.config)
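
        # Postcode centroids are computed from the postcodes found in the
        # imported data.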
        if args.continue_at is None or args.continue_at == 'load-data':
            LOG.warning('Calculate postcodes')
            postcodes.import_postcodes(args.config.get_libpq_dsn(), args.project_dir,
                                       tokenizer)
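
        # Stage 'indexing': run the indexer over all imported places. When
        # resuming here, a helper index speeds up skipping the places that
        # are already done (see _create_pending_index below).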
        if args.continue_at is None or args.continue_at in ('load-data', 'indexing'):
            if args.continue_at is not None and args.continue_at != 'load-data':
                with connect(args.config.get_libpq_dsn()) as conn:
                    SetupAll._create_pending_index(conn, args.config.TABLESPACE_ADDRESS_INDEX)
            LOG.warning('Indexing places')
            indexer = Indexer(args.config.get_libpq_dsn(), tokenizer,
                              args.threads or psutil.cpu_count() or 1)
            indexer.index_full(analyse=not args.index_noanalyse)
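
        # Post-processing runs unconditionally, so a continued import always
        # ends up with complete search indices.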
        LOG.warning('Post-process tables')
        with connect(args.config.get_libpq_dsn()) as conn:
            database_import.create_search_indices(conn, args.config,
                                                  drop=args.no_updates)
            LOG.warning('Create search index for default country names.')
            database_import.create_country_names(conn, tokenizer,
                                                 args.config.LANGUAGES)
        tokenizer.finalize_import(args.config)

        webdir = args.project_dir / 'website'
        LOG.warning('Setup website at %s', webdir)
        refresh.setup_website(webdir, args.config)
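
        # Finally, record the date of the imported data and the database
        # version, so that later maintenance runs know the state of the database.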
        with connect(args.config.get_libpq_dsn()) as conn:
            try:
                dbdate = status.compute_database_date(conn)
                status.set_status(conn, dbdate)
                LOG.info('Database is at %s.', dbdate)
            except Exception as exc:  # pylint: disable=broad-except
                LOG.error('Cannot determine date of database: %s', exc)

            properties.set_property(conn, 'database_version',
                                    '{0[0]}.{0[1]}.{0[2]}-{0[3]}'.format(NOMINATIM_VERSION))

        return 0

    @staticmethod
    def _create_pending_index(conn, tablespace):
        """ Add a supporting index for finding places still to be indexed.

            This index is normally created at the end of the import process
            for later updates. When indexing was only partially done, this
            index can greatly speed up going through the already indexed data.
        """
        if conn.index_exists('idx_placex_pendingsector'):
            return

        with conn.cursor() as cur:
            LOG.warning('Creating support index')
            if tablespace:
                tablespace = 'TABLESPACE ' + tablespace
            cur.execute("""CREATE INDEX idx_placex_pendingsector
                           ON placex USING BTREE (rank_address,geometry_sector)
                           {} WHERE indexed_status > 0
                        """.format(tablespace))
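

# Example invocations via the nominatim command-line tool (illustrative only;
# the file name and cache size below are placeholders):
#
#   nominatim import --osm-file planet.osm.pbf --osm2pgsql-cache 4000
#   nominatim import --continue indexing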