Merge remote-tracking branch 'upstream/master'

diff --git a/nominatim/tools/refresh.py b/nominatim/tools/refresh.py
index 77eecf0457119c5d338af71c160659beca9f9a44..a200ee1348b9fdc717cd8db39c3e7bffd1438a64 100644
@@ -1,32 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2022 by the Nominatim developer community.
+# For a full list of authors see the git log.
 """
 Functions for bringing auxiliary data in the database up to date.
 """
-import json
+from typing import MutableSequence, Tuple, Any, Type, Mapping, Sequence, List, cast
+import csv
+import gzip
 import logging
 from textwrap import dedent
+from pathlib import Path
 
-from psycopg2.extras import execute_values
+from psycopg2 import sql as pysql
 
-from nominatim.db.utils import execute_file
+from nominatim.config import Configuration
+from nominatim.db.connection import Connection, connect
+from nominatim.db.utils import execute_file, CopyBuffer
 from nominatim.db.sql_preprocessor import SQLPreprocessor
 from nominatim.version import NOMINATIM_VERSION
 
 LOG = logging.getLogger()
 
-def update_postcodes(dsn, sql_dir):
-    """ Recalculate postcode centroids and add, remove and update entries in the
-        location_postcode table. `conn` is an opne connection to the database.
-    """
-    execute_file(dsn, sql_dir / 'update-postcodes.sql')
-
-
-def recompute_word_counts(dsn, sql_dir):
-    """ Compute the frequency of full-word search terms.
-    """
-    execute_file(dsn, sql_dir / 'words_from_search_name.sql')
+OSM_TYPE = {'N': 'node', 'W': 'way', 'R': 'relation'}
 
-
-def _add_address_level_rows_from_entry(rows, entry):
+def _add_address_level_rows_from_entry(rows: MutableSequence[Tuple[Any, ...]],
+                                       entry: Mapping[str, Any]) -> None:
     """ Converts a single entry from the JSON format for address rank
         descriptions into a flat format suitable for inserting into a
         PostgreSQL table and adds these lines to `rows`.
@@ -43,45 +44,54 @@ def _add_address_level_rows_from_entry(rows, entry):
             for country in countries:
                 rows.append((country, key, value, rank_search, rank_address))
 
-def load_address_levels(conn, table, levels):
+
+def load_address_levels(conn: Connection, table: str, levels: Sequence[Mapping[str, Any]]) -> None:
     """ Replace the `address_levels` table with the contents of `levels'.
 
         A new table is created any previously existing table is dropped.
         The table has the following columns:
             country, class, type, rank_search, rank_address
     """
-    rows = []
+    rows: List[Tuple[Any, ...]] = []
     for entry in levels:
         _add_address_level_rows_from_entry(rows, entry)
 
     with conn.cursor() as cur:
-        cur.execute('DROP TABLE IF EXISTS {}'.format(table))
+        cur.drop_table(table)
 
-        cur.execute("""CREATE TABLE {} (country_code varchar(2),
+        cur.execute(pysql.SQL("""CREATE TABLE {} (
+                                        country_code varchar(2),
                                         class TEXT,
                                         type TEXT,
                                         rank_search SMALLINT,
-                                        rank_address SMALLINT)""".format(table))
+                                        rank_address SMALLINT)
+                              """).format(pysql.Identifier(table)))
 
-        execute_values(cur, "INSERT INTO {} VALUES %s".format(table), rows)
+        cur.execute_values(pysql.SQL("INSERT INTO {} VALUES %s")
+                           .format(pysql.Identifier(table)), rows)
 
-        cur.execute('CREATE UNIQUE INDEX ON {} (country_code, class, type)'.format(table))
+        cur.execute(pysql.SQL('CREATE UNIQUE INDEX ON {} (country_code, class, type)')
+                    .format(pysql.Identifier(table)))
 
     conn.commit()
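
A minimal sketch of feeding this function directly, assuming the entry
schema of Nominatim's address-levels configuration (a per-entry
`countries` list plus a `tags` map from class to type to rank; the
two-element rank form is an assumption here):

    from nominatim.db.connection import connect
    from nominatim.tools.refresh import load_address_levels

    # Assumed schema: a bare number sets rank_search and rank_address
    # alike; a pair [rank_search, rank_address] sets them separately.
    levels = [{"countries": ["de"],
               "tags": {"place": {"city": 16,
                                  "municipality": [14, 16]}}}]

    with connect('dbname=nominatim') as conn:  # hypothetical DSN
        load_address_levels(conn, 'address_levels', levels)
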
 
-def load_address_levels_from_file(conn, config_file):
-    """ Replace the `address_levels` table with the contents of the config
-        file.
+
+def load_address_levels_from_config(conn: Connection, config: Configuration) -> None:
+    """ Replace the `address_levels` table with the content as
+        defined in the given configuration. Uses the parameter
+        NOMINATIM_ADDRESS_LEVEL_CONFIG to determine the location of the
+        configuration file.
     """
-    with config_file.open('r') as fdesc:
-        load_address_levels(conn, 'address_levels', json.load(fdesc))
+    cfg = config.load_sub_configuration('', config='ADDRESS_LEVEL_CONFIG')
+    load_address_levels(conn, 'address_levels', cfg)
 
 
-def create_functions(conn, config, sqllib_dir,
-                     enable_diff_updates=True, enable_debug=False):
+def create_functions(conn: Connection, config: Configuration,
+                     enable_diff_updates: bool = True,
+                     enable_debug: bool = False) -> None:
     """ (Re)create the PL/pgSQL functions.
     """
-    sql = SQLPreprocessor(conn, config, sqllib_dir)
+    sql = SQLPreprocessor(conn, config)
 
     sql.run_sql_file(conn, 'functions.sql',
                      disable_diff_updates=not enable_diff_updates,
@@ -104,45 +114,130 @@ PHP_CONST_DEFS = (
     ('Database_DSN', 'DATABASE_DSN', str),
     ('Default_Language', 'DEFAULT_LANGUAGE', str),
     ('Log_DB', 'LOG_DB', bool),
-    ('Log_File', 'LOG_FILE', str),
-    ('Max_Word_Frequency', 'MAX_WORD_FREQUENCY', int),
+    ('Log_File', 'LOG_FILE', Path),
     ('NoAccessControl', 'CORS_NOACCESSCONTROL', bool),
     ('Places_Max_ID_count', 'LOOKUP_MAX_COUNT', int),
     ('PolygonOutput_MaximumTypes', 'POLYGON_OUTPUT_MAX_TYPES', int),
     ('Search_BatchMode', 'SEARCH_BATCH_MODE', bool),
     ('Search_NameOnlySearchFrequencyThreshold', 'SEARCH_NAME_ONLY_THRESHOLD', str),
-    ('Term_Normalization_Rules', 'TERM_NORMALIZATION', str),
-    ('Use_Aux_Location_data', 'USE_AUX_LOCATION_DATA', bool),
     ('Use_US_Tiger_Data', 'USE_US_TIGER_DATA', bool),
     ('MapIcon_URL', 'MAPICON_URL', str),
+    ('Search_WithinCountries', 'SEARCH_WITHIN_COUNTRIES', bool),
 )
 
 
-def import_wikipedia_articles(dsn, data_path, ignore_errors=False):
+def import_wikipedia_articles(dsn: str, data_path: Path, ignore_errors: bool = False) -> int:
     """ Replaces the wikipedia importance tables with new data.
         The import is run in a single transaction so that the new data
-        is replace seemlessly.
+        is replaced seamlessly.
 
         Returns 0 if all was well and 1 if the importance file could not
         be found. Throws an exception if there was an error reading the file.
     """
-    datafile = data_path / 'wikimedia-importance.sql.gz'
+    if import_importance_csv(dsn, data_path / 'wikimedia-importance.csv.gz') == 0 \
+       or import_importance_sql(dsn, data_path / 'wikimedia-importance.sql.gz',
+                                ignore_errors) == 0:
+        return 0
 
-    if not datafile.exists():
+    return 1
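
A short usage sketch: the gzipped CSV is tried first and the legacy SQL
dump only serves as a fallback (DSN and data directory are illustrative):

    from pathlib import Path
    from nominatim.tools.refresh import import_wikipedia_articles

    # Looks for wikimedia-importance.csv.gz, then wikimedia-importance.sql.gz.
    # Returns 0 on success, 1 if neither file exists.
    if import_wikipedia_articles('dbname=nominatim', Path('data')) != 0:
        print('no wikipedia importance dump found')
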
+
+
+def import_importance_csv(dsn: str, data_file: Path) -> int:
+    """ Replace wikipedia importance table with data from a
+        single CSV file.
+
+        The file must be a gzipped, tab-separated CSV file with the
+        following columns: language, title, importance, wikidata_id
+
+        Other columns may be present but will be ignored.
+    """
+    if not data_file.exists():
+        return 1
+
+    # Only import the first occurrence of a wikidata ID.
+    # This keeps indexes and table small.
+    wd_done = set()
+
+    with connect(dsn) as conn:
+        with conn.cursor() as cur:
+            cur.drop_table('wikipedia_article')
+            cur.drop_table('wikipedia_redirect')
+            cur.drop_table('wikimedia_importance')
+            cur.execute("""CREATE TABLE wikimedia_importance (
+                             language TEXT NOT NULL,
+                             title TEXT NOT NULL,
+                             importance double precision NOT NULL,
+                             wikidata TEXT
+                           ) """)
+
+        with gzip.open(str(data_file), 'rt') as fd, CopyBuffer() as buf:
+            for row in csv.DictReader(fd, delimiter='\t', quotechar='|'):
+                wd_id = int(row['wikidata_id'][1:])
+                buf.add(row['language'], row['title'], row['importance'],
+                        None if wd_id in wd_done else row['wikidata_id'])
+                wd_done.add(wd_id)
+
+                if buf.size() > 10000000:
+                    with conn.cursor() as cur:
+                        buf.copy_out(cur, 'wikimedia_importance',
+                                     columns=['language', 'title', 'importance',
+                                              'wikidata'])
+
+            with conn.cursor() as cur:
+                buf.copy_out(cur, 'wikimedia_importance',
+                             columns=['language', 'title', 'importance', 'wikidata'])
+
+        with conn.cursor() as cur:
+            cur.execute("""CREATE INDEX IF NOT EXISTS idx_wikimedia_importance_title
+                           ON wikimedia_importance (title)""")
+            cur.execute("""CREATE INDEX IF NOT EXISTS idx_wikimedia_importance_wikidata
+                           ON wikimedia_importance (wikidata)
+                           WHERE wikidata is not null""")
+
+        conn.commit()
+
+    return 0
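
A hedged sketch of producing a conforming input file. Despite the .csv
suffix, the reader above expects tab-separated fields with '|' as the
quote character, plus a header row (DictReader takes the first row as
field names):

    import csv
    import gzip
    from pathlib import Path

    FIELDS = ['language', 'title', 'importance', 'wikidata_id']

    # wikidata_id must look like 'Q42': the import strips the leading
    # letter and parses the rest as an integer for deduplication.
    with gzip.open(Path('wikimedia-importance.csv.gz'), 'wt', newline='') as fd:
        writer = csv.DictWriter(fd, fieldnames=FIELDS,
                                delimiter='\t', quotechar='|')
        writer.writeheader()
        writer.writerow({'language': 'en', 'title': 'Douglas Adams',
                         'importance': '0.7632', 'wikidata_id': 'Q42'})
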
+
+
+def import_importance_sql(dsn: str, data_file: Path, ignore_errors: bool) -> int:
+    """ Replace wikipedia importance table with data from an SQL file.
+    """
+    if not data_file.exists():
         return 1
 
     pre_code = """BEGIN;
                   DROP TABLE IF EXISTS "wikipedia_article";
-                  DROP TABLE IF EXISTS "wikipedia_redirect"
+                  DROP TABLE IF EXISTS "wikipedia_redirect";
+                  DROP TABLE IF EXISTS "wikipedia_importance";
                """
     post_code = "COMMIT"
-    execute_file(dsn, datafile, ignore_errors=ignore_errors,
+    execute_file(dsn, data_file, ignore_errors=ignore_errors,
                  pre_code=pre_code, post_code=post_code)
 
     return 0
 
 
-def recompute_importance(conn):
+def import_secondary_importance(dsn: str, data_path: Path, ignore_errors: bool = False) -> int:
+    """ Replaces the secondary importance raster data table with new data.
+
+        Returns 0 if all was well, 1 if the raster SQL file could not
+        be found and 2 if the PostGIS version is too old for raster
+        support. Throws an exception if there was an error reading the file.
+    """
+    datafile = data_path / 'secondary_importance.sql.gz'
+    if not datafile.exists():
+        return 1
+
+    with connect(dsn) as conn:
+        postgis_version = conn.postgis_version_tuple()
+        if postgis_version[0] < 3:
+            LOG.error('PostGIS version is too old for using OSM raster data.')
+            return 2
+
+    execute_file(dsn, datafile, ignore_errors=ignore_errors)
+
+    return 0
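
Return codes at a glance (DSN and path below are illustrative):

    from pathlib import Path
    from nominatim.tools.refresh import import_secondary_importance

    # 0 = imported, 1 = secondary_importance.sql.gz missing,
    # 2 = PostGIS older than version 3, which lacks raster support.
    status = import_secondary_importance('dbname=nominatim', Path('data'))
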
+
+
+def recompute_importance(conn: Connection) -> None:
     """ Recompute wikipedia links and importance for all entries in placex.
         This is a long-running operation that must not be executed in
         parallel with updates.
@@ -152,7 +247,7 @@ def recompute_importance(conn):
         cur.execute("""
             UPDATE placex SET (wikipedia, importance) =
                (SELECT wikipedia, importance
-                FROM compute_importance(extratags, country_code, osm_type, osm_id))
+                FROM compute_importance(extratags, country_code, rank_search, centroid))
             """)
         cur.execute("""
             UPDATE placex s SET wikipedia = d.wikipedia, importance = d.importance
@@ -165,35 +260,87 @@ def recompute_importance(conn):
     conn.commit()
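
A minimal usage sketch; the function commits itself and, as the
docstring warns, must not run in parallel with updates:

    from nominatim.db.connection import connect
    from nominatim.tools.refresh import recompute_importance

    with connect('dbname=nominatim') as conn:  # hypothetical DSN
        recompute_importance(conn)
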
 
 
-def setup_website(basedir, phplib_dir, config):
+def _quote_php_variable(var_type: Type[Any], config: Configuration,
+                        conf_name: str) -> str:
+    if var_type == bool:
+        return 'true' if config.get_bool(conf_name) else 'false'
+
+    if var_type == int:
+        return cast(str, getattr(config, conf_name))
+
+    if not getattr(config, conf_name):
+        return 'false'
+
+    if var_type == Path:
+        value = str(config.get_path(conf_name) or '')
+    else:
+        value = getattr(config, conf_name)
+
+    quoted = value.replace("'", "\\'")
+    return f"'{quoted}'"
+
+
+def setup_website(basedir: Path, config: Configuration, conn: Connection) -> None:
     """ Create the website script stubs.
     """
+    if config.lib_dir.php is None:
+        LOG.info("Python frontend does not require website setup. Skipping.")
+        return
+
     if not basedir.exists():
         LOG.info('Creating website directory.')
         basedir.mkdir()
 
-    template = dedent("""\
+    assert config.project_dir is not None
+    basedata = dedent(f"""\
                       <?php
 
                       @define('CONST_Debug', $_GET['debug'] ?? false);
-                      @define('CONST_LibDir', '{0}');
-                      @define('CONST_NominatimVersion', '{1[0]}.{1[1]}.{1[2]}-{1[3]}');
+                      @define('CONST_LibDir', '{config.lib_dir.php}');
+                      @define('CONST_TokenizerDir', '{config.project_dir / 'tokenizer'}');
+                      @define('CONST_NominatimVersion', '{NOMINATIM_VERSION!s}');
 
-                      """.format(phplib_dir, NOMINATIM_VERSION))
+                      """)
 
     for php_name, conf_name, var_type in PHP_CONST_DEFS:
-        if var_type == bool:
-            varout = 'true' if config.get_bool(conf_name) else 'false'
-        elif var_type == int:
-            varout = getattr(config, conf_name)
-        elif not getattr(config, conf_name):
-            varout = 'false'
-        else:
-            varout = "'{}'".format(getattr(config, conf_name).replace("'", "\\'"))
+        varout = _quote_php_variable(var_type, config, conf_name)
 
-        template += "@define('CONST_{}', {});\n".format(php_name, varout)
+        basedata += f"@define('CONST_{php_name}', {varout});\n"
 
-    template += "\nrequire_once('{}/website/{{}}');\n".format(phplib_dir)
+    template = "\nrequire_once(CONST_LibDir.'/website/{}');\n"
+
+    search_name_table_exists = bool(conn and conn.table_exists('search_name'))
 
     for script in WEBSITE_SCRIPTS:
-        (basedir / script).write_text(template.format(script), 'utf-8')
+        if not search_name_table_exists and script == 'search.php':
+            out = template.format('reverse-only-search.php')
+        else:
+            out = template.format(script)
+
+        (basedir / script).write_text(basedata + out, 'utf-8')
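
A hedged setup sketch; the Configuration constructor arguments and the
get_libpq_dsn() helper are assumptions about the surrounding API:

    from pathlib import Path
    from nominatim.config import Configuration
    from nominatim.db.connection import connect
    from nominatim.tools.refresh import setup_website

    project_dir = Path('/srv/nominatim-project')   # hypothetical project
    config = Configuration(project_dir)            # assumed constructor

    # With the Python frontend config.lib_dir.php is None and this is a
    # no-op; otherwise one stub per WEBSITE_SCRIPTS entry is written,
    # with search.php rewired to reverse-only-search.php when the
    # database has no search_name table.
    with connect(config.get_libpq_dsn()) as conn:  # assumed helper
        setup_website(project_dir / 'website', config, conn)
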
+
+
+def invalidate_osm_object(osm_type: str, osm_id: int, conn: Connection,
+                          recursive: bool = True) -> None:
+    """ Mark the given OSM object for reindexing. When 'recursive' is set
+        to True (the default), then all dependent objects are marked for
+        reindexing as well.
+
+        'osm_type' must be on of 'N' (node), 'W' (way) or 'R' (relation).
+        If the given object does not exist, then nothing happens.
+    """
+    assert osm_type in ('N', 'R', 'W')
+
+    LOG.warning("Invalidating OSM %s %s%s.",
+                OSM_TYPE[osm_type], osm_id,
+                ' and its dependent places' if recursive else '')
+
+    with conn.cursor() as cur:
+        if recursive:
+            sql = """SELECT place_force_update(place_id)
+                     FROM placex WHERE osm_type = %s and osm_id = %s"""
+        else:
+            sql = """UPDATE placex SET indexed_status = 2
+                     WHERE osm_type = %s and osm_id = %s"""
+
+        cur.execute(sql, (osm_type, osm_id))
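
A closing usage sketch; the function issues the statements but does not
commit, so the caller finalizes the transaction:

    from nominatim.db.connection import connect
    from nominatim.tools.refresh import invalidate_osm_object

    with connect('dbname=nominatim') as conn:  # hypothetical DSN
        invalidate_osm_object('W', 1234, conn)  # way 1234 plus dependents
        conn.commit()
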