diff --git a/nominatim/tools/refresh.py b/nominatim/tools/refresh.py
index 531de14d..a200ee13 100644
--- a/nominatim/tools/refresh.py
+++ b/nominatim/tools/refresh.py
@@ -8,8 +8,9 @@
 Functions for bringing auxiliary data in the database up-to-date.
 """
 from typing import MutableSequence, Tuple, Any, Type, Mapping, Sequence, List, cast
+import csv
+import gzip
 import logging
-import subprocess
 from textwrap import dedent
 from pathlib import Path
 
@@ -17,9 +18,9 @@ from psycopg2 import sql as pysql
 
 from nominatim.config import Configuration
 from nominatim.db.connection import Connection, connect
-from nominatim.db.utils import execute_file
+from nominatim.db.utils import execute_file, CopyBuffer
 from nominatim.db.sql_preprocessor import SQLPreprocessor
-from nominatim.version import version_str
+from nominatim.version import NOMINATIM_VERSION
 
 LOG = logging.getLogger()
 
@@ -121,6 +122,7 @@ PHP_CONST_DEFS = (
     ('Search_NameOnlySearchFrequencyThreshold', 'SEARCH_NAME_ONLY_THRESHOLD', str),
     ('Use_US_Tiger_Data', 'USE_US_TIGER_DATA', bool),
     ('MapIcon_URL', 'MAPICON_URL', str),
+    ('Search_WithinCountries', 'SEARCH_WITHIN_COUNTRIES', bool),
 )
 
 
@@ -132,63 +134,106 @@ def import_wikipedia_articles(dsn: str, data_path: Path, ignore_errors: bool = F
         Returns 0 if all was well and 1 if the importance file could not
         be found. Throws an exception if there was an error reading the file.
     """
-    datafile = data_path / 'wikimedia-importance.sql.gz'
+    if import_importance_csv(dsn, data_path / 'wikimedia-importance.csv.gz') == 0 \
+       or import_importance_sql(dsn, data_path / 'wikimedia-importance.sql.gz',
+                                ignore_errors) == 0:
+        return 0
 
-    if not datafile.exists():
+    return 1
+
+
+def import_importance_csv(dsn: str, data_file: Path) -> int:
+    """ Replace the wikipedia importance table with data from a
+        single CSV file.
+
+        The file must be a gzipped CSV and have the following columns:
+        language, title, importance, wikidata_id
+
+        Other columns may be present but will be ignored.
+    """
+    if not data_file.exists():
+        return 1
+
+    # Only import the first occurrence of a wikidata ID.
+    # This keeps the indexes and the table small.
+    wd_done = set()
+
+    with connect(dsn) as conn:
+        with conn.cursor() as cur:
+            cur.drop_table('wikipedia_article')
+            cur.drop_table('wikipedia_redirect')
+            cur.drop_table('wikimedia_importance')
+            cur.execute("""CREATE TABLE wikimedia_importance (
+                             language TEXT NOT NULL,
+                             title TEXT NOT NULL,
+                             importance double precision NOT NULL,
+                             wikidata TEXT
+                           ) """)
+
+        with gzip.open(str(data_file), 'rt') as fd, CopyBuffer() as buf:
+            for row in csv.DictReader(fd, delimiter='\t', quotechar='|'):
+                wd_id = int(row['wikidata_id'][1:])
+                buf.add(row['language'], row['title'], row['importance'],
+                        None if wd_id in wd_done else row['wikidata_id'])
+                wd_done.add(wd_id)
+
+                if buf.size() > 10000000:
+                    with conn.cursor() as cur:
+                        buf.copy_out(cur, 'wikimedia_importance',
+                                     columns=['language', 'title', 'importance',
+                                              'wikidata'])
+
+            with conn.cursor() as cur:
+                buf.copy_out(cur, 'wikimedia_importance',
+                             columns=['language', 'title', 'importance', 'wikidata'])
+
+        with conn.cursor() as cur:
+            cur.execute("""CREATE INDEX IF NOT EXISTS idx_wikimedia_importance_title
+                           ON wikimedia_importance (title)""")
+            cur.execute("""CREATE INDEX IF NOT EXISTS idx_wikimedia_importance_wikidata
+                           ON wikimedia_importance (wikidata)
+                           WHERE wikidata is not null""")
+
+        conn.commit()
+
+    return 0
+
+
+def import_importance_sql(dsn: str, data_file: Path, ignore_errors: bool) -> int:
+    """ Replace the wikipedia importance table with data from an SQL file.
+    """
+    if not data_file.exists():
         return 1
 
     pre_code = """BEGIN;
                   DROP TABLE IF EXISTS "wikipedia_article";
-                  DROP TABLE IF EXISTS "wikipedia_redirect"
+                  DROP TABLE IF EXISTS "wikipedia_redirect";
+                  DROP TABLE IF EXISTS "wikipedia_importance";
                """
     post_code = "COMMIT"
-    execute_file(dsn, datafile, ignore_errors=ignore_errors,
+    execute_file(dsn, data_file, ignore_errors=ignore_errors,
                  pre_code=pre_code, post_code=post_code)
 
     return 0
 
 
-def import_osm_views_geotiff(dsn: str, data_path: Path) -> int:
-    """ Replaces the OSM views table with new data.
-        Returns 0 if all was well and 1 if the OSM views GeoTIFF file could not
+def import_secondary_importance(dsn: str, data_path: Path, ignore_errors: bool = False) -> int:
+    """ Replaces the secondary importance raster data table with new data.
+
+        Returns 0 if all was well and 1 if the raster SQL file could not
         be found. Throws an exception if there was an error reading the file.
""" - datafile = data_path / 'osmviews.tiff' + datafile = data_path / 'secondary_importance.sql.gz' if not datafile.exists(): return 1 - with connect(dsn) as conn: + with connect(dsn) as conn: postgis_version = conn.postgis_version_tuple() if postgis_version[0] < 3: + LOG.error('PostGIS version is too old for using OSM raster data.') return 2 - with conn.cursor() as cur: - cur.drop_table("osm_views") - cur.drop_table("osm_views_stat") - - # -ovr: 6 -> zoom 12, 5 -> zoom 13, 4 -> zoom 14, 3 -> zoom 15 - reproject_geotiff = f"gdalwarp -q -multi -ovr 3 -overwrite \ - -co COMPRESS=LZW -tr 0.01 0.01 -t_srs EPSG:4326 {datafile} raster2import.tiff" - subprocess.run(["/bin/bash", "-c" , reproject_geotiff], check=True) - - tile_size = 256 - import_geotiff = f"raster2pgsql -I -C -Y -t {tile_size}x{tile_size} raster2import.tiff \ - public.osm_views | psql {dsn} > /dev/null" - subprocess.run(["/bin/bash", "-c" , import_geotiff], check=True) - - cleanup = "rm raster2import.tiff" - subprocess.run(["/bin/bash", "-c" , cleanup], check=True) - - # To normalize osm views data, the max view value is needed - cur.execute(f""" - CREATE TABLE osm_views_stat AS ( - SELECT MAX(ST_Value(osm_views.rast, 1, x, y)) AS max_views_count - FROM osm_views CROSS JOIN - generate_series(1, {tile_size}) As x - CROSS JOIN generate_series(1, {tile_size}) As y - WHERE x <= ST_Width(rast) AND y <= ST_Height(rast)); - """) - conn.commit() + execute_file(dsn, datafile, ignore_errors=ignore_errors) return 0 @@ -202,7 +247,7 @@ def recompute_importance(conn: Connection) -> None: cur.execute(""" UPDATE placex SET (wikipedia, importance) = (SELECT wikipedia, importance - FROM compute_importance(extratags, country_code, osm_type, osm_id, centroid)) + FROM compute_importance(extratags, country_code, rank_search, centroid)) """) cur.execute(""" UPDATE placex s SET wikipedia = d.wikipedia, importance = d.importance @@ -238,34 +283,41 @@ def _quote_php_variable(var_type: Type[Any], config: Configuration, def setup_website(basedir: Path, config: Configuration, conn: Connection) -> None: """ Create the website script stubs. """ + if config.lib_dir.php is None: + LOG.info("Python frontend does not require website setup. Skipping.") + return + if not basedir.exists(): LOG.info('Creating website directory.') basedir.mkdir() - template = dedent(f"""\ + assert config.project_dir is not None + basedata = dedent(f"""\