X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/9e92759ac79f29093fe0cdd15f4b0360bc4b03c9..754846d9dc125e3caa8a97b8381918b565a55289:/nominatim/tokenizer/legacy_tokenizer.py?ds=sidebyside diff --git a/nominatim/tokenizer/legacy_tokenizer.py b/nominatim/tokenizer/legacy_tokenizer.py index 0aacb57f..8957426b 100644 --- a/nominatim/tokenizer/legacy_tokenizer.py +++ b/nominatim/tokenizer/legacy_tokenizer.py @@ -5,7 +5,9 @@ from collections import OrderedDict import logging import re import shutil +from textwrap import dedent +from icu import Transliterator import psycopg2 import psycopg2.extras @@ -14,6 +16,7 @@ from nominatim.db import properties from nominatim.db import utils as db_utils from nominatim.db.sql_preprocessor import SQLPreprocessor from nominatim.errors import UsageError +from nominatim.tokenizer.base import AbstractAnalyzer, AbstractTokenizer DBCFG_NORMALIZATION = "tokenizer_normalization" DBCFG_MAXWORDFREQ = "tokenizer_maxwordfreq" @@ -74,7 +77,7 @@ def _check_module(module_dir, conn): raise UsageError("Database module cannot be accessed.") from err -class LegacyTokenizer: +class LegacyTokenizer(AbstractTokenizer): """ The legacy tokenizer uses a special PostgreSQL module to normalize names and queries. The tokenizer thus implements normalization through calls to the database. @@ -86,7 +89,7 @@ class LegacyTokenizer: self.normalization = None - def init_new_db(self, config): + def init_new_db(self, config, init_db=True): """ Set up a new tokenizer for the database. This copies all necessary data in the project directory to make @@ -98,13 +101,16 @@ class LegacyTokenizer: self.normalization = config.TERM_NORMALIZATION + self._install_php(config) + with connect(self.dsn) as conn: _check_module(module_dir, conn) self._save_config(conn, config) conn.commit() - self.update_sql_functions(config) - self._init_db_tables(config) + if init_db: + self.update_sql_functions(config) + self._init_db_tables(config) def init_from_project(self): @@ -114,6 +120,15 @@ class LegacyTokenizer: self.normalization = properties.get_property(conn, DBCFG_NORMALIZATION) + def finalize_import(self, config): + """ Do any required postprocessing to make the tokenizer data ready + for use. + """ + with connect(self.dsn) as conn: + sqlp = SQLPreprocessor(conn, config) + sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_indices.sql') + + def update_sql_functions(self, config): """ Reimport the SQL functions for this tokenizer. """ @@ -127,6 +142,33 @@ class LegacyTokenizer: modulepath=modulepath) + def check_database(self): + """ Check that the tokenizer is set up correctly. + """ + hint = """\ + The Postgresql extension nominatim.so was not correctly loaded. + + Error: {error} + + Hints: + * Check the output of the CMmake/make installation step + * Does nominatim.so exist? + * Does nominatim.so exist on the database server? + * Can nominatim.so be accessed by the database user? + """ + with connect(self.dsn) as conn: + with conn.cursor() as cur: + try: + out = cur.scalar("SELECT make_standard_name('a')") + except psycopg2.Error as err: + return hint.format(error=str(err)) + + if out != 'a': + return hint.format(error='Unexpected result for make_standard_name()') + + return None + + def migrate_database(self, config): """ Initialise the project directory of an existing database for use with this tokenizer. @@ -134,6 +176,7 @@ class LegacyTokenizer: This is a special migration function for updating existing databases to new software versions. 
""" + self.normalization = config.TERM_NORMALIZATION module_dir = _install_module(config.DATABASE_MODULE_PATH, config.lib_dir.module, config.project_dir / 'module') @@ -158,7 +201,21 @@ class LegacyTokenizer: Analyzers are not thread-safe. You need to instantiate one per thread. """ - return LegacyNameAnalyzer(self.dsn) + normalizer = Transliterator.createFromRules("phrase normalizer", + self.normalization) + return LegacyNameAnalyzer(self.dsn, normalizer) + + + def _install_php(self, config): + """ Install the php script for the tokenizer. + """ + php_file = self.data_dir / "tokenizer.php" + php_file.write_text(dedent("""\ + maxsize: + self.maxsize = len(init_data) def get(self, key, generator): """ Get the item with the given key from the cache. If nothing @@ -386,7 +565,6 @@ class _TokenCache: """ def __init__(self, conn): # various LRU caches - self.postcodes = _LRU(maxsize=32) self.streets = _LRU(maxsize=256) self.places = _LRU(maxsize=128) self.address_terms = _LRU(maxsize=1024) @@ -395,10 +573,21 @@ class _TokenCache: with conn.cursor() as cur: cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text FROM generate_series(1, 100) as i""") - self._cached_housenumbers = {str(r[0]) : r[1] for r in cur} + self._cached_housenumbers = {str(r[0]): r[1] for r in cur} + # For postcodes remember the ones that have already been added + self.postcodes = set() def get_housenumber(self, number): """ Get a housenumber token from the cache. """ return self._cached_housenumbers.get(number) + + + def add_postcode(self, conn, postcode): + """ Make sure the given postcode is in the database. + """ + if postcode not in self.postcodes: + with conn.cursor() as cur: + cur.execute('SELECT create_postcode_id(%s)', (postcode, )) + self.postcodes.add(postcode)