X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/d8ed1bfc601c6eb19b2db50df466654e843f0777..ba8ed7967dc8450eb797e627809edafd3859acf7:/nominatim/tokenizer/legacy_tokenizer.py?ds=sidebyside

diff --git a/nominatim/tokenizer/legacy_tokenizer.py b/nominatim/tokenizer/legacy_tokenizer.py
index 6ffdc4ef..2f060b84 100644
--- a/nominatim/tokenizer/legacy_tokenizer.py
+++ b/nominatim/tokenizer/legacy_tokenizer.py
@@ -1,10 +1,13 @@
 """
 Tokenizer implementing normalisation as used before Nominatim 4.
 """
+from collections import OrderedDict
 import logging
 import re
 import shutil
+from textwrap import dedent
 
+from icu import Transliterator
 import psycopg2
 import psycopg2.extras
 
@@ -85,7 +88,7 @@ class LegacyTokenizer:
         self.normalization = None
 
 
-    def init_new_db(self, config):
+    def init_new_db(self, config, init_db=True):
         """ Set up a new tokenizer for the database.
 
             This copies all necessary data in the project directory to make
@@ -97,13 +100,16 @@ class LegacyTokenizer:
 
         self.normalization = config.TERM_NORMALIZATION
 
+        self._install_php(config)
+
         with connect(self.dsn) as conn:
             _check_module(module_dir, conn)
             self._save_config(conn, config)
             conn.commit()
 
-        self.update_sql_functions(config)
-        self._init_db_tables(config)
+        if init_db:
+            self.update_sql_functions(config)
+            self._init_db_tables(config)
 
 
     def init_from_project(self):
@@ -113,6 +119,15 @@ class LegacyTokenizer:
             self.normalization = properties.get_property(conn, DBCFG_NORMALIZATION)
 
 
+    def finalize_import(self, config):
+        """ Do any required postprocessing to make the tokenizer data ready
+            for use.
+        """
+        with connect(self.dsn) as conn:
+            sqlp = SQLPreprocessor(conn, config)
+            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_indices.sql')
+
+
     def update_sql_functions(self, config):
         """ Reimport the SQL functions for this tokenizer.
         """
@@ -126,6 +141,33 @@ class LegacyTokenizer:
                               modulepath=modulepath)
 
 
+    def check_database(self):
+        """ Check that the tokenizer is set up correctly.
+        """
+        hint = """\
+             The PostgreSQL extension nominatim.so was not correctly loaded.
+
+             Error: {error}
+
+             Hints:
+             * Check the output of the CMake/make installation step
+             * Does nominatim.so exist?
+             * Does nominatim.so exist on the database server?
+             * Can nominatim.so be accessed by the database user?
+             """
+        with connect(self.dsn) as conn:
+            with conn.cursor() as cur:
+                try:
+                    out = cur.scalar("SELECT make_standard_name('a')")
+                except psycopg2.Error as err:
+                    return hint.format(error=str(err))
+
+        if out != 'a':
+            return hint.format(error='Unexpected result for make_standard_name()')
+
+        return None
+
+
     def migrate_database(self, config):
         """ Initialise the project directory of an existing database for
             use with this tokenizer.
@@ -133,6 +175,7 @@ class LegacyTokenizer:
             This is a special migration function for updating existing databases
             to new software versions.
         """
+        self.normalization = config.TERM_NORMALIZATION
         module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                      config.lib_dir.module,
                                      config.project_dir / 'module')
@@ -157,7 +200,21 @@ class LegacyTokenizer:
 
             Analyzers are not thread-safe. You need to instantiate one per thread.
         """
-        return LegacyNameAnalyzer(self.dsn)
+        normalizer = Transliterator.createFromRules("phrase normalizer",
+                                                    self.normalization)
+        return LegacyNameAnalyzer(self.dsn, normalizer)
+
+
+    def _install_php(self, config):
+        """ Install the PHP script for the tokenizer.
+ """ + php_file = self.data_dir / "tokenizer.php" + php_file.write_text(dedent("""\ + maxsize: + self.maxsize = len(init_data) + + def get(self, key, generator): + """ Get the item with the given key from the cache. If nothing + is found in the cache, generate the value through the + generator function and store it in the cache. + """ + value = self.data.get(key) + if value is not None: + self.data.move_to_end(key) + else: + value = generator(key) + if len(self.data) >= self.maxsize: + self.data.popitem(last=False) + self.data[key] = value + + return value + + class _TokenCache: """ Cache for token information to avoid repeated database queries. @@ -292,12 +514,25 @@ class _TokenCache: analyzer. """ def __init__(self, conn): + # various LRU caches + self.streets = _LRU(maxsize=256) + self.places = _LRU(maxsize=128) + self.address_terms = _LRU(maxsize=1024) + # Lookup houseunumbers up to 100 and cache them with conn.cursor() as cur: cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text FROM generate_series(1, 100) as i""") self._cached_housenumbers = {str(r[0]) : r[1] for r in cur} + # Get postcodes that are already saved + postcodes = OrderedDict() + with conn.cursor() as cur: + cur.execute("""SELECT word FROM word + WHERE class ='place' and type = 'postcode'""") + for row in cur: + postcodes[row[0]] = None + self.postcodes = _LRU(maxsize=32, init_data=postcodes) def get_housenumber(self, number): """ Get a housenumber token from the cache.