X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/b4fec57b6d53f8e8a45c46ff11f13cbcbea1006a..754846d9dc125e3caa8a97b8381918b565a55289:/nominatim/tokenizer/legacy_tokenizer.py?ds=sidebyside

diff --git a/nominatim/tokenizer/legacy_tokenizer.py b/nominatim/tokenizer/legacy_tokenizer.py
index 24af1c3a..8957426b 100644
--- a/nominatim/tokenizer/legacy_tokenizer.py
+++ b/nominatim/tokenizer/legacy_tokenizer.py
@@ -16,6 +16,7 @@ from nominatim.db import properties
 from nominatim.db import utils as db_utils
 from nominatim.db.sql_preprocessor import SQLPreprocessor
 from nominatim.errors import UsageError
+from nominatim.tokenizer.base import AbstractAnalyzer, AbstractTokenizer
 
 DBCFG_NORMALIZATION = "tokenizer_normalization"
 DBCFG_MAXWORDFREQ = "tokenizer_maxwordfreq"
@@ -76,7 +77,7 @@ def _check_module(module_dir, conn):
             raise UsageError("Database module cannot be accessed.") from err
 
 
-class LegacyTokenizer:
+class LegacyTokenizer(AbstractTokenizer):
     """ The legacy tokenizer uses a special PostgreSQL module to normalize
         names and queries. The tokenizer thus implements normalization through
         calls to the database.
@@ -238,7 +239,7 @@ class LegacyTokenizer:
             properties.set_property(conn, DBCFG_MAXWORDFREQ, config.MAX_WORD_FREQUENCY)
 
 
-class LegacyNameAnalyzer:
+class LegacyNameAnalyzer(AbstractAnalyzer):
     """ The legacy analyzer uses the special Postgresql module for
         splitting names.
 
@@ -255,14 +256,6 @@ class LegacyNameAnalyzer:
         self._cache = _TokenCache(self.conn)
 
 
-    def __enter__(self):
-        return self
-
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        self.close()
-
-
     def close(self):
         """ Free all resources used by the analyzer.
         """
@@ -370,8 +363,7 @@ class LegacyNameAnalyzer:
             to_delete = existing_phrases - norm_phrases
 
             if to_add:
-                psycopg2.extras.execute_values(
-                    cur,
+                cur.execute_values(
                     """ INSERT INTO word (word_id, word_token, word, class, type,
                                           search_name_count, operator)
                         (SELECT nextval('seq_word'), ' ' || make_standard_name(name), name,
@@ -381,8 +373,7 @@ class LegacyNameAnalyzer:
                     to_add)
 
             if to_delete and should_replace:
-                psycopg2.extras.execute_values(
-                    cur,
+                cur.execute_values(
                     """ DELETE FROM word USING (VALUES %s) as v(name, in_class, in_type, op)
                         WHERE word = name and class = in_class and type = in_type
                               and ((op = '-' and operator is null) or op = operator)""",
@@ -582,7 +573,7 @@ class _TokenCache:
         with conn.cursor() as cur:
             cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text
                            FROM generate_series(1, 100) as i""")
-            self._cached_housenumbers = {str(r[0]) : r[1] for r in cur}
+            self._cached_housenumbers = {str(r[0]): r[1] for r in cur}
 
         # For postcodes remember the ones that have already been added
         self.postcodes = set()
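
Summary of the patch: LegacyTokenizer and LegacyNameAnalyzer are derived from the newly imported AbstractTokenizer and AbstractAnalyzer base classes, the hand-written __enter__/__exit__ pair is removed from LegacyNameAnalyzer, and execute_values is called directly on the cursor instead of through psycopg2.extras, presumably because Nominatim's cursor wrapper exposes an execute_values method of its own. The snippet below is a minimal, hypothetical sketch of why the context-manager methods can be dropped; it is not the real nominatim/tokenizer/base.py (which this diff does not show), and DemoAnalyzer is an invented stand-in.

# Minimal sketch only -- not the actual nominatim/tokenizer/base.py.
# A base class that implements the context-manager protocol once lets
# subclasses such as LegacyNameAnalyzer drop their own __enter__/__exit__
# methods, as the hunk at old line 255 does.
from abc import ABC, abstractmethod


class AbstractAnalyzer(ABC):
    """ Assumed analyzer interface: entering returns the analyzer,
        exiting closes it.
    """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    @abstractmethod
    def close(self):
        """ Free all resources used by the analyzer. """


class DemoAnalyzer(AbstractAnalyzer):
    """ Hypothetical stand-in for LegacyNameAnalyzer; only close() is needed. """

    def __init__(self):
        self.conn = object()  # placeholder for a database connection

    def close(self):
        self.conn = None


# Usage is unchanged by the refactoring:
with DemoAnalyzer() as analyzer:
    assert analyzer.conn is not None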