"""
Tokenizer implementing normalisation as used before Nominatim 4.
"""
from collections import OrderedDict
import logging
import re
import shutil

from icu import Transliterator
import psycopg2
import psycopg2.extras

from nominatim.db.connection import connect
from nominatim.db import properties
from nominatim.db import utils as db_utils
from nominatim.db.sql_preprocessor import SQLPreprocessor
from nominatim.errors import UsageError

DBCFG_NORMALIZATION = "tokenizer_normalization"
DBCFG_MAXWORDFREQ = "tokenizer_maxwordfreq"

LOG = logging.getLogger()

def create(dsn, data_dir):
    """ Create a new instance of the tokenizer provided by this module.
    """
    return LegacyTokenizer(dsn, data_dir)
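
# A minimal usage sketch (hypothetical values; in Nominatim the factory is
# normally called by the tokenizer framework rather than directly):
#
#     tokenizer = create('dbname=nominatim', project_dir / 'tokenizer')
#     tokenizer.init_from_project()
#     with tokenizer.name_analyzer() as analyzer:
#         print(analyzer.normalize('Main Street'))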

def _install_module(config_module_path, src_dir, module_dir):
    """ Copies the PostgreSQL normalisation module into the project
        directory if necessary. For historical reasons the module is
        saved in the '/module' subdirectory and not with the other tokenizer
        data.

        The function detects when the installation is run from the
        build directory. It doesn't touch the module in that case.
    """
    # Custom module locations are simply used as is.
    if config_module_path:
        LOG.info("Using custom path for database module at '%s'", config_module_path)
        return config_module_path

    # Compatibility mode for builddir installations.
    if module_dir.exists() and src_dir.samefile(module_dir):
        LOG.info('Running from build directory. Leaving database module as is.')
        return module_dir

    # In any other case install the module in the project directory.
    if not module_dir.exists():
        module_dir.mkdir()

    destfile = module_dir / 'nominatim.so'
    shutil.copy(str(src_dir / 'nominatim.so'), str(destfile))
    destfile.chmod(0o755)

    LOG.info('Database module installed at %s', str(destfile))

    return module_dir

def _check_module(module_dir, conn):
    """ Try to use the PostgreSQL module to confirm that it is correctly
        installed and accessible from PostgreSQL.
    """
    with conn.cursor() as cur:
        try:
            cur.execute("""CREATE FUNCTION nominatim_test_import_func(text)
                           RETURNS text AS '{}/nominatim.so', 'transliteration'
                           LANGUAGE c IMMUTABLE STRICT;
                           DROP FUNCTION nominatim_test_import_func(text)
                        """.format(module_dir))
        except psycopg2.DatabaseError as err:
            LOG.fatal("Error accessing database module: %s", err)
            raise UsageError("Database module cannot be accessed.") from err

class LegacyTokenizer:
    """ The legacy tokenizer uses a special PostgreSQL module to normalize
        names and queries. The tokenizer thus implements normalization through
        calls to the database.
    """

    def __init__(self, dsn, data_dir):
        self.dsn = dsn
        self.data_dir = data_dir
        self.normalization = None

    def init_new_db(self, config):
        """ Set up a new tokenizer for the database.

            This copies all necessary data into the project directory to make
            sure the tokenizer remains stable even over updates.
        """
        module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                     config.lib_dir.module,
                                     config.project_dir / 'module')

        self.normalization = config.TERM_NORMALIZATION

        with connect(self.dsn) as conn:
            _check_module(module_dir, conn)
            self._save_config(conn, config)
            conn.commit()

        self.update_sql_functions(config)
        self._init_db_tables(config)

    def init_from_project(self):
        """ Initialise the tokenizer from the project directory.
        """
        with connect(self.dsn) as conn:
            self.normalization = properties.get_property(conn, DBCFG_NORMALIZATION)

    def update_sql_functions(self, config):
        """ Reimport the SQL functions for this tokenizer.
        """
        with connect(self.dsn) as conn:
            max_word_freq = properties.get_property(conn, DBCFG_MAXWORDFREQ)
            modulepath = config.DATABASE_MODULE_PATH or \
                         str((config.project_dir / 'module').resolve())
            sqlp = SQLPreprocessor(conn, config)
            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer.sql',
                              max_word_freq=max_word_freq,
                              modulepath=modulepath)

    def migrate_database(self, config):
        """ Initialise the project directory of an existing database for
            use with this tokenizer.

            This is a special migration function for updating existing databases
            to new software versions.
        """
        self.normalization = config.TERM_NORMALIZATION
        module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                     config.lib_dir.module,
                                     config.project_dir / 'module')

        with connect(self.dsn) as conn:
            _check_module(module_dir, conn)
            self._save_config(conn, config)

    def name_analyzer(self):
        """ Create a new analyzer for tokenizing names and queries
            using this tokenizer. Analyzers are context managers and should
            be used accordingly:

                with tokenizer.name_analyzer() as analyzer:
                    analyzer.process_place(place)

            When used outside the with construct, the caller must make sure
            to call the close() function before destructing the analyzer.

            Analyzers are not thread-safe. You need to instantiate one per thread.
        """
        normalizer = Transliterator.createFromRules("phrase normalizer",
                                                    self.normalization)
        return LegacyNameAnalyzer(self.dsn, normalizer)

    def _init_db_tables(self, config):
        """ Set up the word table and fill it with pre-computed word
            frequencies.
        """
        with connect(self.dsn) as conn:
            sqlp = SQLPreprocessor(conn, config)
            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_tables.sql')
            conn.commit()

        LOG.warning("Precomputing word tokens")
        db_utils.execute_file(self.dsn, config.lib_dir.data / 'words.sql')

    def _save_config(self, conn, config):
        """ Save the configuration that needs to remain stable for the given
            database as database properties.
        """
        properties.set_property(conn, DBCFG_NORMALIZATION, self.normalization)
        properties.set_property(conn, DBCFG_MAXWORDFREQ, config.MAX_WORD_FREQUENCY)

class LegacyNameAnalyzer:
    """ The legacy analyzer uses the special PostgreSQL module for
        splitting names.

        Each instance opens a connection to the database to request the
        normalization.
    """

    def __init__(self, dsn, normalizer):
        self.conn = connect(dsn).connection
        self.conn.autocommit = True
        self.normalizer = normalizer
        psycopg2.extras.register_hstore(self.conn)

        self._cache = _TokenCache(self.conn)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """ Free all resources used by the analyzer.
        """
        if self.conn:
            self.conn.close()
            self.conn = None

    def normalize(self, phrase):
        """ Normalize the given phrase, i.e. remove all properties that
            are irrelevant for search.
        """
        return self.normalizer.transliterate(phrase)
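
    # Illustration only (the exact output depends on the configured
    # TERM_NORMALIZATION rules; the results below are hypothetical):
    #
    #     analyzer.normalize('Hauptstraße 5')   # -> 'hauptstrasse 5'
    #     analyzer.normalize('Łódź')            # -> 'lodz'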

    def add_postcodes_from_db(self):
        """ Add postcodes from the location_postcode table to the word table.
        """
        with self.conn.cursor() as cur:
            cur.execute("""SELECT count(create_postcode_id(pc))
                           FROM (SELECT distinct(postcode) as pc
                                 FROM location_postcode) x""")

    def update_special_phrases(self, phrases):
        """ Replace the search index for special phrases with the new phrases.
        """
        norm_phrases = set(((self.normalize(p[0]), p[1], p[2], p[3])
                            for p in phrases))

        with self.conn.cursor() as cur:
            # Get the old phrases.
            existing_phrases = set()
            cur.execute("""SELECT word, class, type, operator FROM word
                           WHERE class != 'place'
                                 OR (type != 'house' AND type != 'postcode')""")
            for label, cls, typ, oper in cur:
                existing_phrases.add((label, cls, typ, oper or '-'))

            to_add = norm_phrases - existing_phrases
            to_delete = existing_phrases - norm_phrases

            if to_add:
                psycopg2.extras.execute_values(
                    cur,
                    """ INSERT INTO word (word_id, word_token, word, class, type,
                                          search_name_count, operator)
                        (SELECT nextval('seq_word'), make_standard_name(name), name,
                                class, type, 0,
                                CASE WHEN op in ('in', 'near') THEN op ELSE null END
                           FROM (VALUES %s) as v(name, class, type, op))""",
                    to_add)

            if to_delete:
                psycopg2.extras.execute_values(
                    cur,
                    """ DELETE FROM word USING (VALUES %s) as v(name, in_class, in_type, op)
                        WHERE word = name and class = in_class and type = in_type
                              and ((op = '-' and operator is null) or op = operator)""",
                    to_delete)

        LOG.info("Total phrases: %s. Added: %s. Deleted: %s",
                 len(norm_phrases), len(to_add), len(to_delete))
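
    # Expected input format (sketch, values hypothetical): every phrase is a
    # 4-tuple of label, class, type and operator, where the operator is one of
    # 'in', 'near' or '-' for "no operator".
    #
    #     analyzer.update_special_phrases([
    #         ('restaurant', 'amenity', 'restaurant', '-'),
    #         ('hotel in', 'tourism', 'hotel', 'in'),
    #     ])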

    def add_country_names(self, country_code, names):
        """ Add names for the given country to the search index.
        """
        with self.conn.cursor() as cur:
            cur.execute(
                """INSERT INTO word (word_id, word_token, country_code)
                   (SELECT nextval('seq_word'), lookup_token, %s
                      FROM (SELECT ' ' || make_standard_name(n) as lookup_token
                            FROM unnest(%s)n) y
                      WHERE NOT EXISTS(SELECT * FROM word
                                       WHERE word_token = lookup_token and country_code = %s))
                """, (country_code, names, country_code))

    def process_place(self, place):
        """ Determine tokenizer information about the given place.

            Returns a JSON-serialisable structure that will be handed into
            the database via the token_info field.
        """
        token_info = _TokenInfo(self._cache)

        names = place.get('name')

        if names:
            token_info.add_names(self.conn, names)

            country_feature = place.get('country_feature')
            if country_feature and re.fullmatch(r'[A-Za-z][A-Za-z]', country_feature):
                self.add_country_names(country_feature.lower(), list(names.values()))

        address = place.get('address')

        if address:
            hnrs = []
            addr_terms = []
            for key, value in address.items():
                if key == 'postcode':
                    self._add_postcode(value)
                elif key in ('housenumber', 'streetnumber', 'conscriptionnumber'):
                    hnrs.append(value)
                elif key == 'street':
                    token_info.add_street(self.conn, value)
                elif key == 'place':
                    token_info.add_place(self.conn, value)
                elif not key.startswith('_') and \
                     key not in ('country', 'full'):
                    addr_terms.append((key, value))

            if hnrs:
                token_info.add_housenumbers(self.conn, hnrs)

            if addr_terms:
                token_info.add_address_terms(self.conn, addr_terms)

        return token_info.data
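
    # Input/output sketch (hypothetical values): `place` carries the name and
    # address tags of an OSM object; the returned dict only holds keys for the
    # information that was actually found.
    #
    #     info = analyzer.process_place({
    #         'name': {'name': 'Main Street'},
    #         'address': {'housenumber': '4;6', 'city': 'Springfield'},
    #     })
    #     # info now contains e.g. 'names', 'hnr_tokens', 'hnr' and 'addr'.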

    def _add_postcode(self, postcode):
        """ Make sure the normalized postcode is present in the word table.
        """
        def _create_postcode_from_db(pcode):
            with self.conn.cursor() as cur:
                cur.execute('SELECT create_postcode_id(%s)', (pcode, ))

        if re.search(r'[:,;]', postcode) is None:
            self._cache.postcodes.get(postcode.strip().upper(), _create_postcode_from_db)
349 """ Collect token information to be sent back to the database.
351 def __init__(self, cache):

    def add_names(self, conn, names):
        """ Add token information for the names of the place.
        """
        with conn.cursor() as cur:
            # Create the token IDs for all names.
            self.data['names'] = cur.scalar("SELECT make_keywords(%s)::text",
                                            (names, ))

    def add_housenumbers(self, conn, hnrs):
        """ Extract housenumber information from the address.
        """
        if len(hnrs) == 1:
            token = self.cache.get_housenumber(hnrs[0])
            if token is not None:
                self.data['hnr_tokens'] = token
                self.data['hnr'] = hnrs[0]
                return

        # split numbers if necessary
        simple_list = []
        for hnr in hnrs:
            simple_list.extend((x.strip() for x in re.split(r'[;,]', hnr)))

        if len(simple_list) > 1:
            simple_list = list(set(simple_list))

        with conn.cursor() as cur:
            cur.execute("SELECT (create_housenumbers(%s)).* ", (simple_list, ))
            self.data['hnr_tokens'], self.data['hnr'] = cur.fetchone()
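
    # Splitting sketch (hypothetical input): a multi-value housenumber such as
    # '4;6, 8' is broken up into the individual numbers ['4', '6', '8'] before
    # the tokens are requested from the database.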

    def add_street(self, conn, street):
        """ Add addr:street match terms.
        """
        def _get_street(name):
            with conn.cursor() as cur:
                return cur.scalar("SELECT word_ids_from_name(%s)::text", (name, ))

        self.data['street'] = self.cache.streets.get(street, _get_street)

    def add_place(self, conn, place):
        """ Add addr:place search and match terms.
        """
        def _get_place(name):
            with conn.cursor() as cur:
                cur.execute("""SELECT (addr_ids_from_name(%s)
                                       || getorcreate_name_id(make_standard_name(%s), ''))::text,
                                      word_ids_from_name(%s)::text""",
                            (name, name, name))
                return cur.fetchone()

        self.data['place_search'], self.data['place_match'] = \
            self.cache.places.get(place, _get_place)

    def add_address_terms(self, conn, terms):
        """ Add additional address terms.
        """
        def _get_address_term(name):
            with conn.cursor() as cur:
                cur.execute("""SELECT addr_ids_from_name(%s)::text,
                                      word_ids_from_name(%s)::text""",
                            (name, name))
                return cur.fetchone()

        tokens = {}
        for key, value in terms:
            tokens[key] = self.cache.address_terms.get(value, _get_address_term)

        self.data['addr'] = tokens
431 """ Least recently used cache that accepts a generator function to
432 produce the item when there is a cache miss.
435 def __init__(self, maxsize=128, init_data=None):
436 self.data = init_data or OrderedDict()
437 self.maxsize = maxsize
438 if init_data is not None and len(init_data) > maxsize:
439 self.maxsize = len(init_data)

    def get(self, key, generator):
        """ Get the item with the given key from the cache. If nothing
            is found in the cache, generate the value through the
            generator function and store it in the cache.
        """
        value = self.data.get(key)
        if value is not None:
            self.data.move_to_end(key)
        else:
            value = generator(key)
            if len(self.data) >= self.maxsize:
                self.data.popitem(last=False)
            self.data[key] = value

        return value
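
    # Usage sketch (hypothetical generator): the generator is only called on a
    # cache miss; later lookups of the same key reuse the stored value.
    #
    #     cache = _LRU(maxsize=2)
    #     cache.get('a', lambda k: k.upper())   # -> 'A' (generated)
    #     cache.get('a', lambda k: k.upper())   # -> 'A' (from the cache)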
459 """ Cache for token information to avoid repeated database queries.
461 This cache is not thread-safe and needs to be instantiated per
464 def __init__(self, conn):
466 self.streets = _LRU(maxsize=256)
467 self.places = _LRU(maxsize=128)
468 self.address_terms = _LRU(maxsize=1024)
470 # Lookup houseunumbers up to 100 and cache them
471 with conn.cursor() as cur:
472 cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text
473 FROM generate_series(1, 100) as i""")
474 self._cached_housenumbers = {str(r[0]) : r[1] for r in cur}
476 # Get postcodes that are already saved
477 postcodes = OrderedDict()
478 with conn.cursor() as cur:
479 cur.execute("""SELECT word FROM word
480 WHERE class ='place' and type = 'postcode'""")
482 postcodes[row[0]] = None
483 self.postcodes = _LRU(maxsize=32, init_data=postcodes)

    def get_housenumber(self, number):
        """ Get a housenumber token from the cache.
        """
        return self._cached_housenumbers.get(number)
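
    # Sketch (assumes an open database connection `conn`): housenumbers 1-100
    # are resolved once at construction time; anything else returns None and is
    # looked up via create_housenumbers() instead.
    #
    #     cache = _TokenCache(conn)
    #     cache.get_housenumber('42')    # pre-computed token string
    #     cache.get_housenumber('10a')   # -> None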