"""
Tokenizer implementing normalisation as used before Nominatim 4.
"""
from collections import OrderedDict
import logging
import re
import shutil
from textwrap import dedent

from icu import Transliterator
import psycopg2
import psycopg2.extras

from nominatim.db.connection import connect
from nominatim.db import properties
from nominatim.db import utils as db_utils
from nominatim.db.sql_preprocessor import SQLPreprocessor
from nominatim.errors import UsageError

DBCFG_NORMALIZATION = "tokenizer_normalization"
DBCFG_MAXWORDFREQ = "tokenizer_maxwordfreq"

LOG = logging.getLogger()


def create(dsn, data_dir):
    """ Create a new instance of the tokenizer provided by this module.
    """
    return LegacyTokenizer(dsn, data_dir)


def _install_module(config_module_path, src_dir, module_dir):
    """ Copies the PostgreSQL normalisation module into the project
        directory if necessary. For historical reasons the module is
        saved in the '/module' subdirectory and not with the other tokenizer
        data.

        The function detects when the installation is run from the
        build directory. It doesn't touch the module in that case.
    """
    # Custom module locations are simply used as is.
    if config_module_path:
        LOG.info("Using custom path for database module at '%s'",
                 config_module_path)
        return config_module_path

    # Compatibility mode for builddir installations.
    if module_dir.exists() and src_dir.samefile(module_dir):
        LOG.info('Running from build directory. Leaving database module as is.')
        return module_dir

    # In any other case install the module in the project directory.
    if not module_dir.exists():
        module_dir.mkdir()

    destfile = module_dir / 'nominatim.so'
    shutil.copy(str(src_dir / 'nominatim.so'), str(destfile))
    destfile.chmod(0o755)

    LOG.info('Database module installed at %s', str(destfile))

    return module_dir


def _check_module(module_dir, conn):
    """ Try to use the PostgreSQL module to confirm that it is correctly
        installed and accessible from PostgreSQL.
    """
    with conn.cursor() as cur:
        try:
            cur.execute("""CREATE FUNCTION nominatim_test_import_func(text)
                           RETURNS text AS '{}/nominatim.so', 'transliteration'
                           LANGUAGE c IMMUTABLE STRICT;
                           DROP FUNCTION nominatim_test_import_func(text)
                        """.format(module_dir))
        except psycopg2.DatabaseError as err:
            LOG.fatal("Error accessing database module: %s", err)
            raise UsageError("Database module cannot be accessed.") from err


class LegacyTokenizer:
    """ The legacy tokenizer uses a special PostgreSQL module to normalize
        names and queries. The tokenizer thus implements normalization through
        calls to the database.
    """

    def __init__(self, dsn, data_dir):
        self.dsn = dsn
        self.data_dir = data_dir
        self.normalization = None


    def init_new_db(self, config, init_db=True):
        """ Set up a new tokenizer for the database.

            This copies all necessary data in the project directory to make
            sure the tokenizer remains stable even over updates.
        """
        module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                     config.lib_dir.module,
                                     config.project_dir / 'module')

        self.normalization = config.TERM_NORMALIZATION

        self._install_php(config)

        with connect(self.dsn) as conn:
            _check_module(module_dir, conn)
            self._save_config(conn, config)
            conn.commit()

        if init_db:
            self.update_sql_functions(config)
            self._init_db_tables(config)


    def init_from_project(self):
        """ Initialise the tokenizer from the project directory.
        """
        with connect(self.dsn) as conn:
            self.normalization = properties.get_property(conn, DBCFG_NORMALIZATION)


    def finalize_import(self, config):
        """ Do any required postprocessing to make the tokenizer data ready
            for use.
        """
        with connect(self.dsn) as conn:
            sqlp = SQLPreprocessor(conn, config)
            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_indices.sql')


    def update_sql_functions(self, config):
        """ Reimport the SQL functions for this tokenizer.
        """
        with connect(self.dsn) as conn:
            max_word_freq = properties.get_property(conn, DBCFG_MAXWORDFREQ)
            modulepath = config.DATABASE_MODULE_PATH or \
                         str((config.project_dir / 'module').resolve())
            sqlp = SQLPreprocessor(conn, config)
            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer.sql',
                              max_word_freq=max_word_freq,
                              modulepath=modulepath)


    def check_database(self):
        """ Check that the tokenizer is set up correctly.
        """
        hint = """\
             The PostgreSQL extension nominatim.so was not correctly loaded.

             Error: {error}

             Hints:
             * Check the output of the CMake/make installation step
             * Does nominatim.so exist?
             * Does nominatim.so exist on the database server?
             * Can nominatim.so be accessed by the database user?
             """
        with connect(self.dsn) as conn:
            with conn.cursor() as cur:
                try:
                    out = cur.scalar("SELECT make_standard_name('a')")
                except psycopg2.Error as err:
                    return hint.format(error=str(err))

        if out != 'a':
            return hint.format(error='Unexpected result for make_standard_name()')

        return None
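
    # check_database() returns None when the module is usable and a
    # human-readable hint string otherwise (illustrative use):
    #
    #     error = tokenizer.check_database()
    #     if error:
    #         print(error)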


    def migrate_database(self, config):
        """ Initialise the project directory of an existing database for
            use with this tokenizer.

            This is a special migration function for updating existing databases
            to new software versions.
        """
        self.normalization = config.TERM_NORMALIZATION
        module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                     config.lib_dir.module,
                                     config.project_dir / 'module')

        with connect(self.dsn) as conn:
            _check_module(module_dir, conn)
            self._save_config(conn, config)


    def name_analyzer(self):
        """ Create a new analyzer for tokenizing names and queries
            using this tokenizer. Analyzers are context managers and should
            be used accordingly:

            ```
            with tokenizer.name_analyzer() as analyzer:
                analyzer.normalize('Main Street')
            ```

            When used outside the with construct, the caller must ensure to
            call the close() function before destructing the analyzer.

            Analyzers are not thread-safe. You need to instantiate one per thread.
        """
        normalizer = Transliterator.createFromRules("phrase normalizer",
                                                    self.normalization)
        return LegacyNameAnalyzer(self.dsn, normalizer)
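
    # A minimal sketch of manual analyzer handling (illustrative only;
    # prefer the 'with' form shown in the docstring above):
    #
    #     analyzer = tokenizer.name_analyzer()
    #     try:
    #         print(analyzer.normalize('Main Street'))
    #     finally:
    #         analyzer.close()   # required when not using 'with'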


    def _install_php(self, config):
        """ Install the php script for the tokenizer.
        """
        php_file = self.data_dir / "tokenizer.php"
        php_file.write_text(dedent("""\
            <?php
            @define('CONST_Max_Word_Frequency', {0.MAX_WORD_FREQUENCY});
            @define('CONST_Term_Normalization_Rules', "{0.TERM_NORMALIZATION}");
            require_once('{0.lib_dir.php}/tokenizer/legacy_tokenizer.php');
            """.format(config)))


    def _init_db_tables(self, config):
        """ Set up the word table and fill it with pre-computed word
            frequencies.
        """
        with connect(self.dsn) as conn:
            sqlp = SQLPreprocessor(conn, config)
            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_tables.sql')
            conn.commit()

        LOG.warning("Precomputing word tokens")
        db_utils.execute_file(self.dsn, config.lib_dir.data / 'words.sql')


    def _save_config(self, conn, config):
        """ Save the configuration that needs to remain stable for the given
            database as database properties.
        """
        properties.set_property(conn, DBCFG_NORMALIZATION, self.normalization)
        properties.set_property(conn, DBCFG_MAXWORDFREQ, config.MAX_WORD_FREQUENCY)


class LegacyNameAnalyzer:
    """ The legacy analyzer uses the special PostgreSQL module for
        splitting names.

        Each instance opens a connection to the database to request the
        normalization.
    """

    def __init__(self, dsn, normalizer):
        self.conn = connect(dsn).connection
        self.conn.autocommit = True
        self.normalizer = normalizer
        psycopg2.extras.register_hstore(self.conn)

        self._cache = _TokenCache(self.conn)


    def __enter__(self):
        return self


    def __exit__(self, exc_type, exc_value, traceback):
        self.close()


    def close(self):
        """ Free all resources used by the analyzer.
        """
        if self.conn:
            self.conn.close()
            self.conn = None


    def normalize(self, phrase):
        """ Normalize the given phrase, i.e. remove all properties that
            are irrelevant for search.
        """
        return self.normalizer.transliterate(phrase)
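
    # Example (illustrative; actual output depends on the configured
    # TERM_NORMALIZATION rules, typically lower-casing and accent folding):
    #
    #     analyzer.normalize('Königstraße')   # e.g. 'konigstrasse'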


    def add_postcodes_from_db(self):
        """ Add postcodes from the location_postcode table to the word table.
        """
        with self.conn.cursor() as cur:
            cur.execute("""SELECT count(create_postcode_id(pc))
                           FROM (SELECT distinct(postcode) as pc
                                 FROM location_postcode) x""")


    def update_special_phrases(self, phrases):
        """ Replace the search index for special phrases with the new phrases.
        """
        norm_phrases = set(((self.normalize(p[0]), p[1], p[2], p[3])
                            for p in phrases))

        with self.conn.cursor() as cur:
            # Get the old phrases.
            existing_phrases = set()
            cur.execute("""SELECT word, class, type, operator FROM word
                           WHERE class != 'place'
                                 OR (type != 'house' AND type != 'postcode')""")
            for label, cls, typ, oper in cur:
                existing_phrases.add((label, cls, typ, oper or '-'))

            to_add = norm_phrases - existing_phrases
            to_delete = existing_phrases - norm_phrases

            if to_add:
                psycopg2.extras.execute_values(
                    cur,
                    """ INSERT INTO word (word_id, word_token, word, class, type,
                                          search_name_count, operator)
                        (SELECT nextval('seq_word'), make_standard_name(name), name,
                                class, type, 0,
                                CASE WHEN op in ('in', 'near') THEN op ELSE null END
                           FROM (VALUES %s) as v(name, class, type, op))""",
                    to_add)

            if to_delete:
                psycopg2.extras.execute_values(
                    cur,
                    """ DELETE FROM word USING (VALUES %s) as v(name, in_class, in_type, op)
                        WHERE word = name and class = in_class and type = in_type
                              and ((op = '-' and operator is null) or op = operator)""",
                    to_delete)

        LOG.info("Total phrases: %s. Added: %s. Deleted: %s",
                 len(norm_phrases), len(to_add), len(to_delete))
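
    # The phrases argument is a sequence of 4-tuples of
    # (phrase, class, type, operator); illustrative values:
    #
    #     analyzer.update_special_phrases([
    #         ('bakery', 'shop', 'bakery', '-'),
    #         ('bakery in', 'shop', 'bakery', 'in'),
    #     ])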


    def add_country_names(self, country_code, names):
        """ Add names for the given country to the search index.
        """
        with self.conn.cursor() as cur:
            cur.execute(
                """INSERT INTO word (word_id, word_token, country_code)
                   (SELECT nextval('seq_word'), lookup_token, %s
                      FROM (SELECT ' ' || make_standard_name(n) as lookup_token
                            FROM unnest(%s)n) y
                      WHERE NOT EXISTS(SELECT * FROM word
                                       WHERE word_token = lookup_token and country_code = %s))
                """, (country_code, names, country_code))


    def process_place(self, place):
        """ Determine tokenizer information about the given place.

            Returns a JSON-serialisable structure that will be handed into
            the database via the token_info field.
        """
        token_info = _TokenInfo(self._cache)

        names = place.get('name')

        if names:
            token_info.add_names(self.conn, names)

            country_feature = place.get('country_feature')
            if country_feature and re.fullmatch(r'[A-Za-z][A-Za-z]', country_feature):
                self.add_country_names(country_feature.lower(), list(names.values()))

        address = place.get('address')

        if address:
            hnrs = []
            addr_terms = []
            for key, value in address.items():
                if key == 'postcode':
                    self._add_postcode(value)
                elif key in ('housenumber', 'streetnumber', 'conscriptionnumber'):
                    hnrs.append(value)
                elif key == 'street':
                    token_info.add_street(self.conn, value)
                elif key == 'place':
                    token_info.add_place(self.conn, value)
                elif not key.startswith('_') and \
                     key not in ('country', 'full'):
                    addr_terms.append((key, value))

            if hnrs:
                token_info.add_housenumbers(self.conn, hnrs)

            if addr_terms:
                token_info.add_address_terms(self.conn, addr_terms)

        return token_info.data
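
    # Sketch of input and output (illustrative; real places carry more tags):
    #
    #     data = analyzer.process_place({
    #         'name': {'name': 'Main Street'},
    #         'address': {'housenumber': '3;5', 'postcode': '12345'},
    #     })
    #     # data now contains JSON-serialisable entries such as
    #     # 'names', 'hnr_tokens' and 'hnr'.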


    def _add_postcode(self, postcode):
        """ Make sure the normalized postcode is present in the word table.
        """
        def _create_postcode_from_db(pcode):
            with self.conn.cursor() as cur:
                cur.execute('SELECT create_postcode_id(%s)', (pcode, ))

        if re.search(r'[:,;]', postcode) is None:
            self._cache.postcodes.get(postcode.strip().upper(), _create_postcode_from_db)
401 """ Collect token information to be sent back to the database.
403 def __init__(self, cache):


    def add_names(self, conn, names):
        """ Add token information for the names of the place.
        """
        with conn.cursor() as cur:
            # Create the token IDs for all names.
            self.data['names'] = cur.scalar("SELECT make_keywords(%s)::text",
                                            (names, ))


    def add_housenumbers(self, conn, hnrs):
        """ Extract housenumber information from the address.
        """
        if len(hnrs) == 1:
            token = self.cache.get_housenumber(hnrs[0])
            if token is not None:
                self.data['hnr_tokens'] = token
                self.data['hnr'] = hnrs[0]
                return

        # split numbers if necessary
        simple_list = []
        for hnr in hnrs:
            simple_list.extend((x.strip() for x in re.split(r'[;,]', hnr)))

        if len(simple_list) > 1:
            simple_list = list(set(simple_list))

        with conn.cursor() as cur:
            cur.execute("SELECT (create_housenumbers(%s)).* ", (simple_list, ))
            self.data['hnr_tokens'], self.data['hnr'] = cur.fetchone()
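
    # Illustrative example: hnrs == ['1;2b', '3'] is split and deduplicated
    # into ['1', '2b', '3'] (order not guaranteed) before the database call.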


    def add_street(self, conn, street):
        """ Add addr:street match terms.
        """
        def _get_street(name):
            with conn.cursor() as cur:
                return cur.scalar("SELECT word_ids_from_name(%s)::text", (name, ))

        self.data['street'] = self.cache.streets.get(street, _get_street)


    def add_place(self, conn, place):
        """ Add addr:place search and match terms.
        """
        def _get_place(name):
            with conn.cursor() as cur:
                cur.execute("""SELECT (addr_ids_from_name(%s)
                                       || getorcreate_name_id(make_standard_name(%s), ''))::text,
                                      word_ids_from_name(%s)::text""",
                            (name, name, name))
                return cur.fetchone()

        self.data['place_search'], self.data['place_match'] = \
            self.cache.places.get(place, _get_place)


    def add_address_terms(self, conn, terms):
        """ Add additional address terms.
        """
        def _get_address_term(name):
            with conn.cursor() as cur:
                cur.execute("""SELECT addr_ids_from_name(%s)::text,
                                      word_ids_from_name(%s)::text""",
                            (name, name))
                return cur.fetchone()

        tokens = {}
        for key, value in terms:
            tokens[key] = self.cache.address_terms.get(value, _get_address_term)

        self.data['addr'] = tokens
483 """ Least recently used cache that accepts a generator function to
484 produce the item when there is a cache miss.
487 def __init__(self, maxsize=128, init_data=None):
488 self.data = init_data or OrderedDict()
489 self.maxsize = maxsize
490 if init_data is not None and len(init_data) > maxsize:
491 self.maxsize = len(init_data)

    def get(self, key, generator):
        """ Get the item with the given key from the cache. If nothing
            is found in the cache, generate the value through the
            generator function and store it in the cache.
        """
        value = self.data.get(key)
        if value is not None:
            self.data.move_to_end(key)
        else:
            value = generator(key)
            if len(self.data) >= self.maxsize:
                self.data.popitem(last=False)
            self.data[key] = value

        return value
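
    # A minimal usage sketch (illustrative only):
    #
    #     cache = _LRU(maxsize=2)
    #     cache.get('a', str.upper)   # miss: generates and stores 'A'
    #     cache.get('a', str.upper)   # hit: 'a' becomes most recently used
    #     cache.get('b', str.upper)
    #     cache.get('c', str.upper)   # evicts 'a', the least recently used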
511 """ Cache for token information to avoid repeated database queries.
513 This cache is not thread-safe and needs to be instantiated per
516 def __init__(self, conn):
518 self.streets = _LRU(maxsize=256)
519 self.places = _LRU(maxsize=128)
520 self.address_terms = _LRU(maxsize=1024)
522 # Lookup houseunumbers up to 100 and cache them
523 with conn.cursor() as cur:
524 cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text
525 FROM generate_series(1, 100) as i""")
526 self._cached_housenumbers = {str(r[0]) : r[1] for r in cur}
528 # Get postcodes that are already saved
529 postcodes = OrderedDict()
530 with conn.cursor() as cur:
531 cur.execute("""SELECT word FROM word
532 WHERE class ='place' and type = 'postcode'""")
534 postcodes[row[0]] = None
535 self.postcodes = _LRU(maxsize=32, init_data=postcodes)

    def get_housenumber(self, number):
        """ Get a housenumber token from the cache.
        """
        return self._cached_housenumbers.get(number)
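
    # Illustrative lookups: tokens for the numbers 1-100 are precomputed,
    # other housenumbers return None and go through the database instead:
    #
    #     cache.get_housenumber('42')   # cached token string
    #     cache.get_housenumber('1a')   # None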