"""
Tokenizer implementing normalisation as used before Nominatim 4.
"""
from collections import OrderedDict
import logging
import re
import shutil
from textwrap import dedent

from icu import Transliterator
import psycopg2
import psycopg2.extras

from nominatim.db.connection import connect
from nominatim.db import properties
from nominatim.db import utils as db_utils
from nominatim.db.sql_preprocessor import SQLPreprocessor
from nominatim.errors import UsageError

DBCFG_NORMALIZATION = "tokenizer_normalization"
DBCFG_MAXWORDFREQ = "tokenizer_maxwordfreq"

LOG = logging.getLogger()


def create(dsn, data_dir):
    """ Create a new instance of the tokenizer provided by this module.
    """
    return LegacyTokenizer(dsn, data_dir)
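
# Minimal usage sketch (not part of the original source): in Nominatim this factory
# is invoked by the tokenizer loader, so the DSN and data directory below are
# purely illustrative values.
#
#   from pathlib import Path
#
#   tokenizer = create('dbname=nominatim', Path('/srv/nominatim-project/tokenizer'))
#   tokenizer.init_from_project()
#   with tokenizer.name_analyzer() as analyzer:
#       print(analyzer.normalize('Bâton Rouge'))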


def _install_module(config_module_path, src_dir, module_dir):
    """ Copies the PostgreSQL normalisation module into the project
        directory if necessary. For historical reasons the module is
        saved in the '/module' subdirectory and not with the other tokenizer
        data.

        The function detects when the installation is run from the
        build directory. It doesn't touch the module in that case.
    """
    # Custom module locations are simply used as is.
    if config_module_path:
        LOG.info("Using custom path for database module at '%s'", config_module_path)
        return config_module_path

    # Compatibility mode for builddir installations.
    if module_dir.exists() and src_dir.samefile(module_dir):
        LOG.info('Running from build directory. Leaving database module as is.')
        return module_dir

    # In any other case install the module in the project directory.
    if not module_dir.exists():
        module_dir.mkdir()

    destfile = module_dir / 'nominatim.so'
    shutil.copy(str(src_dir / 'nominatim.so'), str(destfile))
    destfile.chmod(0o755)

    LOG.info('Database module installed at %s', str(destfile))

    return module_dir


def _check_module(module_dir, conn):
    """ Try to use the PostgreSQL module to confirm that it is correctly
        installed and accessible from PostgreSQL.
    """
    with conn.cursor() as cur:
        try:
            cur.execute("""CREATE FUNCTION nominatim_test_import_func(text)
                           RETURNS text AS '{}/nominatim.so', 'transliteration'
                           LANGUAGE c IMMUTABLE STRICT;
                           DROP FUNCTION nominatim_test_import_func(text)
                        """.format(module_dir))
        except psycopg2.DatabaseError as err:
            LOG.fatal("Error accessing database module: %s", err)
            raise UsageError("Database module cannot be accessed.") from err


class LegacyTokenizer:
    """ The legacy tokenizer uses a special PostgreSQL module to normalize
        names and queries. The tokenizer thus implements normalization through
        calls to the database.
    """

    def __init__(self, dsn, data_dir):
        self.dsn = dsn
        self.data_dir = data_dir
        self.normalization = None

    def init_new_db(self, config, init_db=True):
        """ Set up a new tokenizer for the database.

            This copies all necessary data into the project directory to make
            sure the tokenizer remains stable even over updates.
        """
        module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                     config.lib_dir.module,
                                     config.project_dir / 'module')

        self.normalization = config.TERM_NORMALIZATION

        self._install_php(config)

        with connect(self.dsn) as conn:
            _check_module(module_dir, conn)
            self._save_config(conn, config)
            conn.commit()

        if init_db:
            self.update_sql_functions(config)
            self._init_db_tables(config)

    def init_from_project(self):
        """ Initialise the tokenizer from the project directory.
        """
        with connect(self.dsn) as conn:
            self.normalization = properties.get_property(conn, DBCFG_NORMALIZATION)

    def finalize_import(self, config):
        """ Do any required postprocessing to make the tokenizer data ready
            for use.
        """
        with connect(self.dsn) as conn:
            sqlp = SQLPreprocessor(conn, config)
            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_indices.sql')

    def update_sql_functions(self, config):
        """ Reimport the SQL functions for this tokenizer.
        """
        with connect(self.dsn) as conn:
            max_word_freq = properties.get_property(conn, DBCFG_MAXWORDFREQ)
            modulepath = config.DATABASE_MODULE_PATH or \
                         str((config.project_dir / 'module').resolve())
            sqlp = SQLPreprocessor(conn, config)
            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer.sql',
                              max_word_freq=max_word_freq,
                              modulepath=modulepath)

    def check_database(self):
        """ Check that the tokenizer is set up correctly.
        """
        hint = """\
             The PostgreSQL extension nominatim.so was not correctly loaded.

             Error: {error}

             Hints:
             * Check the output of the CMake/make installation step
             * Does nominatim.so exist?
             * Does nominatim.so exist on the database server?
             * Can nominatim.so be accessed by the database user?
             """
        with connect(self.dsn) as conn:
            with conn.cursor() as cur:
                try:
                    out = cur.scalar("SELECT make_standard_name('a')")
                except psycopg2.Error as err:
                    return hint.format(error=str(err))

        if out != 'a':
            return hint.format(error='Unexpected result for make_standard_name()')

        return None

    def migrate_database(self, config):
        """ Initialise the project directory of an existing database for
            use with this tokenizer.

            This is a special migration function for updating existing databases
            to new software versions.
        """
        self.normalization = config.TERM_NORMALIZATION
        module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                     config.lib_dir.module,
                                     config.project_dir / 'module')

        with connect(self.dsn) as conn:
            _check_module(module_dir, conn)
            self._save_config(conn, config)

    def name_analyzer(self):
        """ Create a new analyzer for tokenizing names and queries
            using this tokenizer. Analyzers are context managers and should
            be used accordingly:

            ```
            with tokenizer.name_analyzer() as analyzer:
                analyzer.add_country_names(...)
            ```

            When used outside the with construct, the caller must ensure to
            call the close() function before destructing the analyzer.

            Analyzers are not thread-safe. You need to instantiate one per thread.
        """
        normalizer = Transliterator.createFromRules("phrase normalizer",
                                                    self.normalization)
        return LegacyNameAnalyzer(self.dsn, normalizer)

    def _install_php(self, config):
        """ Install the php script for the tokenizer.
        """
        php_file = self.data_dir / "tokenizer.php"
        php_file.write_text(dedent("""\
            <?php
            @define('CONST_Max_Word_Frequency', {0.MAX_WORD_FREQUENCY});
            @define('CONST_Term_Normalization_Rules', "{0.TERM_NORMALIZATION}");
            require_once('{0.lib_dir.php}/tokenizer/legacy_tokenizer.php');
            """.format(config)))

    def _init_db_tables(self, config):
        """ Set up the word table and fill it with pre-computed word
            frequencies.
        """
        with connect(self.dsn) as conn:
            sqlp = SQLPreprocessor(conn, config)
            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_tables.sql')
            conn.commit()

        LOG.warning("Precomputing word tokens")
        db_utils.execute_file(self.dsn, config.lib_dir.data / 'words.sql')

    def _save_config(self, conn, config):
        """ Save the configuration that needs to remain stable for the given
            database as database properties.
        """
        properties.set_property(conn, DBCFG_NORMALIZATION, self.normalization)
        properties.set_property(conn, DBCFG_MAXWORDFREQ, config.MAX_WORD_FREQUENCY)


class LegacyNameAnalyzer:
    """ The legacy analyzer uses the special PostgreSQL module for
        splitting names.

        Each instance opens a connection to the database to request the
        normalization.
    """

    def __init__(self, dsn, normalizer):
        self.conn = connect(dsn).connection
        self.conn.autocommit = True
        self.normalizer = normalizer
        psycopg2.extras.register_hstore(self.conn)

        self._cache = _TokenCache(self.conn)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """ Free all resources used by the analyzer.
        """
        if self.conn:
            self.conn.close()
            self.conn = None

    def get_word_token_info(self, words):
        """ Return token information for the given list of words.
            If a word starts with # it is assumed to be a full name,
            otherwise it is a partial name.

            The function returns a list of tuples with
            (original word, word token, word id).

            The function is used for testing and debugging only
            and is not necessarily efficient.
        """
        with self.conn.cursor() as cur:
            cur.execute("""SELECT t.term, word_token, word_id
                           FROM word, (SELECT unnest(%s::TEXT[]) as term) t
                           WHERE word_token = (CASE
                                   WHEN left(t.term, 1) = '#' THEN
                                     ' ' || make_standard_name(substring(t.term from 2))
                                   ELSE
                                     make_standard_name(t.term)
                                 END)
                                 and class is null and country_code is null""",
                        (words, ))

            return [(r[0], r[1], r[2]) for r in cur]
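
    # Illustrative call (not from the original source): full names are passed with
    # a leading '#', partial terms as-is. The token values below are made up;
    # real results depend on the contents of the word table.
    #
    #   analyzer.get_word_token_info(['#Main Street', 'main'])
    #   # -> [('#Main Street', ' main street', 1234), ('main', 'main', 567)]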

    def normalize(self, phrase):
        """ Normalize the given phrase, i.e. remove all properties that
            are irrelevant for search.
        """
        return self.normalizer.transliterate(phrase)

    @staticmethod
    def normalize_postcode(postcode):
        """ Convert the postcode to a standardized form.

            This function must yield exactly the same result as the SQL function
            'token_normalized_postcode()'.
        """
        return postcode.strip().upper()
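
    # Behaviour sketch (illustrative, derived directly from the code above):
    #
    #   LegacyNameAnalyzer.normalize_postcode(' ab1 2cd ')   # -> 'AB1 2CD'
    #   LegacyNameAnalyzer.normalize_postcode('75001')       # -> '75001'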

    def update_postcodes_from_db(self):
        """ Update postcode tokens in the word table from the location_postcode
            table.
        """
        with self.conn.cursor() as cur:
            # This finds us the rows in location_postcode and word that are
            # missing in the other table.
            cur.execute("""SELECT * FROM
                            (SELECT pc, word FROM
                              (SELECT distinct(postcode) as pc FROM location_postcode) p
                              FULL JOIN
                              (SELECT word FROM word
                                WHERE class ='place' and type = 'postcode') w
                              ON pc = word) x
                           WHERE pc is null or word is null""")

            to_delete = []
            to_add = []

            for postcode, word in cur:
                if postcode is None:
                    to_delete.append(word)
                else:
                    to_add.append(postcode)

            if to_delete:
                cur.execute("""DELETE FROM WORD
                               WHERE class ='place' and type = 'postcode'
                                     and word = any(%s)
                            """, (to_delete, ))
            if to_add:
                cur.execute("""SELECT count(create_postcode_id(pc))
                               FROM unnest(%s) as pc
                            """, (to_add, ))

    def update_special_phrases(self, phrases, should_replace):
        """ Replace the search index for special phrases with the new phrases.
        """
        norm_phrases = set(((self.normalize(p[0]), p[1], p[2], p[3])
                            for p in phrases))

        with self.conn.cursor() as cur:
            # Get the old phrases.
            existing_phrases = set()
            cur.execute("""SELECT word, class, type, operator FROM word
                           WHERE class != 'place'
                                 OR (type != 'house' AND type != 'postcode')""")
            for label, cls, typ, oper in cur:
                existing_phrases.add((label, cls, typ, oper or '-'))

            to_add = norm_phrases - existing_phrases
            to_delete = existing_phrases - norm_phrases

            if to_add:
                psycopg2.extras.execute_values(
                    cur,
                    """ INSERT INTO word (word_id, word_token, word, class, type,
                                          search_name_count, operator)
                        (SELECT nextval('seq_word'), ' ' || make_standard_name(name), name,
                                class, type, 0,
                                CASE WHEN op in ('in', 'near') THEN op ELSE null END
                           FROM (VALUES %s) as v(name, class, type, op))""",
                    to_add)

            if to_delete and should_replace:
                psycopg2.extras.execute_values(
                    cur,
                    """ DELETE FROM word USING (VALUES %s) as v(name, in_class, in_type, op)
                        WHERE word = name and class = in_class and type = in_type
                              and ((op = '-' and operator is null) or op = operator)""",
                    to_delete)

        LOG.info("Total phrases: %s. Added: %s. Deleted: %s",
                 len(norm_phrases), len(to_add), len(to_delete))

    def add_country_names(self, country_code, names):
        """ Add names for the given country to the search index.
        """
        with self.conn.cursor() as cur:
            cur.execute(
                """INSERT INTO word (word_id, word_token, country_code)
                   (SELECT nextval('seq_word'), lookup_token, %s
                      FROM (SELECT DISTINCT ' ' || make_standard_name(n) as lookup_token
                              FROM unnest(%s) AS n) v
                     WHERE NOT EXISTS(SELECT * FROM word
                                      WHERE word_token = lookup_token and country_code = %s))
                """, (country_code, list(names.values()), country_code))

    def process_place(self, place):
        """ Determine tokenizer information about the given place.

            Returns a JSON-serialisable structure that will be handed into
            the database via the token_info field.
        """
        token_info = _TokenInfo(self._cache)

        names = place.get('name')

        if names:
            token_info.add_names(self.conn, names)

            country_feature = place.get('country_feature')
            if country_feature and re.fullmatch(r'[A-Za-z][A-Za-z]', country_feature):
                self.add_country_names(country_feature.lower(), names)

        address = place.get('address')
        if address:
            self._process_place_address(token_info, address)

        return token_info.data
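
    # Illustrative shape of the returned structure (not from the original source;
    # the actual token values come from the database functions called above):
    #
    #   {
    #       'names': '{123,456}',              # from make_keywords()
    #       'hnr_tokens': '{789}',             # housenumber tokens
    #       'hnr': '12;14',                    # normalized housenumber list
    #       'street': '{321}',                 # from word_ids_from_name()
    #       'addr': {'city': ('{11}', '{12}')}
    #   }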

    def _process_place_address(self, token_info, address):
        hnrs = []
        addr_terms = []

        for key, value in address.items():
            if key == 'postcode':
                # Make sure the normalized postcode is present in the word table.
                if re.search(r'[:,;]', value) is None:
                    self._cache.add_postcode(self.conn,
                                             self.normalize_postcode(value))
            elif key in ('housenumber', 'streetnumber', 'conscriptionnumber'):
                hnrs.append(value)
            elif key == 'street':
                token_info.add_street(self.conn, value)
            elif key == 'place':
                token_info.add_place(self.conn, value)
            elif not key.startswith('_') and key not in ('country', 'full'):
                addr_terms.append((key, value))

        if hnrs:
            token_info.add_housenumbers(self.conn, hnrs)

        if addr_terms:
            token_info.add_address_terms(self.conn, addr_terms)


class _TokenInfo:
    """ Collect token information to be sent back to the database.
    """
    def __init__(self, cache):
        self.cache = cache
        self.data = {}

    def add_names(self, conn, names):
        """ Add token information for the names of the place.
        """
        with conn.cursor() as cur:
            # Create the token IDs for all names.
            self.data['names'] = cur.scalar("SELECT make_keywords(%s)::text",
                                            (names, ))

    def add_housenumbers(self, conn, hnrs):
        """ Extract housenumber information from the address.
        """
        if len(hnrs) == 1:
            token = self.cache.get_housenumber(hnrs[0])
            if token is not None:
                self.data['hnr_tokens'] = token
                self.data['hnr'] = hnrs[0]
                return

        # split numbers if necessary
        simple_list = []
        for hnr in hnrs:
            simple_list.extend((x.strip() for x in re.split(r'[;,]', hnr)))

        if len(simple_list) > 1:
            simple_list = list(set(simple_list))

        with conn.cursor() as cur:
            cur.execute("SELECT (create_housenumbers(%s)).* ", (simple_list, ))
            self.data['hnr_tokens'], self.data['hnr'] = cur.fetchone()

    def add_street(self, conn, street):
        """ Add addr:street match terms.
        """
        def _get_street(name):
            with conn.cursor() as cur:
                return cur.scalar("SELECT word_ids_from_name(%s)::text", (name, ))

        self.data['street'] = self.cache.streets.get(street, _get_street)

    def add_place(self, conn, place):
        """ Add addr:place search and match terms.
        """
        def _get_place(name):
            with conn.cursor() as cur:
                cur.execute("""SELECT make_keywords(hstore('name' , %s))::text,
                                      word_ids_from_name(%s)::text""",
                            (name, name))
                return cur.fetchone()

        self.data['place_search'], self.data['place_match'] = \
            self.cache.places.get(place, _get_place)

    def add_address_terms(self, conn, terms):
        """ Add additional address terms.
        """
        def _get_address_term(name):
            with conn.cursor() as cur:
                cur.execute("""SELECT addr_ids_from_name(%s)::text,
                                      word_ids_from_name(%s)::text""",
                            (name, name))
                return cur.fetchone()

        tokens = {}
        for key, value in terms:
            tokens[key] = self.cache.address_terms.get(value, _get_address_term)

        self.data['addr'] = tokens


class _LRU:
    """ Least recently used cache that accepts a generator function to
        produce the item when there is a cache miss.
    """

    def __init__(self, maxsize=128, init_data=None):
        self.data = init_data or OrderedDict()
        self.maxsize = maxsize
        if init_data is not None and len(init_data) > maxsize:
            self.maxsize = len(init_data)

    def get(self, key, generator):
        """ Get the item with the given key from the cache. If nothing
            is found in the cache, generate the value through the
            generator function and store it in the cache.
        """
        value = self.data.get(key)
        if value is not None:
            self.data.move_to_end(key)
        else:
            value = generator(key)
            if len(self.data) >= self.maxsize:
                self.data.popitem(last=False)
            self.data[key] = value

        return value
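
    # Usage sketch (illustrative, not from the original source): the generator is
    # only called on a cache miss and its result is remembered afterwards.
    #
    #   cache = _LRU(maxsize=2)
    #   cache.get('a', lambda key: key.upper())   # miss -> generator runs, returns 'A'
    #   cache.get('a', lambda key: key.upper())   # hit  -> cached 'A' is returned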


class _TokenCache:
    """ Cache for token information to avoid repeated database queries.

        This cache is not thread-safe and needs to be instantiated per
        analyzer.
    """
    def __init__(self, conn):
        # Various LRU caches, one per lookup type.
        self.streets = _LRU(maxsize=256)
        self.places = _LRU(maxsize=128)
        self.address_terms = _LRU(maxsize=1024)

        # Look up housenumbers up to 100 and cache them.
        with conn.cursor() as cur:
            cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text
                           FROM generate_series(1, 100) as i""")
            self._cached_housenumbers = {str(r[0]): r[1] for r in cur}

        # For postcodes remember the ones that have already been added
        self.postcodes = set()

    def get_housenumber(self, number):
        """ Get a housenumber token from the cache.
        """
        return self._cached_housenumbers.get(number)

    def add_postcode(self, conn, postcode):
        """ Make sure the given postcode is in the database.
        """
        if postcode not in self.postcodes:
            with conn.cursor() as cur:
                cur.execute('SELECT create_postcode_id(%s)', (postcode, ))
            self.postcodes.add(postcode)