X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/fa2bc604685f4d9219844769b733d600d968deff..bef300305e3c445fe483b806b838ad8ce5b689f6:/nominatim/tokenizer/legacy_tokenizer.py

diff --git a/nominatim/tokenizer/legacy_tokenizer.py b/nominatim/tokenizer/legacy_tokenizer.py
index b0cbe9c3..d4068aea 100644
--- a/nominatim/tokenizer/legacy_tokenizer.py
+++ b/nominatim/tokenizer/legacy_tokenizer.py
@@ -1,7 +1,9 @@
 """
 Tokenizer implementing normalisation as used before Nominatim 4.
 """
+from collections import OrderedDict
 import logging
+import re
 import shutil
 
 import psycopg2
@@ -194,6 +196,8 @@ class LegacyNameAnalyzer:
         self.conn.autocommit = True
         psycopg2.extras.register_hstore(self.conn)
 
+        self._cache = _TokenCache(self.conn)
+
     def __enter__(self):
         return self
 
@@ -210,10 +214,223 @@ class LegacyNameAnalyzer:
             self.conn.close()
             self.conn = None
 
+
+    def add_postcodes_from_db(self):
+        """ Add postcodes from the location_postcode table to the word table.
+        """
+        with self.conn.cursor() as cur:
+            cur.execute("""SELECT count(create_postcode_id(pc))
+                           FROM (SELECT distinct(postcode) as pc
+                                 FROM location_postcode) x""")
+
+
+    def add_country_names(self, country_code, names):
+        """ Add names for the given country to the search index.
+        """
+        with self.conn.cursor() as cur:
+            cur.execute(
+                """INSERT INTO word (word_id, word_token, country_code)
+                   (SELECT nextval('seq_word'), lookup_token, %s
+                    FROM (SELECT ' ' || make_standard_name(n) as lookup_token
+                          FROM unnest(%s) n) y
+                    WHERE NOT EXISTS(SELECT * FROM word
+                                     WHERE word_token = lookup_token and country_code = %s))
+                """, (country_code, names, country_code))
+
+
     def process_place(self, place):
         """ Determine tokenizer information about the given place.
 
             Returns a JSON-serialisable structure that will be handed into
             the database via the token_info field.
         """
-        return {}
+        token_info = _TokenInfo(self._cache)
+
+        names = place.get('name')
+
+        if names:
+            token_info.add_names(self.conn, names)
+
+            country_feature = place.get('country_feature')
+            if country_feature and re.fullmatch(r'[A-Za-z][A-Za-z]', country_feature):
+                self.add_country_names(country_feature.lower(), list(names.values()))
+
+        address = place.get('address')
+
+        if address:
+            hnrs = []
+            addr_terms = []
+            for key, value in address.items():
+                if key == 'postcode':
+                    self._add_postcode(value)
+                elif key in ('housenumber', 'streetnumber', 'conscriptionnumber'):
+                    hnrs.append(value)
+                elif key == 'street':
+                    token_info.add_street(self.conn, value)
+                elif key == 'place':
+                    token_info.add_place(self.conn, value)
+                elif not key.startswith('_') and \
+                     key not in ('country', 'full'):
+                    addr_terms.append((key, value))
+
+            if hnrs:
+                token_info.add_housenumbers(self.conn, hnrs)
+
+            if addr_terms:
+                token_info.add_address_terms(self.conn, addr_terms)
+
+        return token_info.data
+
+
+    def _add_postcode(self, postcode):
+        """ Make sure the normalized postcode is present in the word table.
+        """
+        def _create_postcode_from_db(pcode):
+            with self.conn.cursor() as cur:
+                cur.execute('SELECT create_postcode_id(%s)', (pcode, ))
+
+        if re.search(r'[:,;]', postcode) is None:
+            self._cache.postcodes.get(postcode.strip().upper(), _create_postcode_from_db)
+
+
+class _TokenInfo:
+    """ Collect token information to be sent back to the database.
+    """
+    def __init__(self, cache):
+        self.cache = cache
+        self.data = {}
+
+
+    def add_names(self, conn, names):
+        """ Add token information for the names of the place.
+        """
+        with conn.cursor() as cur:
+            # Create the token IDs for all names.
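+            # make_keywords() runs in the database; it returns the word ids
+            # for the given names, cast to text here so the result can be
+            # stored in the JSON-serialisable token_info structure.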
+            self.data['names'] = cur.scalar("SELECT make_keywords(%s)::text",
+                                            (names, ))
+
+
+    def add_housenumbers(self, conn, hnrs):
+        """ Extract housenumber information from the address.
+        """
+        if len(hnrs) == 1:
+            token = self.cache.get_housenumber(hnrs[0])
+            if token is not None:
+                self.data['hnr_tokens'] = token
+                self.data['hnr'] = hnrs[0]
+                return
+
+        # split numbers if necessary
+        simple_list = []
+        for hnr in hnrs:
+            simple_list.extend((x.strip() for x in re.split(r'[;,]', hnr)))
+
+        if len(simple_list) > 1:
+            simple_list = list(set(simple_list))
+
+        with conn.cursor() as cur:
+            cur.execute("SELECT (create_housenumbers(%s)).* ", (simple_list, ))
+            self.data['hnr_tokens'], self.data['hnr'] = cur.fetchone()
+
+
+    def add_street(self, conn, street):
+        """ Add addr:street match terms.
+        """
+        def _get_street(name):
+            with conn.cursor() as cur:
+                return cur.scalar("SELECT word_ids_from_name(%s)::text", (name, ))
+
+        self.data['street'] = self.cache.streets.get(street, _get_street)
+
+
+    def add_place(self, conn, place):
+        """ Add addr:place search and match terms.
+        """
+        def _get_place(name):
+            with conn.cursor() as cur:
+                cur.execute("""SELECT (addr_ids_from_name(%s)
+                                       || getorcreate_name_id(make_standard_name(%s), ''))::text,
+                                      word_ids_from_name(%s)::text""",
+                            (name, name, name))
+                return cur.fetchone()
+
+        self.data['place_search'], self.data['place_match'] = \
+            self.cache.places.get(place, _get_place)
+
+
+    def add_address_terms(self, conn, terms):
+        """ Add additional address terms.
+        """
+        def _get_address_term(name):
+            with conn.cursor() as cur:
+                cur.execute("""SELECT addr_ids_from_name(%s)::text,
+                                      word_ids_from_name(%s)::text""",
+                            (name, name))
+                return cur.fetchone()
+
+        tokens = {}
+        for key, value in terms:
+            tokens[key] = self.cache.address_terms.get(value, _get_address_term)
+
+        self.data['addr'] = tokens
+
+
+class _LRU:
+    """ Least recently used cache that accepts a generator function to
+        produce the item when there is a cache miss.
+    """
+
+    def __init__(self, maxsize=128, init_data=None):
+        self.data = init_data or OrderedDict()
+        self.maxsize = maxsize
+        if init_data is not None and len(init_data) > maxsize:
+            self.maxsize = len(init_data)
+
+    def get(self, key, generator):
+        """ Get the item with the given key from the cache. If nothing
+            is found in the cache, generate the value through the
+            generator function and store it in the cache.
+        """
+        value = self.data.get(key)
+        if value is not None:
+            self.data.move_to_end(key)
+        else:
+            value = generator(key)
+            if len(self.data) >= self.maxsize:
+                self.data.popitem(last=False)
+            self.data[key] = value
+
+        return value
+
+
+class _TokenCache:
+    """ Cache for token information to avoid repeated database queries.
+
+        This cache is not thread-safe and needs to be instantiated per
+        analyzer.
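+
+        On instantiation, tokens for the housenumbers 1 to 100 are
+        precomputed and the postcodes already present in the word table
+        are preloaded.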
+ """ + def __init__(self, conn): + # various LRU caches + self.streets = _LRU(maxsize=256) + self.places = _LRU(maxsize=128) + self.address_terms = _LRU(maxsize=1024) + + # Lookup houseunumbers up to 100 and cache them + with conn.cursor() as cur: + cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text + FROM generate_series(1, 100) as i""") + self._cached_housenumbers = {str(r[0]) : r[1] for r in cur} + + # Get postcodes that are already saved + postcodes = OrderedDict() + with conn.cursor() as cur: + cur.execute("""SELECT word FROM word + WHERE class ='place' and type = 'postcode'""") + for row in cur: + postcodes[row[0]] = None + self.postcodes = _LRU(maxsize=32, init_data=postcodes) + + def get_housenumber(self, number): + """ Get a housenumber token from the cache. + """ + return self._cached_housenumbers.get(number)