+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2022 by the Nominatim developer community.
+# For a full list of authors see the git log.
"""
Tokenizer implementing normalisation as used before Nominatim 4 but using
libICU instead of the PostgreSQL module.
"""
-from collections import Counter
import itertools
import json
import logging
from textwrap import dedent
from nominatim.db.connection import connect
-from nominatim.db.properties import set_property, get_property
from nominatim.db.utils import CopyBuffer
from nominatim.db.sql_preprocessor import SQLPreprocessor
+from nominatim.indexer.place_info import PlaceInfo
from nominatim.tokenizer.icu_rule_loader import ICURuleLoader
-from nominatim.tokenizer.icu_name_processor import ICUNameProcessor, ICUNameProcessorRules
from nominatim.tokenizer.base import AbstractAnalyzer, AbstractTokenizer
-DBCFG_MAXWORDFREQ = "tokenizer_maxwordfreq"
DBCFG_TERM_NORMALIZATION = "tokenizer_term_normalization"
LOG = logging.getLogger()
def __init__(self, dsn, data_dir):
self.dsn = dsn
self.data_dir = data_dir
- self.naming_rules = None
- self.term_normalization = None
- self.max_word_frequency = None
+ self.loader = None
def init_new_db(self, config, init_db=True):
- This copies all necessary data in the project directory to make
+ This copies all necessary data into the project directory to make
sure the tokenizer remains stable even over updates.
"""
- loader = ICURuleLoader(config.load_sub_configuration('icu_tokenizer.yaml',
- config='TOKENIZER_CONFIG'))
- self.naming_rules = ICUNameProcessorRules(loader=loader)
- self.term_normalization = config.TERM_NORMALIZATION
- self.max_word_frequency = config.MAX_WORD_FREQUENCY
+ self.loader = ICURuleLoader(config)
self._install_php(config.lib_dir.php)
- self._save_config(config)
+ self._save_config()
if init_db:
self.update_sql_functions(config)
self._init_db_tables(config)
- def init_from_project(self):
+ def init_from_project(self, config):
""" Initialise the tokenizer from the project directory.
"""
+ self.loader = ICURuleLoader(config)
+
with connect(self.dsn) as conn:
- self.naming_rules = ICUNameProcessorRules(conn=conn)
- self.term_normalization = get_property(conn, DBCFG_TERM_NORMALIZATION)
- self.max_word_frequency = get_property(conn, DBCFG_MAXWORDFREQ)
+ self.loader.load_config_from_db(conn)
- def finalize_import(self, _):
+ def finalize_import(self, config):
""" Do any required postprocessing to make the tokenizer data ready
for use.
"""
+ with connect(self.dsn) as conn:
+ sqlp = SQLPreprocessor(conn, config)
+ sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_indices.sql')
def update_sql_functions(self, config):
""" Reimport the SQL functions for this tokenizer.
"""
with connect(self.dsn) as conn:
- max_word_freq = get_property(conn, DBCFG_MAXWORDFREQ)
sqlp = SQLPreprocessor(conn, config)
- sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer.sql',
- max_word_freq=max_word_freq)
+ sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer.sql')
- def check_database(self):
+ def check_database(self, config):
""" Check that the tokenizer is set up correctly.
"""
- self.init_from_project()
+ # Will raise an exception if there is an issue.
+ self.init_from_project(config)
+
- if self.naming_rules is None:
- return "Configuration for tokenizer 'icu' are missing."
+ def update_statistics(self):
+ """ Recompute frequencies for all name words.
+ """
+ with connect(self.dsn) as conn:
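+ # Frequencies are counted from the search index; nothing to do when it does not exist yet.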
+ if conn.table_exists('search_name'):
+ with conn.cursor() as cur:
+ cur.drop_table("word_frequencies")
+ LOG.info("Computing word frequencies")
+ cur.execute("""CREATE TEMP TABLE word_frequencies AS
+ SELECT unnest(name_vector) as id, count(*)
+ FROM search_name GROUP BY id""")
+ cur.execute("CREATE INDEX ON word_frequencies(id)")
+ LOG.info("Update word table with recomputed frequencies")
+ cur.execute("""UPDATE word
+ SET info = info || jsonb_build_object('count', count)
+ FROM word_frequencies WHERE word_id = id""")
+ cur.drop_table("word_frequencies")
+ conn.commit()
+
+
+ def _cleanup_housenumbers(self):
+ """ Remove unused house numbers.
+ """
+ with connect(self.dsn) as conn:
+ if not conn.table_exists('search_name'):
+ return
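+ # Candidates are housenumber tokens that no longer appear in any search_name
+ # entry. Short, purely numeric housenumbers are always kept.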
+ with conn.cursor(name="hnr_counter") as cur:
+ cur.execute("""SELECT DISTINCT word_id, coalesce(info->>'lookup', word_token)
+ FROM word
+ WHERE type = 'H'
+ AND NOT EXISTS(SELECT * FROM search_name
+ WHERE ARRAY[word.word_id] && name_vector)
+ AND (char_length(coalesce(word, word_token)) > 6
+ OR coalesce(word, word_token) not similar to '\\d+')
+ """)
+ candidates = {token: wid for wid, token in cur}
+ with conn.cursor(name="hnr_counter") as cur:
+ cur.execute("""SELECT housenumber FROM placex
+ WHERE housenumber is not null
+ AND (char_length(housenumber) > 6
+ OR housenumber not similar to '\\d+')
+ """)
+ for row in cur:
+ for hnr in row[0].split(';'):
+ candidates.pop(hnr, None)
+ LOG.info("There are %s outdated housenumbers.", len(candidates))
+ LOG.debug("Outdated housenumbers: %s", candidates.keys())
+ if candidates:
+ with conn.cursor() as cur:
+ cur.execute("""DELETE FROM word WHERE word_id = any(%s)""",
+ (list(candidates.values()), ))
+ conn.commit()
- return None
+
+
+ def update_word_tokens(self):
+ """ Remove unused tokens.
+ """
+ LOG.warning("Cleaning up housenumber tokens.")
+ self._cleanup_housenumbers()
+ LOG.warning("Tokenizer house-keeping done.")
def name_analyzer(self):
Analyzers are not thread-safe. You need to instantiate one per thread.
"""
- return LegacyICUNameAnalyzer(self.dsn, ICUNameProcessor(self.naming_rules))
+ return LegacyICUNameAnalyzer(self.dsn, self.loader.make_sanitizer(),
+ self.loader.make_token_analysis())
def _install_php(self, phpdir):
php_file = self.data_dir / "tokenizer.php"
php_file.write_text(dedent(f"""\
<?php
- @define('CONST_Max_Word_Frequency', {self.max_word_frequency});
- @define('CONST_Term_Normalization_Rules', "{self.term_normalization}");
- @define('CONST_Transliteration', "{self.naming_rules.search_rules}");
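+ // The ICU tokenizer no longer uses a word-frequency cutoff; a fixed value is written for compatibility.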
+ @define('CONST_Max_Word_Frequency', 10000000);
+ @define('CONST_Term_Normalization_Rules', "{self.loader.normalization_rules}");
+ @define('CONST_Transliteration', "{self.loader.get_search_rules()}");
require_once('{phpdir}/tokenizer/icu_tokenizer.php');"""))
- def _save_config(self, config):
+ def _save_config(self):
""" Save the configuration that needs to remain stable for the given
database as database properties.
"""
with connect(self.dsn) as conn:
- self.naming_rules.save_rules(conn)
-
- set_property(conn, DBCFG_MAXWORDFREQ, config.MAX_WORD_FREQUENCY)
- set_property(conn, DBCFG_TERM_NORMALIZATION, self.term_normalization)
+ self.loader.save_config_to_db(conn)
def _init_db_tables(self, config):
sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer_tables.sql')
conn.commit()
- LOG.warning("Precomputing word tokens")
-
- # get partial words and their frequencies
- words = self._count_partial_terms(conn)
-
- # copy them back into the word table
- with CopyBuffer() as copystr:
- for term, cnt in words.items():
- copystr.add('w', term, json.dumps({'count': cnt}))
-
- with conn.cursor() as cur:
- copystr.copy_out(cur, 'word',
- columns=['type', 'word_token', 'info'])
- cur.execute("""UPDATE word SET word_id = nextval('seq_word')
- WHERE word_id is null and type = 'w'""")
-
- conn.commit()
-
- def _count_partial_terms(self, conn):
- """ Count the partial terms from the names in the place table.
- """
- words = Counter()
- name_proc = ICUNameProcessor(self.naming_rules)
-
- with conn.cursor(name="words") as cur:
- cur.execute(""" SELECT v, count(*) FROM
- (SELECT svals(name) as v FROM place)x
- WHERE length(v) < 75 GROUP BY v""")
-
- for name, cnt in cur:
- terms = set()
- for word in name_proc.get_variants_ascii(name_proc.get_normalized(name)):
- if ' ' in word:
- terms.update(word.split())
- for term in terms:
- words[term] += cnt
-
- return words
-
class LegacyICUNameAnalyzer(AbstractAnalyzer):
""" The legacy analyzer uses the ICU library for splitting names.
normalization.
"""
- def __init__(self, dsn, name_proc):
+ def __init__(self, dsn, sanitizer, token_analysis):
self.conn = connect(dsn).connection
self.conn.autocommit = True
- self.name_processor = name_proc
+ self.sanitizer = sanitizer
+ self.token_analysis = token_analysis
self._cache = _TokenCache()
self.conn = None
+ def _search_normalized(self, name):
+ """ Return the search token transliteration of the given name.
+ """
+ return self.token_analysis.search.transliterate(name).strip()
+
+
+ def _normalized(self, name):
+ """ Return the normalized version of the given name with all
+ non-relevant information removed.
+ """
+ return self.token_analysis.normalizer.transliterate(name).strip()
+
+
def get_word_token_info(self, words):
""" Return token information for the given list of words.
If a word starts with # it is assumed to be a full name
partial_tokens = {}
for word in words:
if word.startswith('#'):
- full_tokens[word] = self.name_processor.get_search_normalized(word[1:])
+ full_tokens[word] = self._search_normalized(word[1:])
else:
- partial_tokens[word] = self.name_processor.get_search_normalized(word)
+ partial_tokens[word] = self._search_normalized(word)
with self.conn.cursor() as cur:
cur.execute("""SELECT word_token, word_id
return postcode.strip().upper()
- def _make_standard_hnr(self, hnr):
- """ Create a normalised version of a housenumber.
-
- This function takes minor shortcuts on transliteration.
- """
- return self.name_processor.get_search_normalized(hnr)
-
def update_postcodes_from_db(self):
""" Update postcode tokens in the word table from the location_postcode
table.
if postcode is None:
to_delete.append(word)
else:
- copystr.add(self.name_processor.get_search_normalized(postcode),
+ copystr.add(self._search_normalized(postcode),
'P', postcode)
if to_delete:
completely replaced. Otherwise the phrases are added to the
already existing ones.
"""
- norm_phrases = set(((self.name_processor.get_normalized(p[0]), p[1], p[2], p[3])
+ norm_phrases = set(((self._normalized(p[0]), p[1], p[2], p[3])
for p in phrases))
with self.conn.cursor() as cur:
added = 0
with CopyBuffer() as copystr:
for word, cls, typ, oper in to_add:
- term = self.name_processor.get_search_normalized(word)
+ term = self._search_normalized(word)
if term:
copystr.add(term, 'S', word,
json.dumps({'class': cls, 'type': typ,
def add_country_names(self, country_code, names):
- """ Add names for the given country to the search index.
+ """ Add default names for the given country to the search index.
+ """
+ # Make sure any name preprocessing for country names is applied.
+ info = PlaceInfo({'name': names, 'country_code': country_code,
+ 'rank_address': 4, 'class': 'boundary',
+ 'type': 'administrative'})
+ self._add_country_full_names(country_code,
+ self.sanitizer.process_names(info)[0],
+ internal=True)
+
+
+ def _add_country_full_names(self, country_code, names, internal=False):
+ """ Add names for the given country from an already sanitized
+ name list.
"""
word_tokens = set()
- for name in self._compute_full_names(names):
- norm_name = self.name_processor.get_search_normalized(name)
+ for name in names:
+ norm_name = self._search_normalized(name.name)
if norm_name:
word_tokens.add(norm_name)
with self.conn.cursor() as cur:
# Get existing names
- cur.execute("""SELECT word_token FROM word
- WHERE type = 'C' and word = %s""",
+ cur.execute("""SELECT word_token, coalesce(info ? 'internal', false) as is_internal
+ FROM word
+ WHERE type = 'C' and word = %s""",
(country_code, ))
- word_tokens.difference_update((t[0] for t in cur))
+ existing_tokens = {True: set(), False: set()} # internal/external names
+ for word in cur:
+ existing_tokens[word[1]].add(word[0])
+
+ # Delete names that no longer exist.
+ gone_tokens = existing_tokens[internal] - word_tokens
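+ # Internal names replace identical external ones: drop the external
+ # entry so it can be re-inserted with the internal flag below.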
+ if internal:
+ gone_tokens.update(existing_tokens[False] & word_tokens)
+ if gone_tokens:
+ cur.execute("""DELETE FROM word
+ USING unnest(%s) as token
+ WHERE type = 'C' and word = %s
+ and word_token = token""",
+ (list(gone_tokens), country_code))
# Only add those names that are not yet in the list.
- if word_tokens:
- cur.execute("""INSERT INTO word (word_token, type, word)
- (SELECT token, 'C', %s
- FROM unnest(%s) as token)
- """, (country_code, list(word_tokens)))
-
- # No names are deleted at the moment.
- # If deletion is made possible, then the static names from the
- # initial 'country_name' table should be kept.
+ new_tokens = word_tokens - existing_tokens[True]
+ if not internal:
+ new_tokens -= existing_tokens[False]
+ if new_tokens:
+ if internal:
+ sql = """INSERT INTO word (word_token, type, word, info)
+ (SELECT token, 'C', %s, '{"internal": "yes"}'
+ FROM unnest(%s) as token)
+ """
+ else:
+ sql = """INSERT INTO word (word_token, type, word)
+ (SELECT token, 'C', %s
+ FROM unnest(%s) as token)
+ """
+ cur.execute(sql, (country_code, list(new_tokens)))
def process_place(self, place):
""" Determine tokenizer information about the given place.
- Returns a JSON-serialisable structure that will be handed into
+ Returns a JSON-serializable structure that will be handed into
the database via the token_info field.
"""
- token_info = _TokenInfo(self._cache)
+ token_info = _TokenInfo()
- names = place.get('name')
+ names, address = self.sanitizer.process_names(place)
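+ # The sanitizer preprocesses the place and yields separate name and address lists.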
if names:
- fulls, partials = self._compute_name_tokens(names)
+ token_info.set_names(*self._compute_name_tokens(names))
- token_info.add_names(fulls, partials)
+ if place.is_country():
+ self._add_country_full_names(place.country_code, names)
- country_feature = place.get('country_feature')
- if country_feature and re.fullmatch(r'[A-Za-z][A-Za-z]', country_feature):
- self.add_country_names(country_feature.lower(), names)
-
- address = place.get('address')
if address:
self._process_place_address(token_info, address)
- return token_info.data
+ return token_info.to_dict()
def _process_place_address(self, token_info, address):
- hnrs = []
- addr_terms = []
- for key, value in address.items():
- if key == 'postcode':
- self._add_postcode(value)
- elif key in ('housenumber', 'streetnumber', 'conscriptionnumber'):
- hnrs.append(value)
- elif key == 'street':
- token_info.add_street(*self._compute_name_tokens({'name': value}))
- elif key == 'place':
- token_info.add_place(*self._compute_name_tokens({'name': value}))
- elif not key.startswith('_') and \
- key not in ('country', 'full'):
- addr_terms.append((key, *self._compute_name_tokens({'name': value})))
-
- if hnrs:
- hnrs = self._split_housenumbers(hnrs)
- token_info.add_housenumbers(self.conn, [self._make_standard_hnr(n) for n in hnrs])
-
- if addr_terms:
- token_info.add_address_terms(addr_terms)
+ for item in address:
+ if item.kind == 'postcode':
+ self._add_postcode(item.name)
+ elif item.kind == 'housenumber':
+ token_info.add_housenumber(*self._compute_housenumber_token(item))
+ elif item.kind == 'street':
+ token_info.add_street(self._retrieve_full_tokens(item.name))
+ elif item.kind == 'place':
+ if not item.suffix:
+ token_info.add_place(self._compute_partial_tokens(item.name))
+ elif not item.kind.startswith('_') and not item.suffix and \
+ item.kind not in ('country', 'full'):
+ token_info.add_address_term(item.kind, self._compute_partial_tokens(item.name))
+
+
+ def _compute_housenumber_token(self, hnr):
+ """ Normalize the housenumber and return the word token and the
+ canonical form.
+ """
+ analyzer = self.token_analysis.analysis.get('@housenumber')
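+ # The result is the pair (token id, canonical housenumber);
+ # it stays (None, None) when the number cannot be analysed.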
+ result = None, None
+
+ if analyzer is None:
+ # When no custom analyzer is set, simply normalize and transliterate
+ norm_name = self._search_normalized(hnr.name)
+ if norm_name:
+ result = self._cache.housenumbers.get(norm_name, result)
+ if result[0] is None:
+ with self.conn.cursor() as cur:
+ cur.execute("SELECT getorcreate_hnr_id(%s)", (norm_name, ))
+ result = cur.fetchone()[0], norm_name
+ self._cache.housenumbers[norm_name] = result
+ else:
+ # Otherwise use the analyzer to determine the canonical name.
+ # Per convention we use the first variant as the 'lookup name', the
+ # name that gets saved in the housenumber field of the place.
+ norm_name = analyzer.normalize(hnr.name)
+ if norm_name:
+ result = self._cache.housenumbers.get(norm_name, result)
+ if result[0] is None:
+ variants = analyzer.get_variants_ascii(norm_name)
+ if variants:
+ with self.conn.cursor() as cur:
+ cur.execute("SELECT create_analyzed_hnr_id(%s, %s)",
+ (norm_name, list(variants)))
+ result = cur.fetchone()[0], variants[0]
+ self._cache.housenumbers[norm_name] = result
+
+ return result
+
+
+ def _compute_partial_tokens(self, name):
+ """ Normalize the given term, split it into partial words and return
+ the token list for them.
+ """
+ norm_name = self._search_normalized(name)
+
+ tokens = []
+ need_lookup = []
+ for partial in norm_name.split():
+ token = self._cache.partials.get(partial)
+ if token:
+ tokens.append(token)
+ else:
+ need_lookup.append(partial)
+
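+ # Resolve all cache misses with a single call to getorcreate_partial_word().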
+ if need_lookup:
+ with self.conn.cursor() as cur:
+ cur.execute("""SELECT word, getorcreate_partial_word(word)
+ FROM unnest(%s) word""",
+ (need_lookup, ))
+
+ for partial, token in cur:
+ tokens.append(token)
+ self._cache.partials[partial] = token
+
+ return tokens
+
+
+ def _retrieve_full_tokens(self, name):
+ """ Get the full name token for the given name, if it exists.
+ The name is only retrieved for the standard analyser.
+ """
+ norm_name = self._search_normalized(name)
+
+ # return cached if possible
+ if norm_name in self._cache.fulls:
+ return self._cache.fulls[norm_name]
+
+ with self.conn.cursor() as cur:
+ cur.execute("SELECT word_id FROM word WHERE word_token = %s and type = 'W'",
+ (norm_name, ))
+ full = [row[0] for row in cur]
+
+ self._cache.fulls[norm_name] = full
+
+ return full
def _compute_name_tokens(self, names):
""" Computes the full name and partial name tokens for the given
dictionary of names.
"""
- full_names = self._compute_full_names(names)
full_tokens = set()
partial_tokens = set()
- for name in full_names:
- norm_name = self.name_processor.get_normalized(name)
- full, part = self._cache.names.get(norm_name, (None, None))
+ for name in names:
+ analyzer_id = name.get_attr('analyzer')
+ analyzer = self.token_analysis.get_analyzer(analyzer_id)
+ norm_name = analyzer.normalize(name.name)
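+ # Non-default analyzers get a qualified token key so their variants
+ # cannot clash with those of other analyzers.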
+ if analyzer_id is None:
+ token_id = norm_name
+ else:
+ token_id = f'{norm_name}@{analyzer_id}'
+
+ full, part = self._cache.names.get(token_id, (None, None))
if full is None:
- variants = self.name_processor.get_variants_ascii(norm_name)
+ variants = analyzer.get_variants_ascii(norm_name)
if not variants:
continue
with self.conn.cursor() as cur:
- cur.execute("SELECT (getorcreate_full_word(%s, %s)).*",
- (norm_name, variants))
+ cur.execute("SELECT * FROM getorcreate_full_word(%s, %s)",
+ (token_id, variants))
full, part = cur.fetchone()
- self._cache.names[norm_name] = (full, part)
+ self._cache.names[token_id] = (full, part)
full_tokens.add(full)
partial_tokens.update(part)
return full_tokens, partial_tokens
- @staticmethod
- def _compute_full_names(names):
- """ Return the set of all full name word ids to be used with the
- given dictionary of names.
- """
- full_names = set()
- for name in (n.strip() for ns in names.values() for n in re.split('[;,]', ns)):
- if name:
- full_names.add(name)
-
- brace_idx = name.find('(')
- if brace_idx >= 0:
- full_names.add(name[:brace_idx].strip())
-
- return full_names
-
-
def _add_postcode(self, postcode):
""" Make sure the normalized postcode is present in the word table.
"""
postcode = self.normalize_postcode(postcode)
if postcode not in self._cache.postcodes:
- term = self.name_processor.get_search_normalized(postcode)
+ term = self._search_normalized(postcode)
if not term:
return
self._cache.postcodes.add(postcode)
+class _TokenInfo:
+ """ Collect token information to be sent back to the database.
+ """
+ def __init__(self):
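+ # 'names' is stored preformatted as an array literal; all other token sets are converted in to_dict().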
+ self.names = None
+ self.housenumbers = set()
+ self.housenumber_tokens = set()
+ self.street_tokens = set()
+ self.place_tokens = set()
+ self.address_tokens = {}
+
+
@staticmethod
- def _split_housenumbers(hnrs):
- if len(hnrs) > 1 or ',' in hnrs[0] or ';' in hnrs[0]:
- # split numbers if necessary
- simple_list = []
- for hnr in hnrs:
- simple_list.extend((x.strip() for x in re.split(r'[;,]', hnr)))
-
- if len(simple_list) > 1:
- hnrs = list(set(simple_list))
- else:
- hnrs = simple_list
+ def _mk_array(tokens):
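+ # Format tokens as a PostgreSQL array literal, e.g. '{1,2,3}'.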
+ return f"{{{','.join((str(s) for s in tokens))}}}"
- return hnrs
+ def to_dict(self):
+ """ Return the token information in database importable format.
+ """
+ out = {}
+ if self.names:
+ out['names'] = self.names
+ if self.housenumbers:
+ out['hnr'] = ';'.join(self.housenumbers)
+ out['hnr_tokens'] = self._mk_array(self.housenumber_tokens)
-class _TokenInfo:
- """ Collect token information to be sent back to the database.
- """
- def __init__(self, cache):
- self._cache = cache
- self.data = {}
+ if self.street_tokens:
+ out['street'] = self._mk_array(self.street_tokens)
- @staticmethod
- def _mk_array(tokens):
- return '{%s}' % ','.join((str(s) for s in tokens))
+ if self.place_tokens:
+ out['place'] = self._mk_array(self.place_tokens)
+ if self.address_tokens:
+ out['addr'] = self.address_tokens
- def add_names(self, fulls, partials):
+ return out
+
+
+ def set_names(self, fulls, partials):
""" Adds token information for the normalised names.
"""
- self.data['names'] = self._mk_array(itertools.chain(fulls, partials))
+ self.names = self._mk_array(itertools.chain(fulls, partials))
- def add_housenumbers(self, conn, hnrs):
+ def add_housenumber(self, token, hnr):
""" Extract housenumber information from a list of normalised
housenumbers.
"""
- self.data['hnr_tokens'] = self._mk_array(self._cache.get_hnr_tokens(conn, hnrs))
- self.data['hnr'] = ';'.join(hnrs)
+ if token:
+ self.housenumbers.add(hnr)
+ self.housenumber_tokens.add(token)
- def add_street(self, fulls, _):
+ def add_street(self, tokens):
""" Add addr:street match terms.
"""
- if fulls:
- self.data['street'] = self._mk_array(fulls)
+ self.street_tokens.update(tokens)
- def add_place(self, fulls, partials):
+ def add_place(self, tokens):
""" Add addr:place search and match terms.
"""
- if fulls:
- self.data['place_search'] = self._mk_array(itertools.chain(fulls, partials))
- self.data['place_match'] = self._mk_array(fulls)
+ self.place_tokens.update(tokens)
- def add_address_terms(self, terms):
+ def add_address_term(self, key, partials):
""" Add additional address terms.
"""
- tokens = {}
-
- for key, fulls, partials in terms:
- if fulls:
- tokens[key] = [self._mk_array(itertools.chain(fulls, partials)),
- self._mk_array(fulls)]
-
- if tokens:
- self.data['addr'] = tokens
+ if partials:
+ self.address_tokens[key] = self._mk_array(partials)
class _TokenCache:
"""
def __init__(self):
self.names = {}
+ self.partials = {}
+ self.fulls = {}
self.postcodes = set()
self.housenumbers = {}
-
-
- def get_hnr_tokens(self, conn, terms):
- """ Get token ids for a list of housenumbers, looking them up in the
- database if necessary. `terms` is an iterable of normalized
- housenumbers.
- """
- tokens = []
- askdb = []
-
- for term in terms:
- token = self.housenumbers.get(term)
- if token is None:
- askdb.append(term)
- else:
- tokens.append(token)
-
- if askdb:
- with conn.cursor() as cur:
- cur.execute("SELECT nr, getorcreate_hnr_id(nr) FROM unnest(%s) as nr",
- (askdb, ))
- for term, tid in cur:
- self.housenumbers[term] = tid
- tokens.append(tid)
-
- return tokens