"""
self.loader = ICURuleLoader(config)
- self._install_php(config.lib_dir.php)
+ self._install_php(config.lib_dir.php, overwrite=True)
self._save_config()
if init_db:
with connect(self.dsn) as conn:
self.loader.load_config_from_db(conn)
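+ # Re-create the PHP stub if it went missing, but never overwrite an existing file.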
+ self._install_php(config.lib_dir.php, overwrite=False)
+
def finalize_import(self, config):
""" Do any required postprocessing to make the tokenizer data ready
if not conn.table_exists('search_name'):
return
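# Collect housenumber tokens that no longer occur in any search_name entry.
# Short, purely numeric housenumbers are never considered for cleanup.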
with conn.cursor(name="hnr_counter") as cur:
- cur.execute("""SELECT word_id, word_token FROM word
+ cur.execute("""SELECT DISTINCT word_id, coalesce(info->>'lookup', word_token)
+ FROM word
WHERE type = 'H'
AND NOT EXISTS(SELECT * FROM search_name
WHERE ARRAY[word.word_id] && name_vector)
- AND (char_length(word_token) > 6
- OR word_token not similar to '\\d+')
+ AND (char_length(coalesce(word, word_token)) > 6
+ OR coalesce(word, word_token) not similar to '\\d+')
""")
candidates = {token: wid for wid, token in cur}
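# Remove any candidate that is still in use as a housenumber.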
with conn.cursor(name="hnr_counter") as cur:
for hnr in row[0].split(';'):
candidates.pop(hnr, None)
LOG.info("There are %s outdated housenumbers.", len(candidates))
+ LOG.debug("Outdated housenumbers: %s", candidates.keys())
if candidates:
with conn.cursor() as cur:
cur.execute("""DELETE FROM word WHERE word_id = any(%s)""",
(list(candidates.values()), ))
- def _install_php(self, phpdir):
+ def _install_php(self, phpdir, overwrite=True):
""" Install the php script for the tokenizer.
"""
php_file = self.data_dir / "tokenizer.php"
- php_file.write_text(dedent(f"""\
- <?php
- @define('CONST_Max_Word_Frequency', 10000000);
- @define('CONST_Term_Normalization_Rules', "{self.loader.normalization_rules}");
- @define('CONST_Transliteration', "{self.loader.get_search_rules()}");
- require_once('{phpdir}/tokenizer/icu_tokenizer.php');"""))
+
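+ # Write the stub only when it is missing or an overwrite was explicitly requested.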
+ if not php_file.exists() or overwrite:
+ php_file.write_text(dedent(f"""\
+ <?php
+ @define('CONST_Max_Word_Frequency', 10000000);
+ @define('CONST_Term_Normalization_Rules', "{self.loader.normalization_rules}");
+ @define('CONST_Transliteration', "{self.loader.get_search_rules()}");
+ require_once('{phpdir}/tokenizer/icu_tokenizer.php');"""), encoding='utf-8')
def _save_config(self):
+ [(k, v, part_ids.get(v, None)) for k, v in partial_tokens.items()]
- @staticmethod
- def normalize_postcode(postcode):
+ def normalize_postcode(self, postcode):
""" Convert the postcode to a standardized form.
This function must yield exactly the same result as the SQL function
""" Normalize the housenumber and return the word token and the
canonical form.
"""
- norm_name = self._search_normalized(hnr.name)
- if not norm_name:
- return None, None
+ analyzer = self.token_analysis.analysis.get('@housenumber')
+ result = None, None
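+ # result holds the pair (word token, canonical housenumber form).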
- token = self._cache.housenumbers.get(norm_name)
- if token is None:
- with self.conn.cursor() as cur:
- cur.execute("SELECT getorcreate_hnr_id(%s)", (norm_name, ))
- token = cur.fetchone()[0]
- self._cache.housenumbers[norm_name] = token
+ if analyzer is None:
+ # When no custom analyzer is set, simply normalize and transliterate
+ norm_name = self._search_normalized(hnr.name)
+ if norm_name:
+ result = self._cache.housenumbers.get(norm_name, result)
+ if result[0] is None:
+ with self.conn.cursor() as cur:
+ cur.execute("SELECT getorcreate_hnr_id(%s)", (norm_name, ))
+ result = cur.fetchone()[0], norm_name
+ self._cache.housenumbers[norm_name] = result
+ else:
+ # Otherwise use the analyzer to determine the canonical name.
+ # Per convention we use the first variant as the 'lookup name', the
+ # name that gets saved in the housenumber field of the place.
+ norm_name = analyzer.normalize(hnr.name)
+ if norm_name:
+ result = self._cache.housenumbers.get(norm_name, result)
+ if result[0] is None:
+ variants = analyzer.get_variants_ascii(norm_name)
+ if variants:
+ with self.conn.cursor() as cur:
+ cur.execute("SELECT create_analyzed_hnr_id(%s, %s)",
+ (norm_name, list(variants)))
+ result = cur.fetchone()[0], variants[0]
+ self._cache.housenumbers[norm_name] = result
- return token, norm_name
+ return result
def _compute_partial_tokens(self, name):
continue
with self.conn.cursor() as cur:
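# getorcreate_full_word returns the full-word token id together with the ids
# of the partial word tokens.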
- cur.execute("SELECT (getorcreate_full_word(%s, %s)).*",
+ cur.execute("SELECT * FROM getorcreate_full_word(%s, %s)",
(token_id, variants))
full, part = cur.fetchone()