From 1618aba5f282a27fc45af28c4eeebb6dcd28c332 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann Date: Tue, 20 Jul 2021 11:21:13 +0200 Subject: [PATCH] switch country name tokens to new word table layout --- lib-php/tokenizer/legacy_icu_tokenizer.php | 33 +++++++++++---------- lib-sql/tokenizer/icu_tokenizer_tables.sql | 3 ++ nominatim/tokenizer/legacy_icu_tokenizer.py | 18 +++++++---- 3 files changed, 33 insertions(+), 21 deletions(-) diff --git a/lib-php/tokenizer/legacy_icu_tokenizer.php b/lib-php/tokenizer/legacy_icu_tokenizer.php index eac964e4..ea445f23 100644 --- a/lib-php/tokenizer/legacy_icu_tokenizer.php +++ b/lib-php/tokenizer/legacy_icu_tokenizer.php @@ -146,8 +146,8 @@ class Tokenizer private function addTokensFromDB(&$oValidTokens, $aTokens, $sNormQuery) { // Check which tokens we have, get the ID numbers - $sSQL = 'SELECT word_id, word_token, word, class, type, country_code,'; - $sSQL .= ' operator, coalesce(search_name_count, 0) as count'; + $sSQL = 'SELECT word_id, word_token, type,'; + $sSQL .= " info->>'cc' as country"; $sSQL .= ' FROM word WHERE word_token in ('; $sSQL .= join(',', $this->oDB->getDBQuotedList($aTokens)).')'; @@ -156,8 +156,21 @@ class Tokenizer $aDBWords = $this->oDB->getAll($sSQL, null, 'Could not get word tokens.'); foreach ($aDBWords as $aWord) { - $oToken = null; - $iId = (int) $aWord['word_id']; + $iId = (int) $aWord['word_id']; + switch ($aWord['type']) { + case 'C': // country name tokens + if ($aWord['country'] === null + || ($this->aCountryRestriction + && !in_array($aWord['country'], $this->aCountryRestriction)) + ) { + continue 2; + } + $oToken = new Token\Country($iId, $aWord['country']); + break; + default: + continue 2; + } +/* $iId = (int) $aWord['word_id']; if ($aWord['class']) { // Special terms need to appear in their normalized form.
@@ -207,16 +219,9 @@ class Tokenizer $aWord['word_token'], (int) $aWord['count'] ); - } + }*/ - if ($oToken) { - // remove any leading spaces - if ($aWord['word_token'][0] == ' ') { - $oValidTokens->addToken(substr($aWord['word_token'], 1), $oToken); - } else { - $oValidTokens->addToken($aWord['word_token'], $oToken); - } - } + $oValidTokens->addToken($aWord['word_token'], $oToken); } } @@ -234,12 +239,10 @@ class Tokenizer for ($i = 0; $i < $iNumWords; $i++) { $sPhrase = $aWords[$i]; - $aTokens[' '.$sPhrase] = ' '.$sPhrase; $aTokens[$sPhrase] = $sPhrase; for ($j = $i + 1; $j < $iNumWords; $j++) { $sPhrase .= ' '.$aWords[$j]; - $aTokens[' '.$sPhrase] = ' '.$sPhrase; $aTokens[$sPhrase] = $sPhrase; } } diff --git a/lib-sql/tokenizer/icu_tokenizer_tables.sql b/lib-sql/tokenizer/icu_tokenizer_tables.sql index 13e1bdb0..1d70a9c3 100644 --- a/lib-sql/tokenizer/icu_tokenizer_tables.sql +++ b/lib-sql/tokenizer/icu_tokenizer_tables.sql @@ -8,6 +8,9 @@ CREATE TABLE word_icu ( CREATE INDEX idx_word_word_token ON word USING BTREE (word_token) {{db.tablespace.search_index}}; +-- Used when updating country names from the boundary relation. 
+CREATE INDEX idx_word_country_names ON word + USING btree((info->>'cc')) WHERE type = 'C'; GRANT SELECT ON word TO "{{config.DATABASE_WEBUSER}}"; DROP SEQUENCE IF EXISTS seq_word; diff --git a/nominatim/tokenizer/legacy_icu_tokenizer.py b/nominatim/tokenizer/legacy_icu_tokenizer.py index 59ad09aa..32dd6535 100644 --- a/nominatim/tokenizer/legacy_icu_tokenizer.py +++ b/nominatim/tokenizer/legacy_icu_tokenizer.py @@ -371,22 +371,28 @@ class LegacyICUNameAnalyzer: """ word_tokens = set() for name in self._compute_full_names(names): - if name: - word_tokens.add(' ' + self.name_processor.get_search_normalized(name)) + norm_name = self.name_processor.get_search_normalized(name) + if norm_name: + word_tokens.add(norm_name) with self.conn.cursor() as cur: # Get existing names - cur.execute("SELECT word_token FROM word WHERE country_code = %s", + cur.execute("""SELECT word_token FROM word + WHERE type = 'C' and info->>'cc'= %s""", (country_code, )) word_tokens.difference_update((t[0] for t in cur)) + # Only add those names that are not yet in the list. if word_tokens: - cur.execute("""INSERT INTO word (word_id, word_token, country_code, - search_name_count) - (SELECT nextval('seq_word'), token, %s, 0 + cur.execute("""INSERT INTO word (word_token, type, info) + (SELECT token, 'C', json_build_object('cc', %s) FROM unnest(%s) as token) """, (country_code, list(word_tokens))) + # No names are deleted at the moment. + # If deletion is made possible, then the static names from the + # initial 'country_name' table should be kept. + def process_place(self, place): """ Determine tokenizer information about the given place. -- 2.39.5