From: Sarah Hoffmann
Date: Thu, 20 Jan 2022 19:05:15 +0000 (+0100)
Subject: add new command for cleaning word tokens
X-Git-Tag: v4.1.0~93^2~3
X-Git-Url: https://git.openstreetmap.org./nominatim.git/commitdiff_plain/344a2bfc1a3c7cce1f7517e707e420adbdc41116

add new command for cleaning word tokens

Just pulls outdated housenumbers for the moment.
---

diff --git a/nominatim/clicmd/refresh.py b/nominatim/clicmd/refresh.py
index 4df283f8..c741dcf6 100644
--- a/nominatim/clicmd/refresh.py
+++ b/nominatim/clicmd/refresh.py
@@ -39,6 +39,8 @@ class UpdateRefresh:
         group = parser.add_argument_group('Data arguments')
         group.add_argument('--postcodes', action='store_true',
                            help='Update postcode centroid table')
+        group.add_argument('--word-tokens', action='store_true',
+                           help='Clean up search terms')
         group.add_argument('--word-counts', action='store_true',
                            help='Compute frequency of full-word search terms')
         group.add_argument('--address-levels', action='store_true',
@@ -76,6 +78,10 @@ class UpdateRefresh:
                 LOG.error("The place table doesn't exist. "
                           "Postcode updates on a frozen database is not possible.")

+        if args.word_tokens:
+            tokenizer = self._get_tokenizer(args.config)
+            tokenizer.update_word_tokens()
+
         if args.word_counts:
             LOG.warning('Recompute word statistics')
             self._get_tokenizer(args.config).update_statistics()
diff --git a/nominatim/tokenizer/base.py b/nominatim/tokenizer/base.py
index 980dc69e..f81b3bc2 100644
--- a/nominatim/tokenizer/base.py
+++ b/nominatim/tokenizer/base.py
@@ -209,6 +209,13 @@ class AbstractTokenizer(ABC):
         """


+    @abstractmethod
+    def update_word_tokens(self) -> None:
+        """ Do house-keeping on the tokenizer's internal data structures.
+            Remove unused word tokens, resort data etc.
+        """
+
+
     @abstractmethod
     def name_analyzer(self) -> AbstractAnalyzer:
         """ Create a new analyzer for tokenizing names and queries
diff --git a/nominatim/tokenizer/icu_tokenizer.py b/nominatim/tokenizer/icu_tokenizer.py
index cfbb44e3..da07897b 100644
--- a/nominatim/tokenizer/icu_tokenizer.py
+++ b/nominatim/tokenizer/icu_tokenizer.py
@@ -112,6 +112,39 @@ class LegacyICUTokenizer(AbstractTokenizer):
             conn.commit()


+    def _cleanup_housenumbers(self):
+        """ Remove unused house numbers.
+        """
+        with connect(self.dsn) as conn:
+            with conn.cursor(name="hnr_counter") as cur:
+                cur.execute("""SELECT word_id, word_token FROM word
+                               WHERE type = 'H'
+                                 AND NOT EXISTS(SELECT * FROM search_name
+                                                WHERE ARRAY[word.word_id] && name_vector)
+                                 AND (char_length(word_token) > 6
+                                      OR word_token not similar to '\d+')
+                            """)
+                candidates = {token: wid for wid, token in cur}
+            with conn.cursor(name="hnr_counter") as cur:
+                cur.execute("""SELECT housenumber FROM placex
+                               WHERE housenumber is not null
+                                     AND (char_length(housenumber) > 6
+                                          OR housenumber not similar to '\d+')
+                            """)
+                for row in cur:
+                    for hnr in row[0].split(';'):
+                        candidates.pop(hnr, None)
+        LOG.info("There are %s outdated housenumbers.", len(candidates))
+
+
+    def update_word_tokens(self):
+        """ Remove unused tokens.
+        """
+        LOG.info("Cleaning up housenumber tokens.")
+        self._cleanup_housenumbers()
+        LOG.info("Tokenizer house-keeping done.")
+
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokinzer. Analyzers are context managers and should
diff --git a/nominatim/tokenizer/legacy_tokenizer.py b/nominatim/tokenizer/legacy_tokenizer.py
index 551b0536..7ce6b242 100644
--- a/nominatim/tokenizer/legacy_tokenizer.py
+++ b/nominatim/tokenizer/legacy_tokenizer.py
@@ -211,6 +211,13 @@ class LegacyTokenizer(AbstractTokenizer):
                 cur.drop_table("word_frequencies")
             conn.commit()

+
+    def update_word_tokens(self):
+        """ No house-keeping implemented for the legacy tokenizer.
+        """
+        LOG.info("No tokenizer clean-up available.")
+
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokinzer. Analyzers are context managers and should