X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/16daa57e4757e4daeffec1e61630f989727dc563..bb5de9b955e9ff676ea4d3c73cdfa94c60854857:/nominatim/tokenizer/base.py?ds=sidebyside

diff --git a/nominatim/tokenizer/base.py b/nominatim/tokenizer/base.py
index 53289c78..29bcc8e1 100644
--- a/nominatim/tokenizer/base.py
+++ b/nominatim/tokenizer/base.py
@@ -1,14 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2022 by the Nominatim developer community.
+# For a full list of authors see the git log.
 """
-Abstract class defintions for tokenizers. These base classes are here
+Abstract class definitions for tokenizers. These base classes are here
 mainly for documentation purposes.
 """
 from abc import ABC, abstractmethod
-from typing import List, Tuple, Dict, Any
+from typing import List, Tuple, Dict, Any, Optional, Iterable
+from pathlib import Path
 
 from nominatim.config import Configuration
-from nominatim.indexer.place_info import PlaceInfo
-
-# pylint: disable=unnecessary-pass
+from nominatim.db.connection import Connection
+from nominatim.data.place_info import PlaceInfo
+from nominatim.typing import Protocol
 
 class AbstractAnalyzer(ABC):
     """ The analyzer provides the functions for analysing names and building
@@ -22,7 +29,7 @@ class AbstractAnalyzer(ABC):
         return self
 
 
-    def __exit__(self, exc_type, exc_value, traceback) -> None:
+    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
         self.close()
 
 
@@ -30,7 +37,6 @@ class AbstractAnalyzer(ABC):
     def close(self) -> None:
         """ Free all resources used by the analyzer.
         """
-        pass
 
 
     @abstractmethod
@@ -47,10 +53,9 @@ class AbstractAnalyzer(ABC):
 
         Returns:
             The function returns the list of all tuples that could be
-            found for the given words. Each list entry is a tuple of
-            (original word, word token, word id).
+                found for the given words. Each list entry is a tuple of
+                (original word, word token, word id).
         """
-        pass
 
 
    @abstractmethod
@@ -66,7 +71,6 @@ class AbstractAnalyzer(ABC):
         Returns:
             The given postcode after normalization.
         """
-        pass
 
 
     @abstractmethod
@@ -74,11 +78,11 @@ class AbstractAnalyzer(ABC):
         """ Update the tokenizer's postcode tokens from the current content
             of the `location_postcode` table.
         """
-        pass
 
 
     @abstractmethod
-    def update_special_phrases(self, phrases: List[Tuple[str, str, str, str]],
+    def update_special_phrases(self,
+                               phrases: Iterable[Tuple[str, str, str, str]],
                                should_replace: bool) -> None:
         """ Update the tokenizer's special phrase tokens from the given
             list of special phrases.
@@ -90,11 +94,10 @@ class AbstractAnalyzer(ABC):
                 When false, just add the given phrases to the ones
                 that already exist.
         """
-        pass
 
 
     @abstractmethod
-    def add_country_names(self, country_code: str, names: Dict[str, str]):
+    def add_country_names(self, country_code: str, names: Dict[str, str]) -> None:
         """ Add the given names to the tokenizer's list of country tokens.
 
         Arguments:
@@ -102,7 +105,6 @@ class AbstractAnalyzer(ABC):
                 refer to.
             names: Dictionary of name type to name.
         """
-        pass
 
 
     @abstractmethod
@@ -112,11 +114,11 @@ class AbstractAnalyzer(ABC):
             the search index.
 
         Arguments:
-            place: Place information retrived from the database.
+            place: Place information retrieved from the database.
 
         Returns:
             A JSON-serialisable structure that will be handed into
-            the database via the `token_info` field.
+                the database via the `token_info` field.
         """
 
 
@@ -140,12 +142,9 @@ class AbstractTokenizer(ABC):
 
             init_db: When set to False, then initialisation of database
                 tables should be skipped. This option is only required for
-                migration purposes and can be savely ignored by custom
+                migration purposes and can be safely ignored by custom
                 tokenizers.
-
-            TODO: can we move the init_db parameter somewhere else?
         """
-        pass
 
 
     @abstractmethod
@@ -158,7 +157,6 @@ class AbstractTokenizer(ABC):
         Arguments:
             config: Read-only object with configuration options.
         """
-        pass
 
 
     @abstractmethod
@@ -171,7 +169,6 @@ class AbstractTokenizer(ABC):
         Arguments:
             config: Read-only object with configuration options.
         """
-        pass
 
 
     @abstractmethod
@@ -186,25 +183,37 @@ class AbstractTokenizer(ABC):
         Arguments:
             config: Read-only object with configuration options.
         """
-        pass
 
 
     @abstractmethod
-    def check_database(self, config: Configuration) -> str:
+    def check_database(self, config: Configuration) -> Optional[str]:
         """ Check that the database is set up correctly and ready for being
             queried.
 
+        Arguments:
+            config: Read-only object with configuration options.
+
         Returns:
             If an issue was found, return an error message with the
-            description of the issue as well as hints for the user on
-            how to resolve the issue.
+                description of the issue as well as hints for the user on
+                how to resolve the issue. If everything is okay, return `None`.
+        """
+
+
+    @abstractmethod
+    def update_statistics(self, config: Configuration) -> None:
+        """ Recompute any tokenizer statistics necessary for efficient lookup.
+            This function is meant to be called from time to time by the user
+            to improve performance. However, the tokenizer must not depend on
+            it to be called in order to work.
+        """
 
-        Arguments:
-            config: Read-only object with configuration options.
 
-            Return `None`, if no issue was found.
+    @abstractmethod
+    def update_word_tokens(self) -> None:
+        """ Do house-keeping on the tokenizers internal data structures.
+            Remove unused word tokens, resort data etc.
         """
-        pass
 
 
     @abstractmethod
@@ -221,4 +230,24 @@ class AbstractTokenizer(ABC):
             When used outside the with construct, the caller must ensure to
             call the close() function before destructing the analyzer.
         """
-        pass
+
+
+    @abstractmethod
+    def most_frequent_words(self, conn: Connection, num: int) -> List[str]:
+        """ Return a list of the most frequent full words in the database.
+
+        Arguments:
+            conn: Open connection to the database which may be used to
+                retrieve the words.
+            num: Maximum number of words to return.
+        """
+
+
+class TokenizerModule(Protocol):
+    """ Interface that must be exported by modules that implement their
+        own tokenizer.
+    """
+
+    def create(self, dsn: str, data_dir: Path) -> AbstractTokenizer:
+        """ Factory for new tokenizers.
+        """