X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/5e477e3b5b99da6fc4e54749d4671a6fc8fdcd66..d2853de47cac812193083f9b144a0bc818205dd6:/nominatim/tokenizer/legacy_tokenizer.py?ds=inline

diff --git a/nominatim/tokenizer/legacy_tokenizer.py b/nominatim/tokenizer/legacy_tokenizer.py
index 1b0b2980..1b68a494 100644
--- a/nominatim/tokenizer/legacy_tokenizer.py
+++ b/nominatim/tokenizer/legacy_tokenizer.py
@@ -106,6 +106,7 @@ class LegacyTokenizer(AbstractTokenizer):
             This copies all necessary data in the project directory to make
             sure the tokenizer remains stable even over updates.
         """
+        assert config.project_dir is not None
         module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                      config.lib_dir.module,
                                      config.project_dir / 'module')
@@ -127,6 +128,8 @@ class LegacyTokenizer(AbstractTokenizer):
     def init_from_project(self, config: Configuration) -> None:
         """ Initialise the tokenizer from the project directory.
         """
+        assert config.project_dir is not None
+
         with connect(self.dsn) as conn:
             self.normalization = properties.get_property(conn, DBCFG_NORMALIZATION)
 
@@ -149,6 +152,8 @@ class LegacyTokenizer(AbstractTokenizer):
     def update_sql_functions(self, config: Configuration) -> None:
         """ Reimport the SQL functions for this tokenizer.
         """
+        assert config.project_dir is not None
+
         with connect(self.dsn) as conn:
             max_word_freq = properties.get_property(conn, DBCFG_MAXWORDFREQ)
             modulepath = config.DATABASE_MODULE_PATH or \
@@ -193,6 +198,8 @@ class LegacyTokenizer(AbstractTokenizer):
             This is a special migration function for updating existing
             databases to new software versions.
         """
+        assert config.project_dir is not None
+
         self.normalization = config.TERM_NORMALIZATION
         module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                      config.lib_dir.module,
@@ -249,6 +256,16 @@ class LegacyTokenizer(AbstractTokenizer):
         return LegacyNameAnalyzer(self.dsn, normalizer)
 
 
+    def most_frequent_words(self, conn: Connection, num: int) -> List[str]:
+        """ Return a list of the `num` most frequent full words
+            in the database.
+        """
+        with conn.cursor() as cur:
+            cur.execute(""" SELECT word FROM word WHERE word is not null
+                              ORDER BY search_name_count DESC LIMIT %s""", (num,))
+            return list(s[0] for s in cur)
+
+
     def _install_php(self, config: Configuration, overwrite: bool = True) -> None:
         """ Install the php script for the tokenizer.
         """
@@ -557,14 +574,13 @@ class _TokenInfo:
     def add_street(self, conn: Connection, street: str) -> None:
         """ Add addr:street match terms.
         """
-        def _get_street(name: str) -> List[int]:
+        def _get_street(name: str) -> Optional[str]:
             with conn.cursor() as cur:
-                return cast(List[int],
+                return cast(Optional[str],
                             cur.scalar("SELECT word_ids_from_name(%s)::text",
                                        (name, )))
         tokens = self.cache.streets.get(street, _get_street)
-        if tokens:
-            self.data['street'] = tokens
+        self.data['street'] = tokens or '{}'
 
 
     def add_place(self, conn: Connection, place: str) -> None:
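
For context on the new most_frequent_words method: a minimal sketch of what its query does, run directly against the word table with plain psycopg2. This bypasses the tokenizer class entirely; the DSN is a placeholder and the LIMIT of 10 is arbitrary, neither is part of the commit.

    import psycopg2

    # Placeholder DSN -- point this at an actual Nominatim database.
    conn = psycopg2.connect("dbname=nominatim")

    # Same query as the new method: full words, most-searched first.
    with conn.cursor() as cur:
        cur.execute("""SELECT word FROM word WHERE word is not null
                         ORDER BY search_name_count DESC LIMIT %s""", (10,))
        top_words = [row[0] for row in cur]

    print(top_words)
    conn.close()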
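
The add_street change in the last hunk is behavioural as well as a type fix: word_ids_from_name(...)::text yields a PostgreSQL array literal such as '{1,2,3}', or NULL when no word matches, so the cast target becomes Optional[str], and the token info now always carries a 'street' key, falling back to the empty array literal '{}'. Below is a standalone sketch of just that fallback logic; the helper names and the None input simulating a failed lookup are made up for illustration.

    # Before: the key was only set when the lookup returned something.
    def add_street_old(data, tokens):
        if tokens:
            data['street'] = tokens

    # After: the key is always set; '{}' is PostgreSQL's empty-array literal.
    def add_street_new(data, tokens):
        data['street'] = tokens or '{}'

    old, new = {}, {}
    add_street_old(old, None)    # simulate word_ids_from_name finding nothing
    add_street_new(new, None)
    assert 'street' not in old   # downstream SQL previously saw a missing key
    assert new['street'] == '{}' # downstream SQL now sees an empty array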