git.openstreetmap.org Git - nominatim.git / blobdiff
Commit: Update lookup.py - Correct spelling for "simultaneously"
File: src/nominatim_db/tokenizer/icu_tokenizer.py
index af03bd39811a7ce327f5b154c17e21b0b6698b6c..83928644a9c3a9964e26af05c81ef061b8cfeb05 100644
@@ -11,15 +11,16 @@ libICU instead of the PostgreSQL module.
 from typing import Optional, Sequence, List, Tuple, Mapping, Any, cast, \
                    Dict, Set, Iterable
 import itertools
-import json
 import logging
 from pathlib import Path
-from textwrap import dedent
 
-from nominatim_core.db.connection import connect, Connection, Cursor
-from nominatim_core.config import Configuration
-from nominatim_core.db.utils import CopyBuffer
-from nominatim_core.db.sql_preprocessor import SQLPreprocessor
+from psycopg.types.json import Jsonb
+from psycopg import sql as pysql
+
+from ..db.connection import connect, Connection, Cursor, server_version_tuple, \
+                            drop_tables, table_exists, execute_scalar
+from ..config import Configuration
+from ..db.sql_preprocessor import SQLPreprocessor
 from ..data.place_info import PlaceInfo
 from ..data.place_name import PlaceName
 from .icu_rule_loader import ICURuleLoader
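For context, the import changes track the move from psycopg2 to psycopg 3: Jsonb adapts Python dicts to jsonb parameters, and psycopg.sql composes identifiers and literals safely. A minimal, self-contained sketch of both idioms (the DSN and values are illustrative only, not part of the patch):

    import psycopg
    from psycopg import sql as pysql
    from psycopg.types.json import Jsonb

    with psycopg.connect("dbname=nominatim") as conn:   # illustrative DSN
        with conn.cursor() as cur:
            # Jsonb wraps a Python dict so it binds as a jsonb value.
            cur.execute("UPDATE word SET info = %s WHERE word_id = %s",
                        (Jsonb({'count': 42}), 1))
            # SQL()/Literal() inline values into statements that do not
            # accept bind parameters (utility statements such as SET).
            cur.execute(pysql.SQL("SET max_parallel_workers_per_gather TO {}")
                             .format(pysql.Literal(4)))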
@@ -31,10 +32,11 @@ DBCFG_TERM_NORMALIZATION = "tokenizer_term_normalization"
 
 LOG = logging.getLogger()
 
-WORD_TYPES =(('country_names', 'C'),
-             ('postcodes', 'P'),
-             ('full_word', 'W'),
-             ('housenumbers', 'H'))
+WORD_TYPES = (('country_names', 'C'),
+              ('postcodes', 'P'),
+              ('full_word', 'W'),
+              ('housenumbers', 'H'))
+
 
 def create(dsn: str, data_dir: Path) -> 'ICUTokenizer':
     """ Create a new instance of the tokenizer provided by this module.
@@ -53,7 +55,6 @@ class ICUTokenizer(AbstractTokenizer):
         self.data_dir = data_dir
         self.loader: Optional[ICURuleLoader] = None
 
-
     def init_new_db(self, config: Configuration, init_db: bool = True) -> None:
         """ Set up a new tokenizer for the database.
 
@@ -62,7 +63,6 @@ class ICUTokenizer(AbstractTokenizer):
         """
         self.loader = ICURuleLoader(config)
 
-        self._install_php(config.lib_dir.php, overwrite=True)
         self._save_config()
 
         if init_db:
@@ -70,7 +70,6 @@ class ICUTokenizer(AbstractTokenizer):
             self._setup_db_tables(config)
             self._create_base_indices(config, 'word')
 
-
     def init_from_project(self, config: Configuration) -> None:
         """ Initialise the tokenizer from the project directory.
         """
@@ -79,16 +78,12 @@ class ICUTokenizer(AbstractTokenizer):
         with connect(self.dsn) as conn:
             self.loader.load_config_from_db(conn)
 
-        self._install_php(config.lib_dir.php, overwrite=False)
-
-
     def finalize_import(self, config: Configuration) -> None:
         """ Do any required postprocessing to make the tokenizer data ready
             for use.
         """
         self._create_lookup_indices(config, 'word')
 
-
     def update_sql_functions(self, config: Configuration) -> None:
         """ Reimport the SQL functions for this tokenizer.
         """
@@ -96,31 +91,28 @@ class ICUTokenizer(AbstractTokenizer):
             sqlp = SQLPreprocessor(conn, config)
             sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer.sql')
 
-
     def check_database(self, config: Configuration) -> None:
         """ Check that the tokenizer is set up correctly.
         """
         # Will throw an error if there is an issue.
         self.init_from_project(config)
 
-
     def update_statistics(self, config: Configuration, threads: int = 2) -> None:
         """ Recompute frequencies for all name words.
         """
         with connect(self.dsn) as conn:
-            if not conn.table_exists('search_name'):
+            if not table_exists(conn, 'search_name'):
                 return
 
             with conn.cursor() as cur:
                 cur.execute('ANALYSE search_name')
                 if threads > 1:
-                    cur.execute('SET max_parallel_workers_per_gather TO %s',
-                                (min(threads, 6),))
+                    cur.execute(pysql.SQL('SET max_parallel_workers_per_gather TO {}')
+                                     .format(pysql.Literal(min(threads, 6),)))
 
-                if conn.server_version_tuple() < (12, 0):
+                if server_version_tuple(conn) < (12, 0):
                     LOG.info('Computing word frequencies')
-                    cur.drop_table('word_frequencies')
-                    cur.drop_table('addressword_frequencies')
+                    drop_tables(conn, 'word_frequencies', 'addressword_frequencies')
                     cur.execute("""CREATE TEMP TABLE word_frequencies AS
                                      SELECT unnest(name_vector) as id, count(*)
                                      FROM search_name GROUP BY id""")
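The psycopg2-era extensions conn.table_exists(), conn.server_version_tuple() and cur.drop_table() become module-level helpers imported from nominatim_db.db.connection. A hedged sketch of what two of them plausibly look like (the real implementations may differ):

    def table_exists(conn: Connection, table: str) -> bool:
        # to_regclass() returns NULL when the relation does not exist.
        with conn.cursor() as cur:
            cur.execute('SELECT to_regclass(%s)', (table,))
            return cur.fetchone()[0] is not None

    def drop_tables(conn: Connection, *names: str) -> None:
        # Table names are identifiers and cannot be bound as parameters,
        # so they are composed in with pysql.Identifier.
        with conn.cursor() as cur:
            for name in names:
                cur.execute(pysql.SQL('DROP TABLE IF EXISTS {}')
                                 .format(pysql.Identifier(name)))

The SET statement above changed for a related reason: PostgreSQL rejects bind parameters in utility statements, and psycopg 3 binds server-side, so the worker count has to be inlined via pysql.Literal.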
@@ -129,40 +121,40 @@ class ICUTokenizer(AbstractTokenizer):
                                      SELECT unnest(nameaddress_vector) as id, count(*)
                                      FROM search_name GROUP BY id""")
                     cur.execute('CREATE INDEX ON addressword_frequencies(id)')
-                    cur.execute("""CREATE OR REPLACE FUNCTION word_freq_update(wid INTEGER,
-                                                                               INOUT info JSONB)
-                                   AS $$
-                                   DECLARE rec RECORD;
-                                   BEGIN
-                                   IF info is null THEN
-                                     info = '{}'::jsonb;
-                                   END IF;
-                                   FOR rec IN SELECT count FROM word_frequencies WHERE id = wid
-                                   LOOP
-                                     info = info || jsonb_build_object('count', rec.count);
-                                   END LOOP;
-                                   FOR rec IN SELECT count FROM addressword_frequencies WHERE id = wid
-                                   LOOP
-                                     info = info || jsonb_build_object('addr_count', rec.count);
-                                   END LOOP;
-                                   IF info = '{}'::jsonb THEN
-                                     info = null;
-                                   END IF;
-                                   END;
-                                   $$ LANGUAGE plpgsql IMMUTABLE;
-                                """)
+                    cur.execute("""
+                        CREATE OR REPLACE FUNCTION word_freq_update(wid INTEGER,
+                                                                    INOUT info JSONB)
+                        AS $$
+                        DECLARE rec RECORD;
+                        BEGIN
+                        IF info is null THEN
+                          info = '{}'::jsonb;
+                        END IF;
+                        FOR rec IN SELECT count FROM word_frequencies WHERE id = wid
+                        LOOP
+                          info = info || jsonb_build_object('count', rec.count);
+                        END LOOP;
+                        FOR rec IN SELECT count FROM addressword_frequencies WHERE id = wid
+                        LOOP
+                          info = info || jsonb_build_object('addr_count', rec.count);
+                        END LOOP;
+                        IF info = '{}'::jsonb THEN
+                          info = null;
+                        END IF;
+                        END;
+                        $$ LANGUAGE plpgsql IMMUTABLE;
+                        """)
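The reindented word_freq_update() merges the per-word counts into the existing JSONB info. A hypothetical call (the id, counts and lookup value are invented for illustration):

    # For a word id occurring 17 times in names and 3 times in addresses:
    info = execute_scalar(conn, 'SELECT word_freq_update(%s, %s)',
                          (1234, Jsonb({'lookup': 'foo'})))
    # info == {'lookup': 'foo', 'count': 17, 'addr_count': 3}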
                     LOG.info('Update word table with recomputed frequencies')
-                    cur.drop_table('tmp_word')
+                    drop_tables(conn, 'tmp_word')
                     cur.execute("""CREATE TABLE tmp_word AS
                                     SELECT word_id, word_token, type, word,
                                            word_freq_update(word_id, info) as info
                                     FROM word
                                 """)
-                    cur.drop_table('word_frequencies')
-                    cur.drop_table('addressword_frequencies')
+                    drop_tables(conn, 'word_frequencies', 'addressword_frequencies')
                 else:
                     LOG.info('Computing word frequencies')
-                    cur.drop_table('word_frequencies')
+                    drop_tables(conn, 'word_frequencies')
                     cur.execute("""
                       CREATE TEMP TABLE word_frequencies AS
                       WITH word_freq AS MATERIALIZED (
@@ -182,7 +174,7 @@ class ICUTokenizer(AbstractTokenizer):
                     cur.execute('CREATE UNIQUE INDEX ON word_frequencies(id) INCLUDE(info)')
                     cur.execute('ANALYSE word_frequencies')
                     LOG.info('Update word table with recomputed frequencies')
-                    cur.drop_table('tmp_word')
+                    drop_tables(conn, 'tmp_word')
                     cur.execute("""CREATE TABLE tmp_word AS
                                     SELECT word_id, word_token, type, word,
                                            (CASE WHEN wf.info is null THEN word.info
@@ -191,7 +183,7 @@ class ICUTokenizer(AbstractTokenizer):
                                     FROM word LEFT JOIN word_frequencies wf
                                          ON word.word_id = wf.id
                                 """)
-                    cur.drop_table('word_frequencies')
+                    drop_tables(conn, 'word_frequencies')
 
             with conn.cursor() as cur:
                 cur.execute('SET max_parallel_workers_per_gather TO 0')
@@ -204,13 +196,11 @@ class ICUTokenizer(AbstractTokenizer):
         self._create_lookup_indices(config, 'tmp_word')
         self._move_temporary_word_table('tmp_word')
 
-
-
     def _cleanup_housenumbers(self) -> None:
         """ Remove unused house numbers.
         """
         with connect(self.dsn) as conn:
-            if not conn.table_exists('search_name'):
+            if not table_exists(conn, 'search_name'):
                 return
             with conn.cursor(name="hnr_counter") as cur:
                 cur.execute("""SELECT DISTINCT word_id, coalesce(info->>'lookup', word_token)
@@ -239,8 +229,6 @@ class ICUTokenizer(AbstractTokenizer):
                                 (list(candidates.values()), ))
                 conn.commit()
 
-
-
     def update_word_tokens(self) -> None:
         """ Remove unused tokens.
         """
@@ -248,7 +236,6 @@ class ICUTokenizer(AbstractTokenizer):
         self._cleanup_housenumbers()
         LOG.warning("Tokenizer house-keeping done.")
 
-
     def name_analyzer(self) -> 'ICUNameAnalyzer':
         """ Create a new analyzer for tokenizing names and queries
         using this tokenizer. Analyzers are context managers and should
@@ -268,7 +255,6 @@ class ICUTokenizer(AbstractTokenizer):
         return ICUNameAnalyzer(self.dsn, self.loader.make_sanitizer(),
                                self.loader.make_token_analysis())
 
-
     def most_frequent_words(self, conn: Connection, num: int) -> List[str]:
         """ Return a list of the `num` most frequent full words
             in the database.
@@ -280,23 +266,6 @@ class ICUTokenizer(AbstractTokenizer):
                              ORDER BY count DESC LIMIT %s""", (num,))
             return list(s[0].split('@')[0] for s in cur)
 
-
-    def _install_php(self, phpdir: Optional[Path], overwrite: bool = True) -> None:
-        """ Install the php script for the tokenizer.
-        """
-        if phpdir is not None:
-            assert self.loader is not None
-            php_file = self.data_dir / "tokenizer.php"
-
-            if not php_file.exists() or overwrite:
-                php_file.write_text(dedent(f"""\
-                    <?php
-                    @define('CONST_Max_Word_Frequency', 10000000);
-                    @define('CONST_Term_Normalization_Rules', "{self.loader.normalization_rules}");
-                    @define('CONST_Transliteration', "{self.loader.get_search_rules()}");
-                    require_once('{phpdir}/tokenizer/icu_tokenizer.php');"""), encoding='utf-8')
-
-
     def _save_config(self) -> None:
         """ Save the configuration that needs to remain stable for the given
             database as database properties.
@@ -305,14 +274,12 @@ class ICUTokenizer(AbstractTokenizer):
         with connect(self.dsn) as conn:
             self.loader.save_config_to_db(conn)
 
-
     def _setup_db_tables(self, config: Configuration) -> None:
         """ Set up the word table and fill it with pre-computed word
             frequencies.
         """
         with connect(self.dsn) as conn:
-            with conn.cursor() as cur:
-                cur.drop_table('word')
+            drop_tables(conn, 'word')
             sqlp = SQLPreprocessor(conn, config)
             sqlp.run_string(conn, """
                 CREATE TABLE word (
@@ -330,7 +297,6 @@ class ICUTokenizer(AbstractTokenizer):
             """)
             conn.commit()
 
-
     def _create_base_indices(self, config: Configuration, table_name: str) -> None:
         """ Set up the word table and fill it with pre-computed word
             frequencies.
@@ -351,27 +317,27 @@ class ICUTokenizer(AbstractTokenizer):
                                 column_type=ctype)
             conn.commit()
 
-
     def _create_lookup_indices(self, config: Configuration, table_name: str) -> None:
         """ Create additional indexes used when running the API.
         """
         with connect(self.dsn) as conn:
             sqlp = SQLPreprocessor(conn, config)
             # Index required for details lookup.
-            sqlp.run_string(conn, """
+            sqlp.run_string(
+                conn,
+                """
                 CREATE INDEX IF NOT EXISTS idx_{{table_name}}_word_id
                   ON {{table_name}} USING BTREE (word_id) {{db.tablespace.search_index}}
-            """,
-            table_name=table_name)
+                """,
+                table_name=table_name)
             conn.commit()
 
-
     def _move_temporary_word_table(self, old: str) -> None:
         """ Rename all tables and indexes used by the tokenizer.
         """
         with connect(self.dsn) as conn:
+            drop_tables(conn, 'word')
             with conn.cursor() as cur:
-                cur.drop_table('word')
                 cur.execute(f"ALTER TABLE {old} RENAME TO word")
                 for idx in ('word_token', 'word_id'):
                     cur.execute(f"""ALTER INDEX idx_{old}_{idx}
@@ -382,8 +348,6 @@ class ICUTokenizer(AbstractTokenizer):
             conn.commit()
 
 
-
-
 class ICUNameAnalyzer(AbstractAnalyzer):
     """ The ICU analyzer uses the ICU library for splitting names.
 
@@ -393,14 +357,13 @@ class ICUNameAnalyzer(AbstractAnalyzer):
 
     def __init__(self, dsn: str, sanitizer: PlaceSanitizer,
                  token_analysis: ICUTokenAnalysis) -> None:
-        self.conn: Optional[Connection] = connect(dsn).connection
+        self.conn: Optional[Connection] = connect(dsn)
         self.conn.autocommit = True
         self.sanitizer = sanitizer
         self.token_analysis = token_analysis
 
         self._cache = _TokenCache()
 
-
     def close(self) -> None:
         """ Free all resources used by the analyzer.
         """
@@ -408,20 +371,17 @@ class ICUNameAnalyzer(AbstractAnalyzer):
             self.conn.close()
             self.conn = None
 
-
     def _search_normalized(self, name: str) -> str:
         """ Return the search token transliteration of the given name.
         """
         return cast(str, self.token_analysis.search.transliterate(name)).strip()
 
-
     def _normalized(self, name: str) -> str:
         """ Return the normalized version of the given name with all
             non-relevant information removed.
         """
         return cast(str, self.token_analysis.normalizer.transliterate(name)).strip()
 
-
     def get_word_token_info(self, words: Sequence[str]) -> List[Tuple[str, str, int]]:
         """ Return token information for the given list of words.
             If a word starts with # it is assumed to be a full name
@@ -453,8 +413,7 @@ class ICUNameAnalyzer(AbstractAnalyzer):
             part_ids = {r[0]: r[1] for r in cur}
 
         return [(k, v, full_ids.get(v, None)) for k, v in full_tokens.items()] \
-               + [(k, v, part_ids.get(v, None)) for k, v in partial_tokens.items()]
-
+            + [(k, v, part_ids.get(v, None)) for k, v in partial_tokens.items()]
 
     def normalize_postcode(self, postcode: str) -> str:
         """ Convert the postcode to a standardized form.
@@ -464,7 +423,6 @@ class ICUNameAnalyzer(AbstractAnalyzer):
         """
         return postcode.strip().upper()
 
-
     def update_postcodes_from_db(self) -> None:
         """ Update postcode tokens in the word table from the location_postcode
             table.
@@ -535,12 +493,7 @@ class ICUNameAnalyzer(AbstractAnalyzer):
 
         if terms:
             with self.conn.cursor() as cur:
-                cur.execute_values("""SELECT create_postcode_word(pc, var)
-                                      FROM (VALUES %s) AS v(pc, var)""",
-                                   terms)
-
-
-
+                cur.executemany("""SELECT create_postcode_word(%s, %s)""", terms)
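psycopg2's execute_values() helper has no psycopg 3 counterpart; executemany() takes over and, in recent psycopg 3 releases, batches the statements through pipeline mode. Illustrative usage with invented terms:

    terms = [('12345', '12345'), ('EC1A 1BB', 'EC1A1BB')]  # invented samples
    with conn.cursor() as cur:
        cur.executemany('SELECT create_postcode_word(%s, %s)', terms)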
 
     def update_special_phrases(self, phrases: Iterable[Tuple[str, str, str, str]],
                                should_replace: bool) -> None:
@@ -571,7 +524,6 @@ class ICUNameAnalyzer(AbstractAnalyzer):
         LOG.info("Total phrases: %s. Added: %s. Deleted: %s",
                  len(norm_phrases), added, deleted)
 
-
     def _add_special_phrases(self, cursor: Cursor,
                              new_phrases: Set[Tuple[str, str, str, str]],
                              existing_phrases: Set[Tuple[str, str, str, str]]) -> int:
@@ -580,40 +532,35 @@ class ICUNameAnalyzer(AbstractAnalyzer):
         to_add = new_phrases - existing_phrases
 
         added = 0
-        with CopyBuffer() as copystr:
+        with cursor.copy('COPY word(word_token, type, word, info) FROM STDIN') as copy:
             for word, cls, typ, oper in to_add:
                 term = self._search_normalized(word)
                 if term:
-                    copystr.add(term, 'S', word,
-                                json.dumps({'class': cls, 'type': typ,
-                                            'op': oper if oper in ('in', 'near') else None}))
+                    copy.write_row((term, 'S', word,
+                                    Jsonb({'class': cls, 'type': typ,
+                                           'op': oper if oper in ('in', 'near') else None})))
                     added += 1
 
-            copystr.copy_out(cursor, 'word',
-                             columns=['word_token', 'type', 'word', 'info'])
-
         return added
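CopyBuffer hand-encoded rows for psycopg2's COPY support; psycopg 3's cursor.copy() context manager does the escaping itself, and write_row() adapts each Python value, including Jsonb, so the old json.dumps() call becomes unnecessary. A stand-alone sketch with an invented row:

    with conn.cursor() as cur:
        with cur.copy('COPY word(word_token, type, word, info) FROM STDIN') as copy:
            # write_row() handles quoting and type adaptation.
            copy.write_row(('pub', 'S', 'pub',
                            Jsonb({'class': 'amenity', 'type': 'pub',
                                   'op': None})))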
 
-
     def _remove_special_phrases(self, cursor: Cursor,
-                             new_phrases: Set[Tuple[str, str, str, str]],
-                             existing_phrases: Set[Tuple[str, str, str, str]]) -> int:
+                                new_phrases: Set[Tuple[str, str, str, str]],
+                                existing_phrases: Set[Tuple[str, str, str, str]]) -> int:
         """ Remove all phrases from the database that are no longer in the
             new phrase list.
         """
         to_delete = existing_phrases - new_phrases
 
         if to_delete:
-            cursor.execute_values(
-                """ DELETE FROM word USING (VALUES %s) as v(name, in_class, in_type, op)
-                    WHERE type = 'S' and word = name
-                          and info->>'class' = in_class and info->>'type' = in_type
-                          and ((op = '-' and info->>'op' is null) or op = info->>'op')
+            cursor.executemany(
+                """ DELETE FROM word
+                      WHERE type = 'S' and word = %s
+                            and info->>'class' = %s and info->>'type' = %s
+                            and %s = coalesce(info->>'op', '-')
                 """, to_delete)
 
         return len(to_delete)
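Note the coalesce() in the rewritten DELETE: the phrase tuples encode "no operator" as '-', while the word table stores it as a missing 'op' key, so coalesce(info->>'op', '-') reconciles the two conventions. An invented tuple shows the effect:

    # This deletes a phrase stored with info->>'op' IS NULL, because
    # coalesce() maps the missing key to the '-' placeholder in the tuple.
    cursor.executemany(
        """ DELETE FROM word
              WHERE type = 'S' and word = %s
                    and info->>'class' = %s and info->>'type' = %s
                    and %s = coalesce(info->>'op', '-')
        """, [('pub', 'amenity', 'pub', '-')])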
 
-
     def add_country_names(self, country_code: str, names: Mapping[str, str]) -> None:
         """ Add default names for the given country to the search index.
         """
@@ -625,7 +572,6 @@ class ICUNameAnalyzer(AbstractAnalyzer):
                                      self.sanitizer.process_names(info)[0],
                                      internal=True)
 
-
     def _add_country_full_names(self, country_code: str, names: Sequence[PlaceName],
                                 internal: bool = False) -> None:
         """ Add names for the given country from an already sanitized
@@ -655,7 +601,7 @@ class ICUNameAnalyzer(AbstractAnalyzer):
                 gone_tokens.update(existing_tokens[False] & word_tokens)
             if gone_tokens:
                 cur.execute("""DELETE FROM word
-                               USING unnest(%s) as token
+                               USING unnest(%s::text[]) as token
                                WHERE type = 'C' and word = %s
                                      and word_token = token""",
                             (list(gone_tokens), country_code))
@@ -668,16 +614,15 @@ class ICUNameAnalyzer(AbstractAnalyzer):
                 if internal:
                     sql = """INSERT INTO word (word_token, type, word, info)
                                (SELECT token, 'C', %s, '{"internal": "yes"}'
-                                  FROM unnest(%s) as token)
+                                  FROM unnest(%s::text[]) as token)
                            """
                 else:
                     sql = """INSERT INTO word (word_token, type, word)
                                    (SELECT token, 'C', %s
-                                    FROM unnest(%s) as token)
+                                    FROM unnest(%s::text[]) as token)
                           """
                 cur.execute(sql, (country_code, list(new_tokens)))
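The new ::text[] casts matter under psycopg 3's server-side binding: an untyped array parameter can make unnest() fail with "could not determine polymorphic type". A minimal sketch of the pattern:

    # The explicit cast gives the bound Python list a concrete element type.
    cur.execute('SELECT token FROM unnest(%s::text[]) AS token',
                (['deutschland', 'germany'],))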
 
-
     def process_place(self, place: PlaceInfo) -> Mapping[str, Any]:
         """ Determine tokenizer information about the given place.
 
@@ -700,7 +645,6 @@ class ICUNameAnalyzer(AbstractAnalyzer):
 
         return token_info.to_dict()
 
-
     def _process_place_address(self, token_info: '_TokenInfo',
                                address: Sequence[PlaceName]) -> None:
         for item in address:
@@ -713,12 +657,11 @@ class ICUNameAnalyzer(AbstractAnalyzer):
             elif item.kind == 'place':
                 if not item.suffix:
                     token_info.add_place(itertools.chain(*self._compute_name_tokens([item])))
-            elif not item.kind.startswith('_') and not item.suffix and \
-                 item.kind not in ('country', 'full', 'inclusion'):
+            elif (not item.kind.startswith('_') and not item.suffix and
+                  item.kind not in ('country', 'full', 'inclusion')):
                 token_info.add_address_term(item.kind,
                                             itertools.chain(*self._compute_name_tokens([item])))
 
-
     def _compute_housenumber_token(self, hnr: PlaceName) -> Tuple[Optional[int], Optional[str]]:
         """ Normalize the housenumber and return the word token and the
             canonical form.
@@ -733,11 +676,10 @@ class ICUNameAnalyzer(AbstractAnalyzer):
             if norm_name:
                 result = self._cache.housenumbers.get(norm_name, result)
                 if result[0] is None:
-                    with self.conn.cursor() as cur:
-                        hid = cur.scalar("SELECT getorcreate_hnr_id(%s)", (norm_name, ))
+                    hid = execute_scalar(self.conn, "SELECT getorcreate_hnr_id(%s)", (norm_name, ))
 
-                        result = hid, norm_name
-                        self._cache.housenumbers[norm_name] = result
+                    result = hid, norm_name
+                    self._cache.housenumbers[norm_name] = result
         else:
             # Otherwise use the analyzer to determine the canonical name.
             # Per convention we use the first variant as the 'lookup name', the
@@ -748,15 +690,13 @@ class ICUNameAnalyzer(AbstractAnalyzer):
                 if result[0] is None:
                     variants = analyzer.compute_variants(word_id)
                     if variants:
-                        with self.conn.cursor() as cur:
-                            hid = cur.scalar("SELECT create_analyzed_hnr_id(%s, %s)",
+                        hid = execute_scalar(self.conn, "SELECT create_analyzed_hnr_id(%s, %s)",
                                              (word_id, list(variants)))
-                            result = hid, variants[0]
-                            self._cache.housenumbers[word_id] = result
+                        result = hid, variants[0]
+                        self._cache.housenumbers[word_id] = result
 
         return result
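cur.scalar() was a method on the project's old cursor subclass; the patch switches to the module-level execute_scalar() helper from nominatim_db.db.connection. A rough guess at its shape (the real helper may differ, e.g. in how it treats empty results):

    def execute_scalar(conn: Connection, sql: str, args: Any = None) -> Any:
        # Run a single-value query and return the first column of the
        # first row.
        with conn.cursor() as cur:
            cur.execute(sql, args)
            row = cur.fetchone()
        return None if row is None else row[0]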
 
-
     def _retrieve_full_tokens(self, name: str) -> List[int]:
         """ Get the full name token for the given name, if it exists.
             The name is only retrieved for the standard analyser.
@@ -777,7 +717,6 @@ class ICUNameAnalyzer(AbstractAnalyzer):
 
         return full
 
-
     def _compute_name_tokens(self, names: Sequence[PlaceName]) -> Tuple[Set[int], Set[int]]:
         """ Computes the full name and partial name tokens for the given
             dictionary of names.
@@ -815,7 +754,6 @@ class ICUNameAnalyzer(AbstractAnalyzer):
 
         return full_tokens, partial_tokens
 
-
     def _add_postcode(self, item: PlaceName) -> Optional[str]:
         """ Make sure the normalized postcode is present in the word table.
         """
@@ -863,11 +801,9 @@ class _TokenInfo:
         self.address_tokens: Dict[str, str] = {}
         self.postcode: Optional[str] = None
 
-
     def _mk_array(self, tokens: Iterable[Any]) -> str:
         return f"{{{','.join((str(s) for s in tokens))}}}"
 
-
     def to_dict(self) -> Dict[str, Any]:
         """ Return the token information in database importable format.
         """
@@ -894,13 +830,11 @@ class _TokenInfo:
 
         return out
 
-
     def set_names(self, fulls: Iterable[int], partials: Iterable[int]) -> None:
         """ Adds token information for the normalised names.
         """
         self.names = self._mk_array(itertools.chain(fulls, partials))
 
-
     def add_housenumber(self, token: Optional[int], hnr: Optional[str]) -> None:
         """ Extract housenumber information from a list of normalised
             housenumbers.
@@ -910,7 +844,6 @@ class _TokenInfo:
             self.housenumbers.add(hnr)
             self.housenumber_tokens.add(token)
 
-
     def add_street(self, tokens: Iterable[int]) -> None:
         """ Add addr:street match terms.
         """
@@ -918,13 +851,11 @@ class _TokenInfo:
             self.street_tokens = set()
         self.street_tokens.update(tokens)
 
-
     def add_place(self, tokens: Iterable[int]) -> None:
         """ Add addr:place search and match terms.
         """
         self.place_tokens.update(tokens)
 
-
     def add_address_term(self, key: str, partials: Iterable[int]) -> None:
         """ Add additional address terms.
         """