LOG = logging.getLogger()
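+
+# Word classes that get their own partial index on the word table:
+# (index name, value stored in the 'type' column).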
+WORD_TYPES = (('country_names', 'C'),
+              ('postcodes', 'P'),
+              ('full_word', 'W'),
+              ('housenumbers', 'H'))
+
def create(dsn: str, data_dir: Path) -> 'ICUTokenizer':
""" Create a new instance of the tokenizer provided by this module.
"""
if init_db:
self.update_sql_functions(config)
- self._init_db_tables(config)
+ self._setup_db_tables(config)
+ self._create_base_indices(config, 'word')
def init_from_project(self, config: Configuration) -> None:
""" Do any required postprocessing to make the tokenizer data ready
for use.
"""
- with connect(self.dsn) as conn:
- sqlp = SQLPreprocessor(conn, config)
- sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_indices.sql')
+ self._create_lookup_indices(config, 'word')
def update_sql_functions(self, config: Configuration) -> None:
self.init_from_project(config)
- def update_statistics(self) -> None:
+ def update_statistics(self, config: Configuration, threads: int = 2) -> None:
""" Recompute frequencies for all name words.
"""
with connect(self.dsn) as conn:
- if conn.table_exists('search_name'):
- with conn.cursor() as cur:
- cur.drop_table("word_frequencies")
- LOG.info("Computing word frequencies")
+ if not conn.table_exists('search_name'):
+ return
+
+ with conn.cursor() as cur:
+ cur.execute('ANALYSE search_name')
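+                # Let PostgreSQL parallelise the scans over search_name,
+                # capped at 6 workers.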
+ if threads > 1:
+ cur.execute('SET max_parallel_workers_per_gather TO %s',
+ (min(threads, 6),))
+
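+                # PostgreSQL before 12 has no MATERIALIZED CTE hint, so fall
+                # back to temporary frequency tables merged by a plpgsql helper.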
+ if conn.server_version_tuple() < (12, 0):
+ LOG.info('Computing word frequencies')
+ cur.drop_table('word_frequencies')
+ cur.drop_table('addressword_frequencies')
cur.execute("""CREATE TEMP TABLE word_frequencies AS
SELECT unnest(name_vector) as id, count(*)
FROM search_name GROUP BY id""")
- cur.execute("CREATE INDEX ON word_frequencies(id)")
- LOG.info("Update word table with recomputed frequencies")
- cur.execute("""UPDATE word
- SET info = info || jsonb_build_object('count', count)
- FROM word_frequencies WHERE word_id = id""")
- cur.drop_table("word_frequencies")
+ cur.execute('CREATE INDEX ON word_frequencies(id)')
+ cur.execute("""CREATE TEMP TABLE addressword_frequencies AS
+ SELECT unnest(nameaddress_vector) as id, count(*)
+ FROM search_name GROUP BY id""")
+ cur.execute('CREATE INDEX ON addressword_frequencies(id)')
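+                    # Note: declared IMMUTABLE although it reads the temporary
+                    # tables; safe here only because they do not change during
+                    # the rebuild.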
+ cur.execute("""CREATE OR REPLACE FUNCTION word_freq_update(wid INTEGER,
+ INOUT info JSONB)
+ AS $$
+ DECLARE rec RECORD;
+ BEGIN
+ IF info is null THEN
+ info = '{}'::jsonb;
+ END IF;
+ FOR rec IN SELECT count FROM word_frequencies WHERE id = wid
+ LOOP
+ info = info || jsonb_build_object('count', rec.count);
+ END LOOP;
+ FOR rec IN SELECT count FROM addressword_frequencies WHERE id = wid
+ LOOP
+ info = info || jsonb_build_object('addr_count', rec.count);
+ END LOOP;
+ IF info = '{}'::jsonb THEN
+ info = null;
+ END IF;
+ END;
+ $$ LANGUAGE plpgsql IMMUTABLE;
+ """)
+ LOG.info('Update word table with recomputed frequencies')
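+                    # Rebuild the complete word table with the merged counts;
+                    # the copy replaces 'word' in _move_temporary_word_table().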
+ cur.drop_table('tmp_word')
+ cur.execute("""CREATE TABLE tmp_word AS
+ SELECT word_id, word_token, type, word,
+ word_freq_update(word_id, info) as info
+ FROM word
+ """)
+ cur.drop_table('word_frequencies')
+ cur.drop_table('addressword_frequencies')
+ else:
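+                    # PostgreSQL 12+: compute name and address counts in a
+                    # single statement using materialized CTEs and a FULL JOIN.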
+ LOG.info('Computing word frequencies')
+ cur.drop_table('word_frequencies')
+ cur.execute("""
+ CREATE TEMP TABLE word_frequencies AS
+ WITH word_freq AS MATERIALIZED (
+ SELECT unnest(name_vector) as id, count(*)
+ FROM search_name GROUP BY id),
+ addr_freq AS MATERIALIZED (
+ SELECT unnest(nameaddress_vector) as id, count(*)
+ FROM search_name GROUP BY id)
+ SELECT coalesce(a.id, w.id) as id,
+ (CASE WHEN w.count is null THEN '{}'::JSONB
+ ELSE jsonb_build_object('count', w.count) END
+ ||
+ CASE WHEN a.count is null THEN '{}'::JSONB
+ ELSE jsonb_build_object('addr_count', a.count) END) as info
+ FROM word_freq w FULL JOIN addr_freq a ON a.id = w.id;
+ """)
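+                    # Covering index so the join below can read 'info' from
+                    # the index alone.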
+ cur.execute('CREATE UNIQUE INDEX ON word_frequencies(id) INCLUDE(info)')
+ cur.execute('ANALYSE word_frequencies')
+ LOG.info('Update word table with recomputed frequencies')
+ cur.drop_table('tmp_word')
+ cur.execute("""CREATE TABLE tmp_word AS
+ SELECT word_id, word_token, type, word,
+ (CASE WHEN wf.info is null THEN word.info
+ ELSE coalesce(word.info, '{}'::jsonb) || wf.info
+ END) as info
+ FROM word LEFT JOIN word_frequencies wf
+ ON word.word_id = wf.id
+ """)
+ cur.drop_table('word_frequencies')
+
+ with conn.cursor() as cur:
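+            # Switch parallel execution back off for the rest of the session.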
+ cur.execute('SET max_parallel_workers_per_gather TO 0')
+
+ sqlp = SQLPreprocessor(conn, config)
+ sqlp.run_string(conn,
+ 'GRANT SELECT ON tmp_word TO "{{config.DATABASE_WEBUSER}}"')
conn.commit()
+ self._create_base_indices(config, 'tmp_word')
+ self._create_lookup_indices(config, 'tmp_word')
+ self._move_temporary_word_table('tmp_word')
+
def _cleanup_housenumbers(self) -> None:
self.loader.make_token_analysis())
- def _install_php(self, phpdir: Path, overwrite: bool = True) -> None:
+ def most_frequent_words(self, conn: Connection, num: int) -> List[str]:
+ """ Return a list of the `num` most frequent full words
+ in the database.
+ """
+ with conn.cursor() as cur:
+ cur.execute("""SELECT word, sum((info->>'count')::int) as count
+ FROM word WHERE type = 'W'
+ GROUP BY word
+ ORDER BY count DESC LIMIT %s""", (num,))
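+            # Words indexed under a non-default analyser are stored as
+            # 'word@analyser'; keep only the base word.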
+            return [s[0].split('@')[0] for s in cur]
+
+
+ def _install_php(self, phpdir: Optional[Path], overwrite: bool = True) -> None:
""" Install the php script for the tokenizer.
"""
- assert self.loader is not None
- php_file = self.data_dir / "tokenizer.php"
+ if phpdir is not None:
+ assert self.loader is not None
+ php_file = self.data_dir / "tokenizer.php"
- if not php_file.exists() or overwrite:
- php_file.write_text(dedent(f"""\
- <?php
- @define('CONST_Max_Word_Frequency', 10000000);
- @define('CONST_Term_Normalization_Rules', "{self.loader.normalization_rules}");
- @define('CONST_Transliteration', "{self.loader.get_search_rules()}");
- require_once('{phpdir}/tokenizer/icu_tokenizer.php');"""), encoding='utf-8')
+ if not php_file.exists() or overwrite:
+ php_file.write_text(dedent(f"""\
+ <?php
+ @define('CONST_Max_Word_Frequency', 10000000);
+ @define('CONST_Term_Normalization_Rules', "{self.loader.normalization_rules}");
+ @define('CONST_Transliteration', "{self.loader.get_search_rules()}");
+ require_once('{phpdir}/tokenizer/icu_tokenizer.php');"""), encoding='utf-8')
def _save_config(self) -> None:
self.loader.save_config_to_db(conn)
- def _init_db_tables(self, config: Configuration) -> None:
+ def _setup_db_tables(self, config: Configuration) -> None:
+        """ Create the word table and the word ID sequence.
+        """
+ with connect(self.dsn) as conn:
+ with conn.cursor() as cur:
+ cur.drop_table('word')
+ sqlp = SQLPreprocessor(conn, config)
+ sqlp.run_string(conn, """
+ CREATE TABLE word (
+ word_id INTEGER,
+ word_token text NOT NULL,
+ type text NOT NULL,
+ word text,
+ info jsonb
+ ) {{db.tablespace.search_data}};
+ GRANT SELECT ON word TO "{{config.DATABASE_WEBUSER}}";
+
+ DROP SEQUENCE IF EXISTS seq_word;
+ CREATE SEQUENCE seq_word start 1;
+ GRANT SELECT ON seq_word to "{{config.DATABASE_WEBUSER}}";
+ """)
+ conn.commit()
+
+
+ def _create_base_indices(self, config: Configuration, table_name: str) -> None:
""" Set up the word table and fill it with pre-computed word
frequencies.
"""
with connect(self.dsn) as conn:
sqlp = SQLPreprocessor(conn, config)
- sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer_tables.sql')
+ sqlp.run_string(conn,
+ """CREATE INDEX idx_{{table_name}}_word_token ON {{table_name}}
+ USING BTREE (word_token) {{db.tablespace.search_index}}""",
+ table_name=table_name)
+ for name, ctype in WORD_TYPES:
+ sqlp.run_string(conn,
+ """CREATE INDEX idx_{{table_name}}_{{idx_name}} ON {{table_name}}
+ USING BTREE (word) {{db.tablespace.address_index}}
+ WHERE type = '{{column_type}}'
+ """,
+ table_name=table_name, idx_name=name,
+ column_type=ctype)
conn.commit()
+ def _create_lookup_indices(self, config: Configuration, table_name: str) -> None:
+ """ Create additional indexes used when running the API.
+ """
+ with connect(self.dsn) as conn:
+ sqlp = SQLPreprocessor(conn, config)
+ # Index required for details lookup.
+ sqlp.run_string(conn, """
+ CREATE INDEX IF NOT EXISTS idx_{{table_name}}_word_id
+ ON {{table_name}} USING BTREE (word_id) {{db.tablespace.search_index}}
+ """,
+ table_name=table_name)
+ conn.commit()
+
+
+ def _move_temporary_word_table(self, old: str) -> None:
+ """ Rename all tables and indexes used by the tokenizer.
+ """
+ with connect(self.dsn) as conn:
+ with conn.cursor() as cur:
+ cur.drop_table('word')
+ cur.execute(f"ALTER TABLE {old} RENAME TO word")
+ for idx in ('word_token', 'word_id'):
+ cur.execute(f"""ALTER INDEX idx_{old}_{idx}
+ RENAME TO idx_word_{idx}""")
+ for name, _ in WORD_TYPES:
+ cur.execute(f"""ALTER INDEX idx_{old}_{name}
+ RENAME TO idx_word_{name}""")
+ conn.commit()
+
+
class ICUNameAnalyzer(AbstractAnalyzer):
""" The ICU analyzer uses the ICU library for splitting names.
token_info.add_street(self._retrieve_full_tokens(item.name))
elif item.kind == 'place':
if not item.suffix:
- token_info.add_place(self._compute_partial_tokens(item.name))
+ token_info.add_place(itertools.chain(*self._compute_name_tokens([item])))
elif not item.kind.startswith('_') and not item.suffix and \
item.kind not in ('country', 'full', 'inclusion'):
- token_info.add_address_term(item.kind, self._compute_partial_tokens(item.name))
+ token_info.add_address_term(item.kind,
+ itertools.chain(*self._compute_name_tokens([item])))
def _compute_housenumber_token(self, hnr: PlaceName) -> Tuple[Optional[int], Optional[str]]:
return result
- def _compute_partial_tokens(self, name: str) -> List[int]:
- """ Normalize the given term, split it into partial words and return
- then token list for them.
- """
- assert self.conn is not None
- norm_name = self._search_normalized(name)
-
- tokens = []
- need_lookup = []
- for partial in norm_name.split():
- token = self._cache.partials.get(partial)
- if token:
- tokens.append(token)
- else:
- need_lookup.append(partial)
-
- if need_lookup:
- with self.conn.cursor() as cur:
- cur.execute("""SELECT word, getorcreate_partial_word(word)
- FROM unnest(%s) word""",
- (need_lookup, ))
-
- for partial, token in cur:
- assert token is not None
- tokens.append(token)
- self._cache.partials[partial] = token
-
- return tokens
-
-
def _retrieve_full_tokens(self, name: str) -> List[int]:
""" Get the full name token for the given name, if it exists.
The name is only retrieved for the standard analyser.
self.names: Optional[str] = None
self.housenumbers: Set[str] = set()
self.housenumber_tokens: Set[int] = set()
- self.street_tokens: Set[int] = set()
+ self.street_tokens: Optional[Set[int]] = None
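+        # None means no addr:street was seen; an empty set means it was seen
+        # but produced no tokens.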
self.place_tokens: Set[int] = set()
self.address_tokens: Dict[str, str] = {}
self.postcode: Optional[str] = None
out['hnr'] = ';'.join(self.housenumbers)
out['hnr_tokens'] = self._mk_array(self.housenumber_tokens)
- if self.street_tokens:
+ if self.street_tokens is not None:
out['street'] = self._mk_array(self.street_tokens)
if self.place_tokens:
def add_street(self, tokens: Iterable[int]) -> None:
""" Add addr:street match terms.
"""
+ if self.street_tokens is None:
+ self.street_tokens = set()
self.street_tokens.update(tokens)
def add_address_term(self, key: str, partials: Iterable[int]) -> None:
""" Add additional address terms.
"""
- if partials:
- self.address_tokens[key] = self._mk_array(partials)
+ array = self._mk_array(partials)
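+        # _mk_array() yields a Postgres array literal, so '{}' (length 2) is empty.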
+ if len(array) > 2:
+ self.address_tokens[key] = array
def set_postcode(self, postcode: Optional[str]) -> None:
""" Set the postcode to the given one.