if self.geometry == nil then
self.geometry = self.geom_func(self.object)
end
- if self.geometry:is_null() then
+ if self.geometry == nil or self.geometry:is_null() then
return 0
end
if geom:is_null() then
geom = o:as_linestring()
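+ -- reject the fallback linestring as well when it is empty or overly long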
+ if geom:is_null() or geom:length() > 30 then
+ return nil
+ end
end
return geom
query-preprocessing:
+ - step: split_japanese_phrases
- step: normalize
normalization:
- ":: lower ()"
- "'nº' > 'no'"
- "ª > a"
- "º > o"
- - "[[:Punctuation:][:Symbol:]\u02bc] > ' '"
+ - "[[:Punctuation:][:Symbol:][\u02bc] - [-:]]+ > '-'"
- "ß > 'ss'" # German szet is unambiguously equal to double ss
- - "[^[:alnum:] [:Canonical_Combining_Class=Virama:] [:Space:]] >"
+ - "[^[:alnum:] [:Canonical_Combining_Class=Virama:] [:Space:] [-:]] >"
- "[:Lm:] >"
- ":: [[:Number:]] Latin ()"
- ":: [[:Number:]] Ascii ();"
- ":: [[:Number:]] NFD ();"
- "[[:Nonspacing Mark:] [:Cf:]] >;"
- - "[:Space:]+ > ' '"
+ - "[-:]?[:Space:]+[-:]? > ' '"
transliteration:
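+ # The break markers '-' and ':' are folded back into plain spaces before the
+ # terms are transliterated into search tokens.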
+ - "[-:] > ' '"
- ":: Latin ()"
- !include icu-rules/extended-unicode-to-asccii.yaml
- ":: Ascii ()"
--- /dev/null
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2025 by the Nominatim developer community.
+# For a full list of authors see the git log.
+"""
+This file divides Japanese addresses into three categories:
+prefecture, municipality, and other.
+The division is not strict; it is a simple heuristic based on the keywords below.
+"""
+from typing import List
+import re
+
+from .config import QueryConfig
+from .base import QueryProcessingFunc
+from ..search.query import Phrase
+
+MATCH_PATTERNS = [
+ r'''
+ (...??[都都道府県縣]) # [group1] prefecture
+ (.+?[市区區町村]) # [group2] municipalities (city/wards/towns/villages)
+ (.+) # [group3] other words
+ ''',
+ r'''
+ (...??[都都道府県縣]) # [group1] prefecture
+ (.+) # [group3] other words
+ ''',
+ r'''
+ (.+?[市区區町村]) # [group2] municipalities (city/wards/towns/villages)
+ (.+) # [group3] other words
+ '''
+]
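+# Illustrative example (not taken from this patch): '東京都渋谷区渋谷1丁目' matches
+# the first pattern as group1='東京都', group2='渋谷区', group3='渋谷1丁目' and is
+# rejoined below with ':' as soft break markers: '東京都:渋谷区:渋谷1丁目'.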
+
+
+class _JapanesePreprocessing:
+
+ def __init__(self, config: QueryConfig) -> None:
+ self.config = config
+
+ def split_phrase(self, phrase: Phrase) -> Phrase:
+ """
+ Split the phrase text on the first pattern from MATCH_PATTERNS that matches.
+ Phrases that match none of the patterns are returned unchanged.
+ """
+ for pattern in MATCH_PATTERNS:
+ result = re.match(pattern, phrase.text, re.VERBOSE)
+ if result is not None:
+ return Phrase(phrase.ptype, ':'.join(result.groups()))
+
+ return phrase
+
+ def __call__(self, phrases: List[Phrase]) -> List[Phrase]:
+ """Split a Japanese address using japanese_tokenizer.
+ """
+ return [self.split_phrase(p) for p in phrases]
+
+
+def create(config: QueryConfig) -> QueryProcessingFunc:
+ """ Create a function of japanese preprocessing.
+ """
+ return _JapanesePreprocessing(config)
BreakType.START: 0.0,
BreakType.END: 0.0,
BreakType.PHRASE: 0.0,
+ BreakType.SOFT_PHRASE: 0.0,
BreakType.WORD: 0.1,
BreakType.PART: 0.2,
BreakType.TOKEN: 0.4
"""
assert self.query_analyzer is not None
qwords = [word for phrase in query.source
- for word in re.split('[, ]+', phrase.text) if word]
+ for word in re.split('[-,: ]+', phrase.text) if word]
if not qwords:
return
distance = 0.0
norm = self.query_analyzer.normalize_text(' '.join((result.display_name,
result.country_code or '')))
- words = set((w for w in norm.split(' ') if w))
+ words = set((w for w in re.split('[-,: ]+', norm) if w))
if not words:
continue
for qword in qwords:
"""
Implementation of query analysis for the ICU tokenizer.
"""
-from typing import Tuple, Dict, List, Optional, NamedTuple, Iterator, Any, cast
+from typing import Tuple, Dict, List, Optional, Iterator, Any, cast
from collections import defaultdict
import dataclasses
import difflib
+import re
+from itertools import zip_longest
from icu import Transliterator
'C': qmod.TokenType.COUNTRY
}
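+# Penalty applied to search words depending on the breaks they are built across:
+# in-word breaks (PART, TOKEN) are free, phrase-level breaks are expensive.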
+PENALTY_IN_TOKEN_BREAK = {
+ qmod.BreakType.START: 0.5,
+ qmod.BreakType.END: 0.5,
+ qmod.BreakType.PHRASE: 0.5,
+ qmod.BreakType.SOFT_PHRASE: 0.5,
+ qmod.BreakType.WORD: 0.1,
+ qmod.BreakType.PART: 0.0,
+ qmod.BreakType.TOKEN: 0.0
+}
+
-class QueryPart(NamedTuple):
+@dataclasses.dataclass
+class QueryPart:
""" Normalized and transliterated form of a single term in the query.
When the term came out of a split during the transliteration,
the normalized string is the full word before transliteration.
The word number keeps track of the word before transliteration
and can be used to identify partial transliterated terms.
+ Penalty is the break penalty for the break following the token.
"""
token: str
normalized: str
word_number: int
+ penalty: float
QueryParts = List[QueryPart]
total = len(terms)
for first in range(start, total):
word = terms[first].token
- yield word, qmod.TokenRange(first, first + 1)
+ penalty = PENALTY_IN_TOKEN_BREAK[qmod.BreakType.WORD]
+ yield word, qmod.TokenRange(first, first + 1, penalty=penalty)
for last in range(first + 1, min(first + 20, total)):
word = ' '.join((word, terms[last].token))
- yield word, qmod.TokenRange(first, last + 1)
+ penalty += terms[last - 1].penalty
+ yield word, qmod.TokenRange(first, last + 1, penalty=penalty)
@dataclasses.dataclass
self.penalty += (distance/len(self.lookup_word))
@staticmethod
- def from_db_row(row: SaRow) -> 'ICUToken':
+ def from_db_row(row: SaRow, base_penalty: float = 0.0) -> 'ICUToken':
""" Create a ICUToken from the row of the word table.
"""
count = 1 if row.info is None else row.info.get('count', 1)
addr_count = 1 if row.info is None else row.info.get('addr_count', 1)
- penalty = 0.0
+ penalty = base_penalty
if row.type == 'w':
- penalty = 0.3
+ penalty += 0.3
elif row.type == 'W':
if len(row.word_token) == 1 and row.word_token == row.word:
- penalty = 0.2 if row.word.isdigit() else 0.3
+ penalty += 0.2 if row.word.isdigit() else 0.3
elif row.type == 'H':
- penalty = sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
+ penalty += sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
if all(not c.isdigit() for c in row.word_token):
penalty += 0.2 * (len(row.word_token) - 1)
elif row.type == 'C':
if len(row.word_token) == 1:
- penalty = 0.3
+ penalty += 0.3
if row.info is None:
lookup_word = row.word
for row in await self.lookup_in_db(list(words.keys())):
for trange in words[row.word_token]:
- token = ICUToken.from_db_row(row)
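+ # seed the token's penalty with the break penalty accumulated for this word range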
+ token = ICUToken.from_db_row(row, trange.penalty or 0.0)
if row.type == 'S':
if row.info['op'] in ('in', 'near'):
if trange.start == 0:
wordnr = 0
for phrase in query.source:
query.nodes[-1].ptype = phrase.ptype
- for word in phrase.text.split(' '):
+ phrase_split = re.split('([ :-])', phrase.text)
+ # The zip construct will give us the pairs of word/break from
+ # the regular expression split. As the split array ends on the
+ # final word, we simply use the fillvalue to even out the list and
+ # add the phrase break at the end.
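+ # Example: re.split('([ :-])', 'foo bar-baz') gives
+ # ['foo', ' ', 'bar', '-', 'baz'], which is paired up as
+ # ('foo', ' '), ('bar', '-'), ('baz', ',').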
+ for word, breakchar in zip_longest(*[iter(phrase_split)]*2, fillvalue=','):
+ if not word:
+ continue
trans = self.transliterator.transliterate(word)
if trans:
for term in trans.split(' '):
if term:
- parts.append(QueryPart(term, word, wordnr))
+ parts.append(QueryPart(term, word, wordnr,
+ PENALTY_IN_TOKEN_BREAK[qmod.BreakType.TOKEN]))
query.add_node(qmod.BreakType.TOKEN, phrase.ptype)
- query.nodes[-1].btype = qmod.BreakType.WORD
+ query.nodes[-1].btype = qmod.BreakType(breakchar)
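+ # the last part of the word carries the penalty of the break that follows it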
+ parts[-1].penalty = PENALTY_IN_TOKEN_BREAK[qmod.BreakType(breakchar)]
wordnr += 1
- query.nodes[-1].btype = qmod.BreakType.PHRASE
for word, wrange in yield_words(parts, phrase_start):
words[word].append(wrange)
""" Add tokens to query that are not saved in the database.
"""
for part, node, i in zip(parts, query.nodes, range(1000)):
- if len(part.token) <= 4 and part[0].isdigit()\
+ if len(part.token) <= 4 and part.token.isdigit()\
and not node.has_tokens(i+1, qmod.TokenType.HOUSENUMBER):
query.add_token(qmod.TokenRange(i, i+1), qmod.TokenType.HOUSENUMBER,
ICUToken(penalty=0.5, token=0,
END = '>'
""" End of the query. """
PHRASE = ','
- """ Break between two phrases. """
+ """ Hard break between two phrases. Address parts cannot cross hard
+ phrase boundaries."""
+ SOFT_PHRASE = ':'
+ """ Likely break between two phrases. Address parts should not cross soft
+ phrase boundaries. Soft breaks can be inserted by a preprocessor
+ that is analysing the input string.
+ """
WORD = ' '
""" Break between words. """
PART = '-'
"""
start: int
end: int
+ penalty: Optional[float] = None
def __lt__(self, other: 'TokenRange') -> bool:
return self.end <= other.start
qmod.BreakType.START: 0.0,
qmod.BreakType.END: 0.0,
qmod.BreakType.PHRASE: 0.0,
+ qmod.BreakType.SOFT_PHRASE: 0.0,
qmod.BreakType.WORD: 0.1,
qmod.BreakType.PART: 0.2,
qmod.BreakType.TOKEN: 0.4
LOG.warning('Post-process tables')
with connect(args.config.get_libpq_dsn()) as conn:
+ conn.autocommit = True
await database_import.create_search_indices(conn, args.config,
drop=args.no_updates,
threads=num_threads)
LOG.warning('Create search index for default country names.')
+ conn.autocommit = False
country_info.create_country_names(conn, tokenizer,
args.config.get_str_list('LANGUAGES'))
if args.no_updates:
+ conn.autocommit = True
freeze.drop_update_tables(conn)
tokenizer.finalize_import(args.config)
from ..tools import database_import, refresh
with connect(config.get_libpq_dsn()) as conn:
+ conn.autocommit = True
LOG.warning('Create functions (1st pass)')
refresh.create_functions(conn, config, False, False)
LOG.warning('Create tables')
def __init__(self, norm_rules: str, trans_rules: str,
analysis_rules: Mapping[Optional[str], 'TokenAnalyzerRule']):
+ # additional break signs are not relevant during name analysis
+ norm_rules += ";[[:Space:][-:]]+ > ' ';"
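+ # e.g. 'foo-bar' and 'foo: bar' both normalise to 'foo bar' for name analysis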
self.normalizer = Transliterator.createFromRules("icu_normalization",
norm_rules)
trans_rules += ";[:Space:]+ > ' '"
--- /dev/null
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2025 by the Nominatim developer community.
+# For a full list of authors see the git log.
+"""
+Tests for japanese phrase splitting.
+"""
+from pathlib import Path
+
+import pytest
+
+from icu import Transliterator
+
+import nominatim_api.search.query as qmod
+from nominatim_api.query_preprocessing.config import QueryConfig
+from nominatim_api.query_preprocessing import split_japanese_phrases
+
+
+def run_preprocessor_on(query):
+ proc = split_japanese_phrases.create(QueryConfig().set_normalizer(None))
+
+ return proc(query)
+
+
+@pytest.mark.parametrize('inp,outp', [('大阪府大阪市大阪', '大阪府:大阪市:大阪'),
+ ('大阪府大阪', '大阪府:大阪'),
+ ('大阪市大阪', '大阪市:大阪')])
+def test_split_phrases(inp, outp):
+ query = [qmod.Phrase(qmod.PhraseType.NONE, inp)]
+
+ out = run_preprocessor_on(query)
+
+ assert out == [qmod.Phrase(qmod.PhraseType.NONE, outp)]