     monkeypatch.undo()
 def _mk_analyser(norm=("[[:Punctuation:][:Space:]]+ > ' '",), trans=(':: upper()',),
-                 variants=('~gasse -> gasse', 'street => st', )):
-    cfgstr = {'normalization' : list(norm),
-              'transliteration' : list(trans),
-              'variants' : [ {'words': list(variants)}]}
+                 variants=('~gasse -> gasse', 'street => st', ),
+                 sanitizers=[]):
+    cfgstr = {'normalization': list(norm),
+              'sanitizers': sanitizers,
+              'transliteration': list(trans),
+              'token-analysis': [{'analyzer': 'generic',
+                                  'variants': [{'words': list(variants)}]}]}
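+    # Note: the variant rules now live under the 'generic' analyzer of the new
+    # 'token-analysis' section; a top-level 'variants' key is no longer used.
+    # Tests can also inject sanitizer steps, e.g. (hypothetical call):
+    #   _mk_analyser(sanitizers=[{'step': 'split-name-list'}])
+    # The default of an empty list keeps the previous behaviour.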
     (test_config.project_dir / 'icu_tokenizer.yaml').write_text(yaml.dump(cfgstr))
     tok.loader = ICURuleLoader(test_config)
     tok.init_new_db(test_config)
     assert word_table.get_partial_words() == {('test', 1),
-                                              ('no', 1), ('area', 2),
-                                              ('holz', 1), ('strasse', 1),
-                                              ('str', 1)}
+                                              ('no', 1), ('area', 2)}
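+    # The 'holz'/'strasse'/'str' partials are gone: with the variant rules
+    # moved under 'token-analysis', variant expansion apparently no longer
+    # feeds the partial-word table during init.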
 def test_init_from_project(monkeypatch, test_config, tokenizer_factory):
     @pytest.fixture(autouse=True)
     def setup(self, analyzer, sql_functions):
-        with analyzer() as anl:
+        sanitizers = [{'step': 'split-name-list'},
+                      {'step': 'strip-brace-terms'}]
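+        # Assumed semantics, inferred from the step names: 'split-name-list'
+        # breaks a combined name value such as 'Hauptstr;Main St' into separate
+        # names, while 'strip-brace-terms' additionally indexes a name such as
+        # 'Halle (Saale)' under its brace-free form 'Halle'.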
+        with analyzer(sanitizers=sanitizers) as anl:
             self.analyzer = anl
             yield anl
     def expect_name_terms(self, info, *expected_terms):
         tokens = self.analyzer.get_word_token_info(expected_terms)
-        print (tokens)
         for token in tokens:
             assert token[2] is not None, "No token for {0}".format(token)
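+            # 'token' appears to be a (word, word_token, word_id) tuple; a None
+            # in the last slot means the word never made it into the word table.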
     def process_named_place(self, names):
-        place = {'name': names}
-
-        return self.analyzer.process_place(PlaceInfo(place))
+        return self.analyzer.process_place(PlaceInfo({'name': names}))
     def test_simple_names(self):