- hnrs = []
- addr_terms = []
- for key, value in address.items():
- if key == 'postcode':
- self._add_postcode(value)
- elif key in ('housenumber', 'streetnumber', 'conscriptionnumber'):
- hnrs.append(value)
- elif key == 'street':
- token_info.add_street(*self._compute_name_tokens({'name': value}))
- elif key == 'place':
- token_info.add_place(*self._compute_name_tokens({'name': value}))
- elif not key.startswith('_') and \
- key not in ('country', 'full'):
- addr_terms.append((key, *self._compute_name_tokens({'name': value})))
-
- if hnrs:
- hnrs = self._split_housenumbers(hnrs)
- token_info.add_housenumbers(self.conn, [self._make_standard_hnr(n) for n in hnrs])
-
- if addr_terms:
- token_info.add_address_terms(addr_terms)
+ for item in address:
+ if item.kind == 'postcode':
+ self._add_postcode(item.name)
+ elif item.kind == 'housenumber':
+ token_info.add_housenumber(*self._compute_housenumber_token(item))
+ elif item.kind == 'street':
+ token_info.add_street(self._retrieve_full_tokens(item.name))
+ elif item.kind == 'place':
+ if not item.suffix:
+ token_info.add_place(self._compute_partial_tokens(item.name))
+ elif not item.kind.startswith('_') and not item.suffix and \
+ item.kind not in ('country', 'full'):
+ token_info.add_address_term(item.kind, self._compute_partial_tokens(item.name))
+
+
+ def _compute_housenumber_token(self, hnr):
+ """ Normalize the housenumber and return the word token and the
+ canonical form.
+ """
+ analyzer = self.token_analysis.analysis.get('@housenumber')
+ result = None, None
+
+ if analyzer is None:
+ # When no custom analyzer is set, simply normalize and transliterate
+ norm_name = self._search_normalized(hnr.name)
+ if norm_name:
+ result = self._cache.housenumbers.get(norm_name, result)
+ if result[0] is None:
+ with self.conn.cursor() as cur:
+ cur.execute("SELECT getorcreate_hnr_id(%s)", (norm_name, ))
+ result = cur.fetchone()[0], norm_name
+ self._cache.housenumbers[norm_name] = result
+ else:
+ # Otherwise use the analyzer to determine the canonical name.
+ # Per convention we use the first variant as the 'lookup name', the
+ # name that gets saved in the housenumber field of the place.
+ norm_name = analyzer.normalize(hnr.name)
+ if norm_name:
+ result = self._cache.housenumbers.get(norm_name, result)
+ if result[0] is None:
+ variants = analyzer.get_variants_ascii(norm_name)
+ if variants:
+ with self.conn.cursor() as cur:
+ cur.execute("SELECT create_analyzed_hnr_id(%s, %s)",
+ (norm_name, list(variants)))
+ result = cur.fetchone()[0], variants[0]
+ self._cache.housenumbers[norm_name] = result
+
+ return result
+
+
+ def _compute_partial_tokens(self, name):
+ """ Normalize the given term, split it into partial words and return
+ the token list for them.
+ """
+ norm_name = self._search_normalized(name)
+
+ tokens = []
+ need_lookup = []
+ for partial in norm_name.split():
+ token = self._cache.partials.get(partial)
+ if token:
+ tokens.append(token)
+ else:
+ need_lookup.append(partial)
+
+ if need_lookup:
+ with self.conn.cursor() as cur:
+ cur.execute("""SELECT word, getorcreate_partial_word(word)
+ FROM unnest(%s) word""",
+ (need_lookup, ))
+
+ for partial, token in cur:
+ tokens.append(token)
+ self._cache.partials[partial] = token
+
+ return tokens
+
+
+ def _retrieve_full_tokens(self, name):
+ """ Get the full name token for the given name, if it exists.
+ The name is only retrieved for the standard analyser.
+ """
+ norm_name = self._search_normalized(name)
+
+ # return cached if possible
+ if norm_name in self._cache.fulls:
+ return self._cache.fulls[norm_name]
+
+ with self.conn.cursor() as cur:
+ cur.execute("SELECT word_id FROM word WHERE word_token = %s and type = 'W'",
+ (norm_name, ))
+ full = [row[0] for row in cur]
+
+ self._cache.fulls[norm_name] = full
+
+ return full