/**
 * Look up the given token strings in the word table and register every
 * token that is valid for this search in $oValidTokens.
 *
 * Currently only country-name tokens (type 'C') are handled; all other
 * token types are skipped. Country tokens are additionally filtered
 * against the active country restriction, if one is set.
 *
 * @param object   $oValidTokens Token list to add the found tokens to.
 * @param string[] $aTokens      Token strings to look up.
 * @param string   $sNormQuery   Normalised query string (currently unused,
 *                               kept for interface compatibility).
 *
 * @return void
 */
private function addTokensFromDB(&$oValidTokens, $aTokens, $sNormQuery)
{
    // Check which tokens we have, get the ID numbers.
    // NOTE: the comma after 'type' is required - without it the two
    // concatenated fragments would produce invalid SQL.
    $sSQL = 'SELECT word_id, word_token, type,';
    $sSQL .= " info->>'cc' as country";
    $sSQL .= ' FROM word WHERE word_token in (';
    $sSQL .= join(',', $this->oDB->getDBQuotedList($aTokens)).')';

    $aDBWords = $this->oDB->getAll($sSQL, null, 'Could not get word tokens.');

    foreach ($aDBWords as $aWord) {
        $iId = (int) $aWord['word_id'];

        switch ($aWord['type']) {
            case 'C':  // country name tokens
                // Skip tokens without a country code and tokens whose
                // country is excluded by the current restriction.
                // 'continue 2' is required: a plain 'continue' inside a
                // switch only targets the switch itself.
                if ($aWord['country'] === null
                    || ($this->aCountryRestriction
                        && !in_array($aWord['country'], $this->aCountryRestriction))
                ) {
                    continue 2;
                }
                $oToken = new Token\Country($iId, $aWord['country']);
                break;
            default:
                // Unknown token type: nothing to add for this word.
                continue 2;
        }

        $oValidTokens->addToken($aWord['word_token'], $oToken);
    }
}
for ($i = 0; $i < $iNumWords; $i++) {
$sPhrase = $aWords[$i];
- $aTokens[' '.$sPhrase] = ' '.$sPhrase;
$aTokens[$sPhrase] = $sPhrase;
for ($j = $i + 1; $j < $iNumWords; $j++) {
$sPhrase .= ' '.$aWords[$j];
- $aTokens[' '.$sPhrase] = ' '.$sPhrase;
$aTokens[$sPhrase] = $sPhrase;
}
}
"""
word_tokens = set()
for name in self._compute_full_names(names):
- if name:
- word_tokens.add(' ' + self.name_processor.get_search_normalized(name))
+ norm_name = self.name_processor.get_search_normalized(name)
+ if norm_name:
+ word_tokens.add(norm_name)
with self.conn.cursor() as cur:
# Get existing names
- cur.execute("SELECT word_token FROM word WHERE country_code = %s",
+ cur.execute("""SELECT word_token FROM word
+ WHERE type = 'C' and info->>'cc'= %s""",
(country_code, ))
word_tokens.difference_update((t[0] for t in cur))
+ # Only add those names that are not yet in the list.
if word_tokens:
- cur.execute("""INSERT INTO word (word_id, word_token, country_code,
- search_name_count)
- (SELECT nextval('seq_word'), token, %s, 0
+ cur.execute("""INSERT INTO word (word_token, type, info)
+ (SELECT token, 'C', json_build_object('cc', %s)
FROM unnest(%s) as token)
""", (country_code, list(word_tokens)))
+ # No names are deleted at the moment.
+ # If deletion is made possible, then the static names from the
+ # initial 'country_name' table should be kept.
+
def process_place(self, place):
""" Determine tokenizer information about the given place.