git.openstreetmap.org Git - nominatim.git/commitdiff
Merge remote-tracking branch 'upstream/master'
author Sarah Hoffmann <lonvia@denofr.de>
Wed, 16 Mar 2022 13:33:22 +0000 (14:33 +0100)
committer Sarah Hoffmann <lonvia@denofr.de>
Wed, 16 Mar 2022 13:33:22 +0000 (14:33 +0100)
27 files changed:
docs/admin/Installation.md
docs/customize/Tokenizers.md
lib-php/ReverseGeocode.php
lib-php/tokenizer/icu_tokenizer.php
lib-sql/functions/placex_triggers.sql
lib-sql/tokenizer/icu_tokenizer.sql
lib-sql/tokenizer/icu_tokenizer_tables.sql
nominatim/indexer/runners.py
nominatim/tokenizer/icu_token_analysis.py
nominatim/tokenizer/icu_tokenizer.py
nominatim/tokenizer/legacy_tokenizer.py
nominatim/tokenizer/token_analysis/generic.py
nominatim/tokenizer/token_analysis/housenumbers.py [new file with mode: 0644]
nominatim/tools/replication.py
settings/env.defaults
settings/icu_tokenizer.yaml
test/bdd/db/query/housenumbers.feature
test/bdd/db/query/interpolation.feature [new file with mode: 0644]
test/bdd/steps/http_responses.py
test/bdd/steps/steps_api_queries.py
test/python/mock_icu_word_table.py
test/python/tokenizer/test_icu.py
test/python/tokenizer/token_analysis/test_generic.py
test/python/tokenizer/token_analysis/test_generic_mutation.py
vagrant/Install-on-Centos-8.sh
vagrant/Install-on-Ubuntu-18.sh
vagrant/Install-on-Ubuntu-20.sh

index 19ad2dbb9b0be7853a67346edf17d2fd1d5606db..00c7ca29a53699066afd03090abe0dfbc246922d 100644 (file)
@@ -129,7 +129,7 @@ If you want to install latest development version from github, make sure to
 also check out the osm2pgsql subproject:
 
 ```
-git clone --recursive git://github.com/openstreetmap/Nominatim.git
+git clone --recursive https://github.com/openstreetmap/Nominatim.git
 ```
 
 The development version does not include the country grid. Download it separately:
index f75bc6a5c9da88fad8b92bc046337d2fcebff863..d849eb48c0d457c7c57b27e2807ff55675e2fa33 100644 (file)
@@ -206,15 +206,16 @@ by a sanitizer (see for example the
 The token-analysis section contains the list of configured analyzers. Each
 analyzer must have an `id` parameter that uniquely identifies the analyzer.
 The only exception is the default analyzer that is used when no special
-analyzer was selected.
+analyzer was selected. There is one special id '@housenumber'. If an analyzer
+with that name is present, it is used for normalization of house numbers.
 
 Different analyzer implementations may exist. To select the implementation,
-the `analyzer` parameter must be set. Currently there is only one implementation
-`generic` which is described in the following.
+the `analyzer` parameter must be set. The different implementations are
+described in the following.
 
 ##### Generic token analyzer
 
-The generic analyzer is able to create variants from a list of given
+The generic analyzer `generic` is able to create variants from a list of given
 abbreviation and decomposition replacements and introduce spelling variations.
 
 ###### Variants
@@ -331,6 +332,14 @@ the mode by adding:
 
 to the analyser configuration.
 
+##### Housenumber token analyzer
+
+The analyzer `housenumbers` is purpose-made to analyze house numbers. It
+creates variants with optional spaces between numbers and letters. Thus,
+house numbers of the form '3 a', '3A', '3-A' etc. are all considered equivalent.
+
+The analyzer cannot be customized.
+
 ### Reconfiguration
 
 Changing the configuration after the import is currently not possible, although
index 646c592b9f5d4b3dc5efba035406602f0b44d3bc..35103aeb3f78681e93fc5e3aa0c287fb498739df 100644 (file)
@@ -64,7 +64,9 @@ class ReverseGeocode
     {
         Debug::newFunction('lookupInterpolation');
         $sSQL = 'SELECT place_id, parent_place_id, 30 as rank_search,';
-        $sSQL .= '  (endnumber - startnumber) * ST_LineLocatePoint(linegeo,'.$sPointSQL.') as fhnr,';
+        $sSQL .= '  (CASE WHEN endnumber != startnumber';
+        $sSQL .= '        THEN (endnumber - startnumber) * ST_LineLocatePoint(linegeo,'.$sPointSQL.')';
+        $sSQL .= '        ELSE startnumber END) as fhnr,';
         $sSQL .= '  startnumber, endnumber, step,';
         $sSQL .= '  ST_Distance(linegeo,'.$sPointSQL.') as distance';
         $sSQL .= ' FROM location_property_osmline';
index cbbf240a27a2c4f95fd1180bb0842dd28c613ee2..ccce99ca1330d7a42a6976d7fb7c9eaf3d8a84d7 100644 (file)
@@ -157,7 +157,8 @@ class Tokenizer
         $sSQL = 'SELECT word_id, word_token, type, word,';
         $sSQL .= "      info->>'op' as operator,";
         $sSQL .= "      info->>'class' as class, info->>'type' as ctype,";
-        $sSQL .= "      info->>'count' as count";
+        $sSQL .= "      info->>'count' as count,";
+        $sSQL .= "      info->>'lookup' as lookup";
         $sSQL .= ' FROM word WHERE word_token in (';
         $sSQL .= join(',', $this->oDB->getDBQuotedList($aTokens)).')';
 
@@ -179,7 +180,8 @@ class Tokenizer
                     }
                     break;
                 case 'H':  // house number tokens
-                    $oValidTokens->addToken($sTok, new Token\HouseNumber($iId, $aWord['word_token']));
+                    $sLookup = $aWord['lookup'] ?? $aWord['word_token'];
+                    $oValidTokens->addToken($sTok, new Token\HouseNumber($iId, $sLookup));
                     break;
                 case 'P':  // postcode tokens
                     // Postcodes are not normalized, so they may have content
index 6ab3e84d300790dbdf766010b122bf3a34912330..1eae353e1a0332c5e2297e3bd1e53a578e1f4e95 100644 (file)
@@ -342,9 +342,10 @@ BEGIN
     WHERE s.place_id = parent_place_id;
 
   FOR addr_item IN
-    SELECT (get_addr_tag_rank(key, country)).*, key,
+    SELECT ranks.*, key,
            token_get_address_search_tokens(token_info, key) as search_tokens
-      FROM token_get_address_keys(token_info) as key
+      FROM token_get_address_keys(token_info) as key,
+           LATERAL get_addr_tag_rank(key, country) as ranks
       WHERE not token_get_address_search_tokens(token_info, key) <@ parent_address_vector
   LOOP
     addr_place := get_address_place(in_partition, geometry,
@@ -456,10 +457,12 @@ BEGIN
   address_havelevel := array_fill(false, ARRAY[maxrank]);
 
   FOR location IN
-    SELECT (get_address_place(partition, geometry, from_rank, to_rank,
-                              extent, token_info, key)).*, key
-      FROM (SELECT (get_addr_tag_rank(key, country)).*, key
-              FROM token_get_address_keys(token_info) as key) x
+    SELECT apl.*, key
+      FROM (SELECT extra.*, key
+              FROM token_get_address_keys(token_info) as key,
+                   LATERAL get_addr_tag_rank(key, country) as extra) x,
+           LATERAL get_address_place(partition, geometry, from_rank, to_rank,
+                              extent, token_info, key) as apl
       ORDER BY rank_address, distance, isguess desc
   LOOP
     IF location.place_id is null THEN
index 03408b4ac1b8d8dba868bbb7cb60e3e9febe5e56..a3dac8ddcbe82eb5fd6057bd81bb9b823befa159 100644 (file)
@@ -200,3 +200,26 @@ BEGIN
 END;
 $$
 LANGUAGE plpgsql;
+
+
+CREATE OR REPLACE FUNCTION create_analyzed_hnr_id(norm_term TEXT, lookup_terms TEXT[])
+  RETURNS INTEGER
+  AS $$
+DECLARE
+  return_id INTEGER;
+BEGIN
+  SELECT min(word_id) INTO return_id
+    FROM word WHERE word = norm_term and type = 'H';
+
+  IF return_id IS NULL THEN
+    return_id := nextval('seq_word');
+    INSERT INTO word (word_id, word_token, type, word, info)
+      SELECT return_id, lookup_term, 'H', norm_term,
+             json_build_object('lookup', lookup_terms[1])
+        FROM unnest(lookup_terms) as lookup_term;
+  END IF;
+
+  RETURN return_id;
+END;
+$$
+LANGUAGE plpgsql;
index 58965b57fcb25478021cc8a0c27ccb8ddeaa7d85..509f6f65d9d6e5dd659c3d95ad2e3220b4209bb5 100644 (file)
@@ -28,6 +28,10 @@ CREATE INDEX idx_word_postcodes ON word
 CREATE INDEX idx_word_full_word ON word
     USING btree(word) {{db.tablespace.address_index}}
     WHERE type = 'W';
+-- Used when inserting analyzed housenumbers (exclude old-style entries).
+CREATE INDEX idx_word_housenumbers ON word
+    USING btree(word) {{db.tablespace.address_index}}
+    WHERE type = 'H' and word is not null;
 
 GRANT SELECT ON word TO "{{config.DATABASE_WEBUSER}}";
 
index 76bd3b3187fd373f296324f6c3a79160d3cd36a0..ac7a0015a78a27469f6edaa85f3ae3dbfe70fff5 100644 (file)
@@ -45,8 +45,9 @@ class AbstractPlacexRunner:
 
     @staticmethod
     def get_place_details(worker, ids):
-        worker.perform("""SELECT place_id, (placex_indexing_prepare(placex)).*
-                          FROM placex WHERE place_id IN %s""",
+        worker.perform("""SELECT place_id, extra.*
+                          FROM placex, LATERAL placex_indexing_prepare(placex) as extra
+                          WHERE place_id IN %s""",
                        (tuple((p[0] for p in ids)), ))
 
 
index 1d319b32edd7556dd80004a77f3faa058d82b727..68fc82e333b6a44de6eb9d42ed06a2d4ae17da58 100644 (file)
@@ -25,5 +25,12 @@ class ICUTokenAnalysis:
         self.search = Transliterator.createFromRules("icu_search",
                                                      norm_rules + trans_rules)
 
-        self.analysis = {name: arules.create(self.to_ascii, arules.config)
+        self.analysis = {name: arules.create(self.normalizer, self.to_ascii, arules.config)
                          for name, arules in analysis_rules.items()}
+
+
+    def get_analyzer(self, name):
+        """ Return the given named analyzer. If no analyzer with that
+            name exists, return the default analyzer.
+        """
+        return self.analysis.get(name) or self.analysis[None]
index 9c25b6d7940fc145a2565a326d239463e32227cc..1799ae86d0330ee61c2fc5fe05118ff00e0ef162 100644 (file)
@@ -119,12 +119,13 @@ class LegacyICUTokenizer(AbstractTokenizer):
             if not conn.table_exists('search_name'):
                 return
             with conn.cursor(name="hnr_counter") as cur:
-                cur.execute("""SELECT word_id, word_token FROM word
+                cur.execute("""SELECT DISTINCT word_id, coalesce(info->>'lookup', word_token)
+                               FROM word
                                WHERE type = 'H'
                                  AND NOT EXISTS(SELECT * FROM search_name
                                                 WHERE ARRAY[word.word_id] && name_vector)
-                                 AND (char_length(word_token) > 6
-                                      OR word_token not similar to '\\d+')
+                                 AND (char_length(coalesce(word, word_token)) > 6
+                                      OR coalesce(word, word_token) not similar to '\\d+')
                             """)
                 candidates = {token: wid for wid, token in cur}
             with conn.cursor(name="hnr_counter") as cur:
@@ -137,6 +138,7 @@ class LegacyICUTokenizer(AbstractTokenizer):
                     for hnr in row[0].split(';'):
                         candidates.pop(hnr, None)
             LOG.info("There are %s outdated housenumbers.", len(candidates))
+            LOG.debug("Outdated housenumbers: %s", candidates.keys())
             if candidates:
                 with conn.cursor() as cur:
                     cur.execute("""DELETE FROM word WHERE word_id = any(%s)""",
@@ -282,13 +284,6 @@ class LegacyICUNameAnalyzer(AbstractAnalyzer):
         return postcode.strip().upper()
 
 
-    def _make_standard_hnr(self, hnr):
-        """ Create a normalised version of a housenumber.
-
-            This function takes minor shortcuts on transliteration.
-        """
-        return self._search_normalized(hnr)
-
     def update_postcodes_from_db(self):
         """ Update postcode tokens in the word table from the location_postcode
             table.
@@ -456,14 +451,12 @@ class LegacyICUNameAnalyzer(AbstractAnalyzer):
             Returns a JSON-serializable structure that will be handed into
             the database via the token_info field.
         """
-        token_info = _TokenInfo(self._cache)
+        token_info = _TokenInfo()
 
         names, address = self.sanitizer.process_names(place)
 
         if names:
-            fulls, partials = self._compute_name_tokens(names)
-
-            token_info.add_names(fulls, partials)
+            token_info.set_names(*self._compute_name_tokens(names))
 
             if place.is_country():
                 self._add_country_full_names(place.country_code, names)
@@ -471,37 +464,59 @@ class LegacyICUNameAnalyzer(AbstractAnalyzer):
         if address:
             self._process_place_address(token_info, address)
 
-        return token_info.data
+        return token_info.to_dict()
 
 
     def _process_place_address(self, token_info, address):
-        hnrs = set()
-        addr_terms = []
-        streets = []
         for item in address:
             if item.kind == 'postcode':
                 self._add_postcode(item.name)
             elif item.kind == 'housenumber':
-                norm_name = self._make_standard_hnr(item.name)
-                if norm_name:
-                    hnrs.add(norm_name)
+                token_info.add_housenumber(*self._compute_housenumber_token(item))
             elif item.kind == 'street':
-                streets.extend(self._retrieve_full_tokens(item.name))
+                token_info.add_street(self._retrieve_full_tokens(item.name))
             elif item.kind == 'place':
                 if not item.suffix:
                     token_info.add_place(self._compute_partial_tokens(item.name))
             elif not item.kind.startswith('_') and not item.suffix and \
                  item.kind not in ('country', 'full'):
-                addr_terms.append((item.kind, self._compute_partial_tokens(item.name)))
+                token_info.add_address_term(item.kind, self._compute_partial_tokens(item.name))
 
-        if hnrs:
-            token_info.add_housenumbers(self.conn, hnrs)
 
-        if addr_terms:
-            token_info.add_address_terms(addr_terms)
+    def _compute_housenumber_token(self, hnr):
+        """ Normalize the housenumber and return the word token and the
+            canonical form.
+        """
+        analyzer = self.token_analysis.analysis.get('@housenumber')
+        result = None, None
+
+        if analyzer is None:
+            # When no custom analyzer is set, simply normalize and transliterate
+            norm_name = self._search_normalized(hnr.name)
+            if norm_name:
+                result = self._cache.housenumbers.get(norm_name, result)
+                if result[0] is None:
+                    with self.conn.cursor() as cur:
+                        cur.execute("SELECT getorcreate_hnr_id(%s)", (norm_name, ))
+                        result = cur.fetchone()[0], norm_name
+                        self._cache.housenumbers[norm_name] = result
+        else:
+            # Otherwise use the analyzer to determine the canonical name.
+            # Per convention we use the first variant as the 'lookup name', the
+            # name that gets saved in the housenumber field of the place.
+            norm_name = analyzer.normalize(hnr.name)
+            if norm_name:
+                result = self._cache.housenumbers.get(norm_name, result)
+                if result[0] is None:
+                    variants = analyzer.get_variants_ascii(norm_name)
+                    if variants:
+                        with self.conn.cursor() as cur:
+                            cur.execute("SELECT create_analyzed_hnr_id(%s, %s)",
+                                        (norm_name, list(variants)))
+                            result = cur.fetchone()[0], variants[0]
+                            self._cache.housenumbers[norm_name] = result
 
-        if streets:
-            token_info.add_street(streets)
+        return result
 
 
     def _compute_partial_tokens(self, name):
@@ -561,7 +576,8 @@ class LegacyICUNameAnalyzer(AbstractAnalyzer):
 
         for name in names:
             analyzer_id = name.get_attr('analyzer')
-            norm_name = self._normalized(name.name)
+            analyzer = self.token_analysis.get_analyzer(analyzer_id)
+            norm_name = analyzer.normalize(name.name)
             if analyzer_id is None:
                 token_id = norm_name
             else:
@@ -569,12 +585,12 @@ class LegacyICUNameAnalyzer(AbstractAnalyzer):
 
             full, part = self._cache.names.get(token_id, (None, None))
             if full is None:
-                variants = self.token_analysis.analysis[analyzer_id].get_variants_ascii(norm_name)
+                variants = analyzer.get_variants_ascii(norm_name)
                 if not variants:
                     continue
 
                 with self.conn.cursor() as cur:
-                    cur.execute("SELECT (getorcreate_full_word(%s, %s)).*",
+                    cur.execute("SELECT * FROM getorcreate_full_word(%s, %s)",
                                 (token_id, variants))
                     full, part = cur.fetchone()
 
@@ -611,50 +627,76 @@ class LegacyICUNameAnalyzer(AbstractAnalyzer):
 class _TokenInfo:
     """ Collect token information to be sent back to the database.
     """
-    def __init__(self, cache):
-        self._cache = cache
-        self.data = {}
+    def __init__(self):
+        self.names = None
+        self.housenumbers = set()
+        self.housenumber_tokens = set()
+        self.street_tokens = set()
+        self.place_tokens = set()
+        self.address_tokens = {}
+
 
     @staticmethod
     def _mk_array(tokens):
-        return '{%s}' % ','.join((str(s) for s in tokens))
+        return f"{{{','.join((str(s) for s in tokens))}}}"
+
+
+    def to_dict(self):
+        """ Return the token information in database importable format.
+        """
+        out = {}
+
+        if self.names:
+            out['names'] = self.names
+
+        if self.housenumbers:
+            out['hnr'] = ';'.join(self.housenumbers)
+            out['hnr_tokens'] = self._mk_array(self.housenumber_tokens)
+
+        if self.street_tokens:
+            out['street'] = self._mk_array(self.street_tokens)
 
+        if self.place_tokens:
+            out['place'] = self._mk_array(self.place_tokens)
 
-    def add_names(self, fulls, partials):
+        if self.address_tokens:
+            out['addr'] = self.address_tokens
+
+        return out
+
+
+    def set_names(self, fulls, partials):
         """ Adds token information for the normalised names.
         """
-        self.data['names'] = self._mk_array(itertools.chain(fulls, partials))
+        self.names = self._mk_array(itertools.chain(fulls, partials))
 
 
-    def add_housenumbers(self, conn, hnrs):
+    def add_housenumber(self, token, hnr):
         """ Extract housenumber information from a list of normalised
             housenumbers.
         """
-        self.data['hnr_tokens'] = self._mk_array(self._cache.get_hnr_tokens(conn, hnrs))
-        self.data['hnr'] = ';'.join(hnrs)
+        if token:
+            self.housenumbers.add(hnr)
+            self.housenumber_tokens.add(token)
 
 
     def add_street(self, tokens):
         """ Add addr:street match terms.
         """
-        self.data['street'] = self._mk_array(tokens)
+        self.street_tokens.update(tokens)
 
 
     def add_place(self, tokens):
         """ Add addr:place search and match terms.
         """
-        if tokens:
-            self.data['place'] = self._mk_array(tokens)
+        self.place_tokens.update(tokens)
 
 
-    def add_address_terms(self, terms):
+    def add_address_term(self, key, partials):
         """ Add additional address terms.
         """
-        tokens = {key: self._mk_array(partials)
-                  for key, partials in terms if partials}
-
-        if tokens:
-            self.data['addr'] = tokens
+        if partials:
+            self.address_tokens[key] = self._mk_array(partials)
 
 
 class _TokenCache:
@@ -669,29 +711,3 @@ class _TokenCache:
         self.fulls = {}
         self.postcodes = set()
         self.housenumbers = {}
-
-
-    def get_hnr_tokens(self, conn, terms):
-        """ Get token ids for a list of housenumbers, looking them up in the
-            database if necessary. `terms` is an iterable of normalized
-            housenumbers.
-        """
-        tokens = []
-        askdb = []
-
-        for term in terms:
-            token = self.housenumbers.get(term)
-            if token is None:
-                askdb.append(term)
-            else:
-                tokens.append(token)
-
-        if askdb:
-            with conn.cursor() as cur:
-                cur.execute("SELECT nr, getorcreate_hnr_id(nr) FROM unnest(%s) as nr",
-                            (askdb, ))
-                for term, tid in cur:
-                    self.housenumbers[term] = tid
-                    tokens.append(tid)
-
-        return tokens
index 7ce6b24250f4d303ed229216fa29ba92dd6dd095..28f4b32756c0756ea172ca3aa16a458ac6ce929d 100644 (file)
@@ -515,7 +515,7 @@ class _TokenInfo:
             simple_list = list(set(simple_list))
 
         with conn.cursor() as cur:
-            cur.execute("SELECT (create_housenumbers(%s)).* ", (simple_list, ))
+            cur.execute("SELECT * FROM create_housenumbers(%s)", (simple_list, ))
             self.data['hnr_tokens'], self.data['hnr'] = cur.fetchone()
 
 
index d4eae312d9dfc4f3407cb915988a41256614565b..3de915ba5254e1859976dd7e9842247df5a58b98 100644 (file)
@@ -47,10 +47,10 @@ def configure(rules, normalization_rules):
 
 ### Analysis section
 
-def create(transliterator, config):
+def create(normalizer, transliterator, config):
     """ Create a new token analysis instance for this module.
     """
-    return GenericTokenAnalysis(transliterator, config)
+    return GenericTokenAnalysis(normalizer, transliterator, config)
 
 
 class GenericTokenAnalysis:
@@ -58,7 +58,8 @@ class GenericTokenAnalysis:
         and provides the functions to apply the transformations.
     """
 
-    def __init__(self, to_ascii, config):
+    def __init__(self, norm, to_ascii, config):
+        self.norm = norm
         self.to_ascii = to_ascii
         self.variant_only = config['variant_only']
 
@@ -74,6 +75,13 @@ class GenericTokenAnalysis:
         self.mutations = [MutationVariantGenerator(*cfg) for cfg in config['mutations']]
 
 
+    def normalize(self, name):
+        """ Return the normalized form of the name. This is the standard form
+            from which possible variants for the name can be derived.
+        """
+        return self.norm.transliterate(name).strip()
+
+
     def get_variants_ascii(self, norm_name):
         """ Compute the spelling variants for the given normalized name
             and transliterate the result.
diff --git a/nominatim/tokenizer/token_analysis/housenumbers.py b/nominatim/tokenizer/token_analysis/housenumbers.py
new file mode 100644 (file)
index 0000000..96e86b2
--- /dev/null
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2022 by the Nominatim developer community.
+# For a full list of authors see the git log.
+"""
+Specialized processor for housenumbers. Analyses common housenumber patterns
+and creates variants for them.
+"""
+import re
+
+from nominatim.tokenizer.token_analysis.generic_mutation import MutationVariantGenerator
+
+RE_NON_DIGIT = re.compile('[^0-9]')
+RE_DIGIT_ALPHA = re.compile(r'(\d)\s*([^\d\s␣])')
+RE_ALPHA_DIGIT = re.compile(r'([^\s\d␣])\s*(\d)')
+RE_NAMED_PART = re.compile(r'[a-z]{4}')
+
+### Configuration section
+
+def configure(rules, normalization_rules): # pylint: disable=W0613
+    """ All behaviour is currently hard-coded.
+    """
+    return None
+
+### Analysis section
+
+def create(normalizer, transliterator, config): # pylint: disable=W0613
+    """ Create a new token analysis instance for this module.
+    """
+    return HousenumberTokenAnalysis(normalizer, transliterator)
+
+
+class HousenumberTokenAnalysis:
+    """ Detects common housenumber patterns and normalizes them.
+    """
+    def __init__(self, norm, trans):
+        self.norm = norm
+        self.trans = trans
+
+        self.mutator = MutationVariantGenerator('␣', (' ', ''))
+
+    def normalize(self, name):
+        """ Return the normalized form of the housenumber.
+        """
+        # shortcut for number-only numbers, which make up 90% of the data.
+        if RE_NON_DIGIT.search(name) is None:
+            return name
+
+        norm = self.trans.transliterate(self.norm.transliterate(name))
+        # If there is a significant non-numeric part, use as is.
+        if RE_NAMED_PART.search(norm) is None:
+            # Otherwise add optional spaces between digits and letters.
+            (norm_opt, cnt1) = RE_DIGIT_ALPHA.subn(r'\1␣\2', norm)
+            (norm_opt, cnt2) = RE_ALPHA_DIGIT.subn(r'\1␣\2', norm_opt)
+            # Avoid creating too many variants per number.
+            if cnt1 + cnt2 <= 4:
+                return norm_opt
+
+        return norm
+
+    def get_variants_ascii(self, norm_name):
+        """ Compute the spelling variants for the given normalized housenumber.
+
+            Generates variants for optional spaces (marked with '␣').
+        """
+        return list(self.mutator.generate([norm_name]))
index cbbf05859eaa66e409f4415d410cdfd3a392e857..fbd33e391084137004a31b0eeb3cbd4d08df28ec 100644 (file)
@@ -47,7 +47,7 @@ def init_replication(conn, base_url):
 
     status.set_status(conn, date=date, seq=seq)
 
-    LOG.warning("Updates intialised at sequence %s (%s)", seq, date)
+    LOG.warning("Updates initialised at sequence %s (%s)", seq, date)
 
 
 def check_for_updates(conn, base_url):
index 00f5569ae0894db7aa1179cd39bb635ad98042eb..e5dfe4a6094c01b9ea9605b5de06b8cfc666b274 100644 (file)
@@ -74,8 +74,6 @@ NOMINATIM_HTTP_PROXY_PASSWORD=
 # HTTPS_PROXY="http://user:pass@10.10.1.10:1080"
 
 # Location of the osm2pgsql binary.
-# When empty, osm2pgsql is expected to reside in the osm2pgsql directory in
-# the project directory.
 # EXPERT ONLY. You should usually use the supplied osm2pgsql.
 NOMINATIM_OSM2PGSQL_BINARY=
 
index 50bb72d2eed9c03b9a9d76823b684a9413c88277..bebd49e924bcf08fa3da5cbb04c5cf4d3c486533 100644 (file)
@@ -41,6 +41,8 @@ sanitizers:
       mode: append
 token-analysis:
     - analyzer: generic
+    - id: "@housenumber"
+      analyzer: housenumbers
     - id: bg
       analyzer: generic
       mode: variant-only
index 4d42da9f072d8d9abea3e4171bb137f39ace0b96..106bc8bb2cd0c1db653c22a03d2b2064aa9916c2 100644 (file)
@@ -9,10 +9,10 @@ Feature: Searching of house numbers
          |   |   |   |   | 4 |
 
 
-    Scenario: A simple numeral housenumber is found
+    Scenario: A simple ascii digit housenumber is found
         Given the places
-         | osm | class    | type | housenr | geometry |
-         | N1  | building | yes  | 45      | 9        |
+         | osm | class    | type | housenr  | geometry |
+         | N1  | building | yes  | 45       | 9        |
         And the places
          | osm | class   | type | name       | geometry |
          | W10 | highway | path | North Road | 1,2,3    |
@@ -27,6 +27,35 @@ Feature: Searching of house numbers
          | N1  |
 
 
+    @fail-legacy
+    Scenario Outline: Numeral housenumbers in any script are found
+        Given the places
+         | osm | class    | type | housenr  | geometry |
+         | N1  | building | yes  | <number> | 9        |
+        And the places
+         | osm | class   | type | name       | geometry |
+         | W10 | highway | path | North Road | 1,2,3    |
+        When importing
+        And sending search query "45, North Road"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "North Road ④⑤"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "North Road 𑁪𑁫"
+        Then results contain
+         | osm |
+         | N1  |
+
+    Examples:
+        | number |
+        | 45     |
+        | ④⑤     |
+        | 𑁪𑁫     |
+
+
     Scenario Outline: Each housenumber in a list is found
         Given the places
          | osm | class    | type | housenr | geometry |
@@ -55,6 +84,202 @@ Feature: Searching of house numbers
         | 2, 4, 12 |
 
 
+    @fail-legacy
+    Scenario Outline: Housenumber - letter combinations are found
+        Given the places
+         | osm | class    | type | housenr | geometry |
+         | N1  | building | yes  | <hnr>   | 9        |
+        And the places
+         | osm | class   | type | name     | geometry |
+         | W10 | highway | path | Multistr | 1,2,3    |
+        When importing
+        When sending search query "2A Multistr"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "2 a Multistr"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "2-A Multistr"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "Multistr 2 A"
+        Then results contain
+         | osm |
+         | N1  |
+
+    Examples:
+        | hnr |
+        | 2a  |
+        | 2 A |
+        | 2-a |
+        | 2/A |
+
+
+    Scenario Outline: Number - Number combinations as a housenumber are found
+        Given the places
+         | osm | class    | type | housenr | geometry |
+         | N1  | building | yes  | <hnr>   | 9        |
+        And the places
+         | osm | class   | type | name       | geometry |
+         | W10 | highway | path | Chester St | 1,2,3    |
+        When importing
+        When sending search query "34-10 Chester St"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "34/10 Chester St"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "34 10 Chester St"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "3410 Chester St"
+        Then results contain
+         | osm |
+         | W10 |
+
+    Examples:
+        | hnr   |
+        | 34-10 |
+        | 34 10 |
+        | 34/10 |
+
+
+    @fail-legacy
+    Scenario Outline: a bis housenumber is found
+        Given the places
+         | osm | class    | type | housenr | geometry |
+         | N1  | building | yes  | <hnr>   | 9        |
+        And the places
+         | osm | class   | type | name       | geometry |
+         | W10 | highway | path | Rue Paris | 1,2,3    |
+        When importing
+        When sending search query "Rue Paris 45bis"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "Rue Paris 45 BIS"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "Rue Paris 45BIS"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "Rue Paris 45 bis"
+        Then results contain
+         | osm |
+         | N1  |
+
+    Examples:
+        | hnr   |
+        | 45bis |
+        | 45BIS |
+        | 45 BIS |
+        | 45 bis |
+
+
+    @fail-legacy
+    Scenario Outline: a ter housenumber is found
+        Given the places
+         | osm | class    | type | housenr | geometry |
+         | N1  | building | yes  | <hnr>   | 9        |
+        And the places
+         | osm | class   | type | name       | geometry |
+         | W10 | highway | path | Rue du Berger | 1,2,3    |
+        When importing
+        When sending search query "Rue du Berger 45ter"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "Rue du Berger 45 TER"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "Rue du Berger 45TER"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "Rue du Berger 45 ter"
+        Then results contain
+         | osm |
+         | N1  |
+
+    Examples:
+        | hnr   |
+        | 45ter |
+        | 45TER |
+        | 45 ter |
+        | 45 TER |
+
+
+    @fail-legacy
+    Scenario Outline: a number - letter - number combination housenumber is found
+        Given the places
+         | osm | class    | type | housenr | geometry |
+         | N1  | building | yes  | <hnr>   | 9        |
+        And the places
+         | osm | class   | type | name       | geometry |
+         | W10 | highway | path | Herengracht | 1,2,3    |
+        When importing
+        When sending search query "501-H 1 Herengracht"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "501H-1 Herengracht"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "501H1 Herengracht"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "501-H1 Herengracht"
+        Then results contain
+         | osm |
+         | N1  |
+
+    Examples:
+        | hnr |
+        | 501 H1 |
+        | 501H 1 |
+        | 501/H/1 |
+        | 501h1 |
+
+
+    @fail-legacy
+    Scenario Outline: Russian housenumbers are found
+        Given the places
+         | osm | class    | type | housenr | geometry |
+         | N1  | building | yes  | <hnr>   | 9        |
+        And the places
+         | osm | class   | type | name       | geometry |
+         | W10 | highway | path | Голубинская улица | 1,2,3    |
+        When importing
+        When sending search query "Голубинская улица 55к3"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "Голубинская улица 55 k3"
+        Then results contain
+         | osm |
+         | N1  |
+        When sending search query "Голубинская улица 55 к-3"
+        Then results contain
+         | osm |
+         | N1  |
+
+    Examples:
+        | hnr |
+        | 55к3 |
+        | 55 к3 |
+
+
     Scenario: A name mapped as a housenumber is found
         Given the places
          | osm | class    | type | housenr | geometry |
diff --git a/test/bdd/db/query/interpolation.feature b/test/bdd/db/query/interpolation.feature
new file mode 100644 (file)
index 0000000..602ac43
--- /dev/null
@@ -0,0 +1,58 @@
+@DB
+Feature: Query of address interpolations
+    Tests that interpolated addresses can be queried correctly
+
+    Background:
+        Given the grid
+          | 1  |  | 2  |  | 3  |
+          | 10 |  | 12 |  | 13 |
+          | 7  |  | 8  |  | 9  |
+
+    Scenario: Find interpolations with single number
+        Given the places
+          | osm | class   | type    | name    | geometry |
+          | W10 | highway | primary | Nickway | 10,12,13 |
+        And the places
+          | osm | class | type   | addr+interpolation | geometry |
+          | W1  | place | houses | odd                | 1,3      |
+        And the places
+          | osm | class | type  | housenr | geometry |
+          | N1  | place | house | 1       | 1        |
+          | N3  | place | house | 5       | 3        |
+        And the ways
+          | id | nodes |
+          | 1  | 1,3   |
+        When importing
+        When sending jsonv2 reverse point 2
+        Then results contain
+          | ID | display_name |
+          | 0  | 3, Nickway   |
+        When sending search query "Nickway 3"
+        Then results contain
+          | osm | display_name |
+          | W1  | 3, Nickway   |
+
+
+    Scenario: Find interpolations with multiple numbers
+        Given the places
+          | osm | class   | type    | name    | geometry |
+          | W10 | highway | primary | Nickway | 10,12,13 |
+        And the places
+          | osm | class | type   | addr+interpolation | geometry |
+          | W1  | place | houses | even               | 1,3      |
+        And the places
+          | osm | class | type  | housenr | geometry |
+          | N1  | place | house | 2       | 1        |
+          | N3  | place | house | 16      | 3        |
+        And the ways
+          | id | nodes |
+          | 1  | 1,3   |
+        When importing
+        When sending jsonv2 reverse point 2
+        Then results contain
+          | ID | display_name | centroid |
+          | 0  | 10, Nickway  | 2 |
+        When sending search query "Nickway 10"
+        Then results contain
+          | osm | display_name  | centroid |
+          | W1  | 10, Nickway   | 2 |
index fa6ab7fb3a051c46bcd9bd0a50b446b23f668a61..fa841d25be068147b298fa20124d06366d35c997 100644 (file)
@@ -62,6 +62,8 @@ class GenericResponse:
 
         if errorcode == 200 and fmt != 'debug':
             getattr(self, '_parse_' + fmt)()
+        else:
+            print("Bad response: ", page)
 
     def _parse_json(self):
         m = re.fullmatch(r'([\w$][^(]*)\((.*)\)', self.page)
@@ -128,7 +130,7 @@ class GenericResponse:
                    "\nBad value for row {} field '{}' in address. Expected: {}, got: {}.\nFull address: {}"""\
                        .format(idx, field, value, address[field], json.dumps(address, indent=4))
 
-    def match_row(self, row):
+    def match_row(self, row, context=None):
         """ Match the result fields against the given behave table row.
         """
         if 'ID' in row.headings:
@@ -151,7 +153,12 @@ class GenericResponse:
                     assert self.result[i]['osm_type'] in (OSM_TYPE[value[0]], value[0]), \
                            BadRowValueAssert(self, i, 'osm_type', value)
                 elif name == 'centroid':
-                    lon, lat = value.split(' ')
+                    if ' ' in value:
+                        lon, lat = value.split(' ')
+                    elif context is not None:
+                        lon, lat = context.osm.grid_node(int(value))
+                    else:
+                        raise RuntimeError("Context needed when using grid coordinates")
                     self.assert_field(i, 'lat', float(lat))
                     self.assert_field(i, 'lon', float(lon))
                 else:
index 0fda8f0866f1d53a5119fcb6aba63a8cfc466f6b..22517338bab04f664198222680f273ae358882a1 100644 (file)
@@ -153,6 +153,20 @@ def website_reverse_request(context, fmt, lat, lon):
 
     context.response = ReverseResponse(outp, fmt or 'xml', status)
 
+@when(u'sending (?P<fmt>\S+ )?reverse point (?P<nodeid>.+)')
+def website_reverse_point_request(context, fmt, nodeid):
+    params = {}
+    if fmt and fmt.strip() == 'debug':
+        params['debug'] = '1'
+    params['lon'], params['lat'] = (f'{c:f}' for c in context.osm.grid_node(int(nodeid)))
+
+
+    outp, status = send_api_query('reverse', params, fmt, context)
+
+    context.response = ReverseResponse(outp, fmt or 'xml', status)
+
+
+
 @when(u'sending (?P<fmt>\S+ )?details query for (?P<query>.*)')
 def website_details_request(context, fmt, query):
     params = {}
@@ -238,7 +252,7 @@ def step_impl(context):
     context.execute_steps("then at least 1 result is returned")
 
     for line in context.table:
-        context.response.match_row(line)
+        context.response.match_row(line, context=context)
 
 @then(u'result (?P<lid>\d+ )?has (?P<neg>not )?attributes (?P<attrs>.*)')
 def validate_attributes(context, lid, neg, attrs):
index a7363958859ac0cece72ffd9dc0da443093f788c..08fd60a227b0e29be9dbe5777689639641d23d2b 100644 (file)
@@ -58,11 +58,21 @@ class MockIcuWordTable:
         self.conn.commit()
 
 
-    def add_housenumber(self, word_id, word_token):
+    def add_housenumber(self, word_id, word_tokens, word=None):
         with self.conn.cursor() as cur:
-            cur.execute("""INSERT INTO word (word_id, word_token, type)
-                              VALUES (%s, %s, 'H')
-                        """, (word_id, word_token))
+            if isinstance(word_tokens, str):
+                # old style without analyzer
+                cur.execute("""INSERT INTO word (word_id, word_token, type)
+                                  VALUES (%s, %s, 'H')
+                            """, (word_id, word_tokens))
+            else:
+                if word is None:
+                    word = word_tokens[0]
+                for token in word_tokens:
+                    cur.execute("""INSERT INTO word (word_id, word_token, type, word, info)
+                                      VALUES (%s, %s, 'H', %s, jsonb_build_object('lookup', %s))
+                                """, (word_id, token, word, word_tokens[0]))
+
         self.conn.commit()
 
 
index 372df9d2d4d56672f8478c2bb2372cbcf9821475..d85a5b65e565d83187b2688839afe72c1f175fcb 100644 (file)
@@ -72,12 +72,15 @@ def analyzer(tokenizer_factory, test_config, monkeypatch,
 
     def _mk_analyser(norm=("[[:Punctuation:][:Space:]]+ > ' '",), trans=(':: upper()',),
                      variants=('~gasse -> gasse', 'street => st', ),
-                     sanitizers=[]):
+                     sanitizers=[], with_housenumber=False):
         cfgstr = {'normalization': list(norm),
                   'sanitizers': sanitizers,
                   'transliteration': list(trans),
                   'token-analysis': [{'analyzer': 'generic',
                                       'variants': [{'words': list(variants)}]}]}
+        if with_housenumber:
+            cfgstr['token-analysis'].append({'id': '@housenumber',
+                                             'analyzer': 'housenumbers'})
         (test_config.project_dir / 'icu_tokenizer.yaml').write_text(yaml.dump(cfgstr))
         tok.loader = nominatim.tokenizer.icu_rule_loader.ICURuleLoader(test_config)
 
@@ -556,6 +559,67 @@ class TestPlaceAddress:
         assert 'addr' not in info
 
 
+class TestPlaceHousenumberWithAnalyser:
+
+    @pytest.fixture(autouse=True)
+    def setup(self, analyzer, sql_functions):
+        hnr = {'step': 'clean-housenumbers',
+               'filter-kind': ['housenumber', 'conscriptionnumber', 'streetnumber']}
+        with analyzer(trans=(":: upper()", "'🜵' > ' '"), sanitizers=[hnr], with_housenumber=True) as anl:
+            self.analyzer = anl
+            yield anl
+
+
+    @pytest.fixture
+    def getorcreate_hnr_id(self, temp_db_cursor):
+        temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION create_analyzed_hnr_id(norm_term TEXT, lookup_terms TEXT[])
+                                  RETURNS INTEGER AS $$
+                                    SELECT -nextval('seq_word')::INTEGER; $$ LANGUAGE SQL""")
+
+
+    def process_address(self, **kwargs):
+        return self.analyzer.process_place(PlaceInfo({'address': kwargs}))
+
+
+    def name_token_set(self, *expected_terms):
+        tokens = self.analyzer.get_word_token_info(expected_terms)
+        for token in tokens:
+            assert token[2] is not None, "No token for {0}".format(token)
+
+        return set((t[2] for t in tokens))
+
+
+    @pytest.mark.parametrize('hnr', ['123 a', '1', '101'])
+    def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id):
+        info = self.process_address(housenumber=hnr)
+
+        assert info['hnr'] == hnr.upper()
+        assert info['hnr_tokens'] == "{-1}"
+
+
+    def test_process_place_housenumbers_duplicates(self, getorcreate_hnr_id):
+        info = self.process_address(housenumber='134',
+                                    conscriptionnumber='134',
+                                    streetnumber='99a')
+
+        assert set(info['hnr'].split(';')) == set(('134', '99 A'))
+        assert info['hnr_tokens'] == "{-1,-2}"
+
+
+    def test_process_place_housenumbers_cached(self, getorcreate_hnr_id):
+        info = self.process_address(housenumber="45")
+        assert info['hnr_tokens'] == "{-1}"
+
+        info = self.process_address(housenumber="46")
+        assert info['hnr_tokens'] == "{-2}"
+
+        info = self.process_address(housenumber="41;45")
+        assert eval(info['hnr_tokens']) == {-1, -3}
+
+        info = self.process_address(housenumber="41")
+        assert eval(info['hnr_tokens']) == {-3}
+
+
 class TestUpdateWordTokens:
 
     @pytest.fixture(autouse=True)
@@ -575,8 +639,20 @@ class TestUpdateWordTokens:
         return _insert
 
 
+    @pytest.fixture(params=['simple', 'analyzed'])
+    def add_housenumber(self, request, word_table):
+        if request.param == 'simple':
+            def _make(hid, hnr):
+                word_table.add_housenumber(hid, hnr)
+        elif request.param == 'analyzed':
+            def _make(hid, hnr):
+                word_table.add_housenumber(hid, [hnr])
+
+        return _make
+
+
     @pytest.mark.parametrize('hnr', ('1a', '1234567', '34 5'))
-    def test_remove_unused_housenumbers(self, word_table, hnr):
+    def test_remove_unused_housenumbers(self, add_housenumber, word_table, hnr):
         word_table.add_housenumber(1000, hnr)
 
         assert word_table.count_housenumbers() == 1
@@ -584,17 +660,17 @@ class TestUpdateWordTokens:
         assert word_table.count_housenumbers() == 0
 
 
-    def test_keep_unused_numeral_housenumbers(self, word_table):
-        word_table.add_housenumber(1000, '5432')
+    def test_keep_unused_numeral_housenumbers(self, add_housenumber, word_table):
+        add_housenumber(1000, '5432')
 
         assert word_table.count_housenumbers() == 1
         self.tok.update_word_tokens()
         assert word_table.count_housenumbers() == 1
 
 
-    def test_keep_housenumbers_from_search_name_table(self, word_table, search_entry):
-        word_table.add_housenumber(9999, '5432a')
-        word_table.add_housenumber(9991, '9 a')
+    def test_keep_housenumbers_from_search_name_table(self, add_housenumber, word_table, search_entry):
+        add_housenumber(9999, '5432a')
+        add_housenumber(9991, '9 a')
         search_entry(123, 9999, 34)
 
         assert word_table.count_housenumbers() == 2
@@ -602,9 +678,9 @@ class TestUpdateWordTokens:
         assert word_table.count_housenumbers() == 1
 
 
-    def test_keep_housenumbers_from_placex_table(self, word_table, placex_table):
-        word_table.add_housenumber(9999, '5432a')
-        word_table.add_housenumber(9990, '34z')
+    def test_keep_housenumbers_from_placex_table(self, add_housenumber, word_table, placex_table):
+        add_housenumber(9999, '5432a')
+        add_housenumber(9990, '34z')
         placex_table.add(housenumber='34z')
         placex_table.add(housenumber='25432a')
 
@@ -613,9 +689,9 @@ class TestUpdateWordTokens:
         assert word_table.count_housenumbers() == 1
 
 
-    def test_keep_housenumbers_from_placex_table_hnr_list(self, word_table, placex_table):
-        word_table.add_housenumber(9991, '9 b')
-        word_table.add_housenumber(9990, '34z')
+    def test_keep_housenumbers_from_placex_table_hnr_list(self, add_housenumber, word_table, placex_table):
+        add_housenumber(9991, '9 b')
+        add_housenumber(9990, '34z')
         placex_table.add(housenumber='9 a;9 b;9 c')
 
         assert word_table.count_housenumbers() == 2
index 9b008cc5ee47042a90fe2b3ba54cb13915d4e8b6..afbd5e9bf813590ff6537f4893fd8325b48f1d09 100644 (file)
@@ -32,8 +32,9 @@ def make_analyser(*variants, variant_only=False):
         rules['mode'] = 'variant-only'
     config = module.configure(rules, DEFAULT_NORMALIZATION)
     trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
+    norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
 
-    return module.create(trans, config)
+    return module.create(norm, trans, config)
 
 
 def get_normalized_variants(proc, name):
@@ -45,8 +46,9 @@ def test_no_variants():
     rules = { 'analyzer': 'generic' }
     config = module.configure(rules, DEFAULT_NORMALIZATION)
     trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
+    norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
 
-    proc = module.create(trans, config)
+    proc = module.create(norm, trans, config)
 
     assert get_normalized_variants(proc, '大德!') == ['dà dé']
 
index 757f03112d47c1ff8a73f31c8eed696636238e9e..abe31f6d468ac631f86dbd1a1dc8d25205bbcdcc 100644 (file)
@@ -33,8 +33,9 @@ class TestMutationNoVariants:
                 }
         config = module.configure(rules, DEFAULT_NORMALIZATION)
         trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
+        norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
 
-        self.analysis = module.create(trans, config)
+        self.analysis = module.create(norm, trans, config)
 
 
     def variants(self, name):
index 4877e0adab13c91585dd0d4eb8497b261e0708dc..c9278f9ef36f7f53f586d78b30edf818bf2cfc5c 100755 (executable)
@@ -125,7 +125,7 @@ fi                                    #DOCS:
 #
 if [ "x$1" == "xyes" ]; then  #DOCS:    :::sh
     cd $USERHOME
-    git clone --recursive git://github.com/openstreetmap/Nominatim.git
+    git clone --recursive https://github.com/openstreetmap/Nominatim.git
     cd Nominatim
 else                               #DOCS:
     cd $USERHOME/Nominatim         #DOCS:
index a1a1fe304d6a8b3f1890479e02644e92fa13cc05..40ee7ba8f63a5be109cfb0175389ce9f682e433b 100755 (executable)
@@ -105,7 +105,7 @@ fi                                    #DOCS:
 #
 if [ "x$1" == "xyes" ]; then  #DOCS:    :::sh
     cd $USERHOME
-    git clone --recursive git://github.com/openstreetmap/Nominatim.git
+    git clone --recursive https://github.com/openstreetmap/Nominatim.git
     cd Nominatim
 else                               #DOCS:
     cd $USERHOME/Nominatim         #DOCS:
index 1fbabf24227bac156b3f3d69c67395a7731add89..68bd6b04e287f6ac8615492a13cdd3d535617f4f 100755 (executable)
@@ -99,7 +99,7 @@ fi                                    #DOCS:
 #
 if [ "x$1" == "xyes" ]; then  #DOCS:    :::sh
     cd $USERHOME
-    git clone --recursive git://github.com/openstreetmap/Nominatim.git
+    git clone --recursive https://github.com/openstreetmap/Nominatim.git
     cd Nominatim
 else                               #DOCS:
     cd $USERHOME/Nominatim         #DOCS: