git.openstreetmap.org Git - nominatim.git/blobdiff - test/bdd/steps/db_ops.py
Merge remote-tracking branch 'upstream/master'
index 30c216a5f22f937e2b9b1ea85e3d189a0df9b061..377c977d412986de192537928117ff39b86188b0 100644
@@ -22,6 +22,8 @@ class PlaceColumn:
             self.add_hstore('extratags', key[6:], value)
         elif key.startswith('addr+'):
             self.add_hstore('address', key[5:], value)
+        elif key in ('name', 'address', 'extratags'):
+            self.columns[key] = eval('{' + value + '}')
         else:
             assert_in(key, ('class', 'type'))
             self.columns[key] = None if value == '' else value
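
The new branch above turns a scenario cell such as 'city': 'Berlin' into a Python dict by wrapping it in braces and evaluating it. A minimal standalone sketch of that parsing step (the helper name is hypothetical, and ast.literal_eval is used here as a safer stand-in for the eval() in the diff):

    import ast

    def parse_composite(value):
        # hypothetical helper mirroring the "eval('{' + value + '}')" branch above;
        # literal_eval only accepts Python literals instead of arbitrary code
        return ast.literal_eval('{' + value + '}')

    assert parse_composite("'city': 'Berlin', 'street': 'Hauptstr.'") == \
        {'city': 'Berlin', 'street': 'Hauptstr.'}
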
@@ -108,6 +110,9 @@ class PlaceObjName(object):
         if self.pid is None:
             return "<null>"
 
+        if self.pid == 0:
+            return "place ID 0"
+
         cur = self.conn.cursor()
         cur.execute("""SELECT osm_type, osm_id, class
                        FROM placex WHERE place_id = %s""",
@@ -130,6 +135,18 @@ def compare_place_id(expected, result, column, context):
             LazyFmt("Bad place id in column %s. Expected: %s, got: %s.",
                     column, expected, PlaceObjName(result, context.db)))
 
+def check_database_integrity(context):
+    """ Check some generic constraints on the tables.
+    """
+    # place_addressline should not have duplicate (place_id, address_place_id)
+    cur = context.db.cursor()
+    cur.execute("""SELECT count(*) FROM
+                    (SELECT place_id, address_place_id, count(*) as c
+                     FROM place_addressline GROUP BY place_id, address_place_id) x
+                   WHERE c > 1""")
+    eq_(0, cur.fetchone()[0], "Duplicates found in place_addressline")
+
+
 class NominatimID:
     """ Splits a unique identifier for places into its components.
         As place_ids cannot be used for testing, we use a unique
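
The new check_database_integrity() asserts that no (place_id, address_place_id) pair occurs twice in place_addressline. The same constraint, expressed on plain Python data for illustration only (the rows and helper name below are made up):

    from collections import Counter

    def has_duplicate_pairs(rows):
        # count (place_id, address_place_id) pairs, mirroring the
        # GROUP BY / count(*) subquery in the SQL above
        counts = Counter((r['place_id'], r['address_place_id']) for r in rows)
        return any(c > 1 for c in counts.values())

    rows = [{'place_id': 1, 'address_place_id': 10},
            {'place_id': 1, 'address_place_id': 11},
            {'place_id': 2, 'address_place_id': 10}]
    assert not has_duplicate_pairs(rows)
    assert has_duplicate_pairs(rows + [{'place_id': 1, 'address_place_id': 10}])

In SQL the same condition could equally be written with GROUP BY ... HAVING count(*) > 1.
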
@@ -183,10 +200,18 @@ def assert_db_column(row, column, value, context):
         return
 
     if column.startswith('centroid'):
-        fac = float(column[9:]) if column.startswith('centroid*') else 1.0
-        x, y = value.split(' ')
-        assert_almost_equal(float(x) * fac, row['cx'], "Bad x coordinate")
-        assert_almost_equal(float(y) * fac, row['cy'], "Bad y coordinate")
+        if value == 'in geometry':
+            query = """SELECT ST_Within(ST_SetSRID(ST_Point({}, {}), 4326),
+                                        ST_SetSRID('{}'::geometry, 4326))""".format(
+                      row['cx'], row['cy'], row['geomtxt'])
+            cur = context.db.cursor()
+            cur.execute(query)
+            eq_(cur.fetchone()[0], True, "(Row %s failed: %s)" % (column, query))
+        else:
+            fac = float(column[9:]) if column.startswith('centroid*') else 1.0
+            x, y = value.split(' ')
+            assert_almost_equal(float(x) * fac, row['cx'], msg="Bad x coordinate")
+            assert_almost_equal(float(y) * fac, row['cy'], msg="Bad y coordinate")
     elif column == 'geometry':
         geom = context.osm.parse_geometry(value, context.scene)
         cur = context.db.cursor()
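
The new 'in geometry' branch builds an ST_Within query by interpolating the row's centroid and WKT geometry into the SQL string. For illustration only, this is the kind of statement it produces for made-up values (in the test they come from the placex row being checked):

    query_template = """SELECT ST_Within(ST_SetSRID(ST_Point({}, {}), 4326),
                                ST_SetSRID('{}'::geometry, 4326))"""
    cx, cy = 4.5, 52.0                                 # made-up centroid
    geomtxt = 'POLYGON((4 51,5 51,5 53,4 53,4 51))'    # made-up WKT geometry
    print(query_template.format(cx, cy, geomtxt))

String interpolation is tolerable in this test helper because geomtxt comes straight from the database; production code would normally pass such values as query parameters instead.
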
@@ -278,16 +303,17 @@ def import_and_index_data_from_place_table(context):
     context.nominatim.run_setup_script('create-functions', 'create-partition-functions')
     cur = context.db.cursor()
     cur.execute(
-        """insert into placex (osm_type, osm_id, class, type, name, admin_level,
-           address, extratags, geometry)
-           select * from place where not (class='place' and type='houses' and osm_type='W')""")
+        """insert into placex (osm_type, osm_id, class, type, name, admin_level, address, extratags, geometry)
+           select              osm_type, osm_id, class, type, name, admin_level, address, extratags, geometry
+           from place where not (class='place' and type='houses' and osm_type='W')""")
     cur.execute(
             """insert into location_property_osmline (osm_id, address, linegeo)
              SELECT osm_id, address, geometry from place
               WHERE class='place' and type='houses' and osm_type='W'
                     and ST_GeometryType(geometry) = 'ST_LineString'""")
     context.db.commit()
-    context.nominatim.run_setup_script('index', 'index-noanalyse')
+    context.nominatim.run_setup_script('calculate-postcodes', 'index', 'index-noanalyse')
+    check_database_integrity(context)
 
 @when("updating places")
 def update_place_table(context):
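
The rewritten INSERT now names its columns on both sides instead of relying on select *, presumably so the statement no longer depends on the column layout of the place table. A tiny sqlite3 sketch (a stand-in for PostgreSQL, with made-up tables) of why an explicit column list matters when the source and target orders differ:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute("CREATE TABLE place (osm_id INTEGER, name TEXT)")
    conn.execute("CREATE TABLE placex (name TEXT, osm_id INTEGER)")   # different order
    conn.execute("INSERT INTO place VALUES (42, 'Bristol')")

    conn.execute("INSERT INTO placex SELECT * FROM place")            # matched by position
    conn.execute("INSERT INTO placex (name, osm_id) SELECT name, osm_id FROM place")

    print(list(conn.execute("SELECT name, osm_id FROM placex")))
    # the first row has the values swapped, only the second row is correct
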
@@ -312,6 +338,12 @@ def update_place_table(context):
         if cur.rowcount == 0:
             break
 
+    check_database_integrity(context)
+
+@when("updating postcodes")
+def update_postcodes(context):
+    context.nominatim.run_update_script('calculate-postcodes')
+
 @when("marking for delete (?P<oids>.*)")
 def delete_places(context, oids):
     context.nominatim.run_setup_script(
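
The step definitions in this file are bound to scenario text through regular expressions (the (?P<oids>.*) group in the step above implies behave's regex step matcher is in use). A simplified, self-contained stand-in for that dispatch, with made-up step texts and function names:

    import re

    step_patterns = {                       # pattern -> step function name (illustrative)
        "updating postcodes": "update_postcodes",
        r"marking for delete (?P<oids>.*)": "delete_places",
    }

    def dispatch(step_text):
        for pattern, func_name in step_patterns.items():
            m = re.fullmatch(pattern, step_text)
            if m:
                return func_name, m.groupdict()   # named groups become keyword args
        raise LookupError("no step definition for: " + step_text)

    assert dispatch("updating postcodes") == ("update_postcodes", {})
    assert dispatch("marking for delete N1,W2") == ("delete_places", {"oids": "N1,W2"})
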
@@ -348,7 +380,13 @@ def check_placex_contents(context, exact):
             if exact:
                 expected_content.add((res['osm_type'], res['osm_id'], res['class']))
             for h in row.headings:
-                if h.startswith('name'):
+                if h in ('extratags', 'address'):
+                    if row[h] == '-':
+                        assert_is_none(res[h])
+                    else:
+                        vdict = eval('{' + row[h] + '}')
+                        assert_equals(vdict, res[h])
+                elif h.startswith('name'):
                     name = h[5:] if h.startswith('name+') else 'name'
                     assert_in(name, res['name'])
                     eq_(res['name'][name], row[h])
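
For the 'address' and 'extratags' columns the table cell '-' now asserts a NULL database value, while any other cell is parsed into a dict and compared as a whole. A small illustration of that convention (the helper name is hypothetical):

    def expected_value(cell):
        # hypothetical helper mirroring the branch added above
        if cell == '-':
            return None                     # '-' in the scenario table means NULL
        return eval('{' + cell + '}')       # same parsing trick as in the diff

    assert expected_value('-') is None
    assert expected_value("'maxspeed': '30'") == {'maxspeed': '30'}
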
@@ -419,8 +457,8 @@ def check_placex_contents(context, exact):
 
     context.db.commit()
 
-@then("search_name contains")
-def check_search_name_contents(context):
+@then("search_name contains(?P<exclude> not)?")
+def check_search_name_contents(context, exclude):
     cur = context.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
 
     for row in context.table:
@@ -432,23 +470,75 @@ def check_search_name_contents(context):
         for res in cur:
             for h in row.headings:
                 if h in ('name_vector', 'nameaddress_vector'):
-                    terms = [x.strip().replace('#', ' ') for x in row[h].split(',')]
+                    terms = [x.strip() for x in row[h].split(',') if not x.strip().startswith('#')]
+                    words = [x.strip()[1:] for x in row[h].split(',') if x.strip().startswith('#')]
                     subcur = context.db.cursor()
-                    subcur.execute("""SELECT word_id, word_token
-                                      FROM word, (SELECT unnest(%s) as term) t
-                                      WHERE word_token = make_standard_name(t.term)""",
-                                   (terms,))
-                    ok_(subcur.rowcount >= len(terms),
-                        "No word entry found for " + row[h])
+                    subcur.execute(""" SELECT word_id, word_token
+                                       FROM word, (SELECT unnest(%s::TEXT[]) as term) t
+                                       WHERE word_token = make_standard_name(t.term)
+                                             and class is null and country_code is null
+                                             and operator is null
+                                      UNION
+                                       SELECT word_id, word_token
+                                       FROM word, (SELECT unnest(%s::TEXT[]) as term) t
+                                       WHERE word_token = ' ' || make_standard_name(t.term)
+                                             and class is null and country_code is null
+                                             and operator is null
+                                   """,
+                                   (terms, words))
+                    if not exclude:
+                        ok_(subcur.rowcount >= len(terms),
+                            "No word entry found for " + row[h])
                     for wid in subcur:
-                        assert_in(wid[0], res[h],
-                                  "Missing term for %s/%s: %s" % (pid, h, wid[1]))
+                        if exclude:
+                            assert_not_in(wid[0], res[h],
+                                          "Found term for %s/%s: %s" % (pid, h, wid[1]))
+                        else:
+                            assert_in(wid[0], res[h],
+                                      "Missing term for %s/%s: %s" % (pid, h, wid[1]))
                 else:
                     assert_db_column(res, h, row[h], context)
 
 
     context.db.commit()
 
+@then("location_postcode contains exactly")
+def check_location_postcode(context):
+    cur = context.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+    cur.execute("SELECT *, ST_AsText(geometry) as geomtxt FROM location_postcode")
+    eq_(cur.rowcount, len(list(context.table)),
+        "Postcode table has %d rows, expected %d rows."
+          % (cur.rowcount, len(list(context.table))))
+
+    table = list(cur)
+    for row in context.table:
+        for i in range(len(table)):
+            if table[i]['country_code'] != row['country'] \
+                    or table[i]['postcode'] != row['postcode']:
+                continue
+            for h in row.headings:
+                if h not in ('country', 'postcode'):
+                    assert_db_column(table[i], h, row[h], context)
+
+@then("word contains(?P<exclude> not)?")
+def check_word_table(context, exclude):
+    cur = context.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+    for row in context.table:
+        wheres = []
+        values = []
+        for h in row.headings:
+            wheres.append("%s = %%s" % h)
+            values.append(row[h])
+        cur.execute("SELECT * from word WHERE %s" % ' AND '.join(wheres), values)
+        if exclude:
+            eq_(0, cur.rowcount,
+                "Row still in word table: %s" % '/'.join(values))
+        else:
+            assert_greater(cur.rowcount, 0,
+                           "Row not in word table: %s" % '/'.join(values))
+
 @then("place_addressline contains")
 def check_place_addressline(context):
     cur = context.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
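
In the reworked name_vector/nameaddress_vector check above, a comma-separated cell is split into partial terms and full words: entries starting with '#' are matched against word entries whose word_token carries a leading space, the rest against plain tokens, and the optional ' not' suffix on the step turns the assertion into an exclusion check. A standalone sketch of the splitting rule, with a made-up cell value and a hypothetical helper name:

    def split_cell(cell):
        items = [x.strip() for x in cell.split(',')]
        terms = [x for x in items if not x.startswith('#')]        # partial terms
        words = [x[1:] for x in items if x.startswith('#')]        # full words
        return terms, words

    assert split_cell("foo, #bar baz, qux") == (['foo', 'qux'], ['bar baz'])
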
@@ -470,6 +560,21 @@ def check_place_addressline(context):
 
     context.db.commit()
 
+@then("place_addressline doesn't contain")
+def check_place_addressline_exclude(context):
+    cur = context.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+    for row in context.table:
+        pid = NominatimID(row['object']).get_place_id(cur)
+        apid = NominatimID(row['address']).get_place_id(cur)
+        cur.execute(""" SELECT * FROM place_addressline
+                        WHERE place_id = %s AND address_place_id = %s""",
+                    (pid, apid))
+        eq_(0, cur.rowcount,
+            "Row found for place %s and address %s" % (row['object'], row['address']))
+
+    context.db.commit()
+
 @then("(?P<oid>\w+) expands to(?P<neg> no)? interpolation")
 def check_location_property_osmline(context, oid, neg):
     cur = context.db.cursor(cursor_factory=psycopg2.extras.DictCursor)