diff --git a/test/bdd/steps/db_ops.py b/test/bdd/steps/db_ops.py
index be2211fad9fb17c5ee65eba5ca3350106fd15c12..a0924840e5210947485525c401ee9f38ed68d283 100644
--- a/test/bdd/steps/db_ops.py
+++ b/test/bdd/steps/db_ops.py
@@ -22,6 +22,8 @@ class PlaceColumn:
             self.add_hstore('extratags', key[6:], value)
         elif key.startswith('addr+'):
             self.add_hstore('address', key[5:], value)
+        elif key in ('name', 'address', 'extratags'):
+            self.columns[key] = eval('{' + value + '}')
         else:
             assert_in(key, ('class', 'type'))
             self.columns[key] = None if value == '' else value
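[Note: The new branch above lets a feature-file column named 'name', 'address' or 'extratags' carry a whole key/value set in a single cell; the cell content is wrapped in braces and evaluated as a Python dict literal. A minimal sketch of that behaviour, with a made-up cell value for illustration:

    # Sketch only: "value" mimics a hypothetical table cell for the 'name' column.
    value = "'name': 'Berlin', 'name:de': 'Berlin'"
    columns = {}
    columns['name'] = eval('{' + value + '}')
    assert columns['name'] == {'name': 'Berlin', 'name:de': 'Berlin'}
]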
@@ -130,6 +132,18 @@ def compare_place_id(expected, result, column, context):
             LazyFmt("Bad place id in column %s. Expected: %s, got: %s.",
                     column, expected, PlaceObjName(result, context.db)))
 
+def check_database_integrity(context):
+    """ Check some generic constraints on the tables.
+    """
+    # place_addressline should not have duplicate (place_id, address_place_id)
+    cur = context.db.cursor()
+    cur.execute("""SELECT count(*) FROM
+                    (SELECT place_id, address_place_id, count(*) as c
+                     FROM place_addressline GROUP BY place_id, address_place_id) x
+                   WHERE c > 1""")
+    eq_(0, cur.fetchone()[0], "Duplicates found in place_addressline")
+
+
 class NominatimID:
     """ Splits a unique identifier for places into its components.
         As place_ids cannot be used for testing, we use a unique
@@ -288,6 +302,7 @@ def import_and_index_data_from_place_table(context):
                     and ST_GeometryType(geometry) = 'ST_LineString'""")
     context.db.commit()
     context.nominatim.run_setup_script('calculate-postcodes', 'index', 'index-noanalyse')
+    check_database_integrity(context)
 
 @when("updating places")
 def update_place_table(context):
@@ -312,6 +327,8 @@ def update_place_table(context):
         if cur.rowcount == 0:
             break
 
+    check_database_integrity(context)
+
 @when("marking for delete (?P<oids>.*)")
 def delete_places(context, oids):
     context.nominatim.run_setup_script(
@@ -425,8 +442,8 @@ def check_placex_contents(context, exact):
 
     context.db.commit()
 
-@then("search_name contains")
-def check_search_name_contents(context):
+@then("search_name contains(?P<exclude> not)?")
+def check_search_name_contents(context, exclude):
     cur = context.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
 
     for row in context.table:
@@ -444,11 +461,16 @@ def check_search_name_contents(context):
                                       FROM word, (SELECT unnest(%s) as term) t
                                       WHERE word_token = make_standard_name(t.term)""",
                                    (terms,))
-                    ok_(subcur.rowcount >= len(terms),
-                        "No word entry found for " + row[h])
+                    if not exclude:
+                        ok_(subcur.rowcount >= len(terms),
+                            "No word entry found for " + row[h])
                     for wid in subcur:
-                        assert_in(wid[0], res[h],
-                                  "Missing term for %s/%s: %s" % (pid, h, wid[1]))
+                        if exclude:
+                            assert_not_in(wid[0], res[h],
+                                          "Found term for %s/%s: %s" % (pid, h, wid[1]))
+                        else:
+                            assert_in(wid[0], res[h],
+                                      "Missing term for %s/%s: %s" % (pid, h, wid[1]))
                 else:
                     assert_db_column(res, h, row[h], context)
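[Note: The reworked step pattern makes a negative form of the check available to feature files: when the step text ends in " not", the named group 'exclude' is set and the assertions above flip from assert_in to assert_not_in. A small sketch of how the optional group is captured under plain re matching (behave's 're' step matcher passes the group to the step function the same way); the step texts are illustrative:

    import re

    # The optional group is None for the positive form and " not" for the negative one.
    pattern = re.compile(r"search_name contains(?P<exclude> not)?")
    assert pattern.match("search_name contains").group('exclude') is None
    assert pattern.match("search_name contains not").group('exclude') == " not"
]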