X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/efafa5271957fb54b356ec1c90e8613f14de40d4..3bf3b894eaddd4f17f7e92353af6b2aa6200ab20:/test/bdd/steps/steps_db_ops.py

diff --git a/test/bdd/steps/steps_db_ops.py b/test/bdd/steps/steps_db_ops.py
index 4970f6df..8fd918f8 100644
--- a/test/bdd/steps/steps_db_ops.py
+++ b/test/bdd/steps/steps_db_ops.py
@@ -18,13 +18,19 @@ from nominatim.tokenizer import factory as tokenizer_factory
 def check_database_integrity(context):
     """ Check some generic constraints on the tables.
     """
-    # place_addressline should not have duplicate (place_id, address_place_id)
-    cur = context.db.cursor()
-    cur.execute("""SELECT count(*) FROM
-                    (SELECT place_id, address_place_id, count(*) as c
-                     FROM place_addressline GROUP BY place_id, address_place_id) x
-                   WHERE c > 1""")
-    assert cur.fetchone()[0] == 0, "Duplicates found in place_addressline"
+    with context.db.cursor() as cur:
+        # place_addressline should not have duplicate (place_id, address_place_id)
+        cur.execute("""SELECT count(*) FROM
+                        (SELECT place_id, address_place_id, count(*) as c
+                         FROM place_addressline GROUP BY place_id, address_place_id) x
+                       WHERE c > 1""")
+        assert cur.fetchone()[0] == 0, "Duplicates found in place_addressline"
+
+        # word table must not have empty word_tokens
+        if context.nominatim.tokenizer != 'legacy':
+            cur.execute("SELECT count(*) FROM word WHERE word_token = ''")
+            assert cur.fetchone()[0] == 0, "Empty word tokens found in word table"
+
 
 
 ################################ GIVEN ##################################
@@ -93,32 +99,17 @@ def add_data_to_planet_ways(context):
 def import_and_index_data_from_place_table(context):
     """ Import data previously set up in the place table.
     """
-    nctx = context.nominatim
-
-    tokenizer = tokenizer_factory.create_tokenizer(nctx.get_test_config())
-    context.nominatim.copy_from_place(context.db)
-
-    # XXX use tool function as soon as it is ported
-    with context.db.cursor() as cur:
-        with (context.nominatim.src_dir / 'lib-sql' / 'postcode_tables.sql').open('r') as fd:
-            cur.execute(fd.read())
-        cur.execute("""
-            INSERT INTO location_postcode
-              (place_id, indexed_status, country_code, postcode, geometry)
-            SELECT nextval('seq_place'), 1, country_code,
-                   upper(trim (both ' ' from address->'postcode')) as pc,
-                   ST_Centroid(ST_Collect(ST_Centroid(geometry)))
-              FROM placex
-             WHERE address ? 'postcode' AND address->'postcode' NOT SIMILAR TO '%(,|;)%'
-                   AND geometry IS NOT null
-             GROUP BY country_code, pc""")
-
-    # Call directly as the refresh function does not include postcodes.
-    indexer.LOG.setLevel(logging.ERROR)
-    indexer.Indexer(context.nominatim.get_libpq_dsn(), tokenizer, 1).index_full(analyse=False)
+    context.nominatim.run_nominatim('import', '--continue', 'load-data',
+                                    '--index-noanalyse', '-q',
+                                    '--offline')
 
     check_database_integrity(context)
 
+    # Drop the captured log output now that the step has succeeded. Otherwise
+    # it would be printed together with errors that have nothing to do with
+    # this step.
+    context.log_capture.buffer.clear()
+
 
 @when("updating places")
 def update_place_table(context):
     """ Update the place table with the given data. Also runs all triggers
@@ -132,6 +123,12 @@ def update_place_table(context):
     context.nominatim.reindex_placex(context.db)
     check_database_integrity(context)
 
+    # Drop the captured log output now that the step has succeeded. Otherwise
+    # it would be printed together with errors that have nothing to do with
+    # this step.
+    context.log_capture.buffer.clear()
+
+
 @when("updating postcodes")
 def update_postcodes(context):
     """ Rerun the calculation of postcodes.
@@ -151,6 +148,11 @@ def delete_places(context, oids):
     context.nominatim.reindex_placex(context.db)
 
+    # Drop the captured log output now that the step has succeeded. Otherwise
+    # it would be printed together with errors that have nothing to do with
+    # this step.
+    context.log_capture.buffer.clear()
+
 
 ################################ THEN ##################################
 
 @then("(?P
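
For reference, the integrity checks introduced by this patch can also be run by hand against a test database. The sketch below reuses the SQL from the hunks above verbatim, but everything around it is an assumption: the psycopg2 connection, the dsn value and the tokenizer variable are placeholders for whatever the test environment actually provides, not part of the BDD code.

# Standalone sketch of the integrity checks added in this patch.
# Assumes psycopg2 is installed; the DSN and tokenizer name below are
# placeholders for the real test setup.
import psycopg2

dsn = 'dbname=test_api_nominatim'   # hypothetical test database
tokenizer = 'icu'                   # any value except 'legacy' enables the word check

with psycopg2.connect(dsn) as conn:
    with conn.cursor() as cur:
        # place_addressline must not contain duplicate
        # (place_id, address_place_id) pairs.
        cur.execute("""SELECT count(*) FROM
                        (SELECT place_id, address_place_id, count(*) as c
                         FROM place_addressline GROUP BY place_id, address_place_id) x
                       WHERE c > 1""")
        assert cur.fetchone()[0] == 0, "Duplicates found in place_addressline"

        # Non-legacy tokenizers must not leave empty word tokens behind.
        if tokenizer != 'legacy':
            cur.execute("SELECT count(*) FROM word WHERE word_token = ''")
            assert cur.fetchone()[0] == 0, "Empty word tokens found in word table"

Opening the cursor in a with block, as the patch now does in check_database_integrity(), ensures the cursor is closed even when one of the assertions fires.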