X-Git-Url: https://git.openstreetmap.org./nominatim.git/blobdiff_plain/1f29475fa51455363f8cc7b55bec0e801afc638b..324b1b5575ce1793d90cdb9837230f76acd8169e:/test/bdd/steps/steps_db_ops.py

diff --git a/test/bdd/steps/steps_db_ops.py b/test/bdd/steps/steps_db_ops.py
index 178e1a28..be2789f3 100644
--- a/test/bdd/steps/steps_db_ops.py
+++ b/test/bdd/steps/steps_db_ops.py
@@ -1,8 +1,13 @@
+import logging
+from itertools import chain
+
 import psycopg2.extras
 
 from place_inserter import PlaceColumn
 from table_compare import NominatimID, DBRow
+from nominatim.indexer import indexer
+from nominatim.tokenizer import factory as tokenizer_factory
 
 def check_database_integrity(context):
     """ Check some generic constraints on the tables.
     """
@@ -20,6 +25,9 @@ def check_database_integrity(context):
 
 @given("the (?P<named>named )?places")
 def add_data_to_place_table(context, named):
+    """ Add entries into the place table. 'named places' makes sure that
+        the entries get a random name when none is explicitly given.
+    """
     with context.db.cursor() as cur:
         cur.execute('ALTER TABLE place DISABLE TRIGGER place_before_insert')
         for row in context.table:
@@ -28,6 +36,9 @@ def add_data_to_place_table(context, named):
 
 @given("the relations")
 def add_data_to_planet_relations(context):
+    """ Add entries into the osm2pgsql relation middle table. This is needed
+        for tests on data that looks up members.
+    """
    with context.db.cursor() as cur:
         for r in context.table:
             last_node = 0
@@ -51,28 +62,24 @@ def add_data_to_planet_relations(context):
             else:
                 members = None
 
-            tags = []
-            for h in r.headings:
-                if h.startswith("tags+"):
-                    tags.extend((h[5:], r[h]))
+            tags = chain.from_iterable([(h[5:], r[h]) for h in r.headings if h.startswith("tags+")])
 
             cur.execute("""INSERT INTO planet_osm_rels (id, way_off, rel_off, parts, members, tags)
                            VALUES (%s, %s, %s, %s, %s, %s)""",
-                        (r['id'], last_node, last_way, parts, members, tags))
+                        (r['id'], last_node, last_way, parts, members, list(tags)))
 
 @given("the ways")
 def add_data_to_planet_ways(context):
+    """ Add entries into the osm2pgsql way middle table. This is necessary for
+        tests on data that looks up node ids in this table.
+    """
     with context.db.cursor() as cur:
         for r in context.table:
-            tags = []
-            for h in r.headings:
-                if h.startswith("tags+"):
-                    tags.extend((h[5:], r[h]))
-
+            tags = chain.from_iterable([(h[5:], r[h]) for h in r.headings if h.startswith("tags+")])
             nodes = [ int(x.strip()) for x in r['nodes'].split(',') ]
 
             cur.execute("INSERT INTO planet_osm_ways (id, nodes, tags) VALUES (%s, %s, %s)",
-                        (r['id'], nodes, tags))
+                        (r['id'], nodes, list(tags)))
 
 ################################ WHEN ##################################
 
@@ -80,14 +87,38 @@ def add_data_to_planet_ways(context):
 def import_and_index_data_from_place_table(context):
     """ Import data previously set up in the place table.
""" + nctx = context.nominatim + + tokenizer = tokenizer_factory.create_tokenizer(nctx.get_test_config()) context.nominatim.copy_from_place(context.db) - context.nominatim.run_setup_script('calculate-postcodes', 'index', 'index-noanalyse') + + # XXX use tool function as soon as it is ported + with context.db.cursor() as cur: + with (context.nominatim.src_dir / 'lib-sql' / 'postcode_tables.sql').open('r') as fd: + cur.execute(fd.read()) + cur.execute(""" + INSERT INTO location_postcode + (place_id, indexed_status, country_code, postcode, geometry) + SELECT nextval('seq_place'), 1, country_code, + upper(trim (both ' ' from address->'postcode')) as pc, + ST_Centroid(ST_Collect(ST_Centroid(geometry))) + FROM placex + WHERE address ? 'postcode' AND address->'postcode' NOT SIMILAR TO '%(,|;)%' + AND geometry IS NOT null + GROUP BY country_code, pc""") + + # Call directly as the refresh function does not include postcodes. + indexer.LOG.setLevel(logging.ERROR) + indexer.Indexer(context.nominatim.get_libpq_dsn(), tokenizer, 1).index_full(analyse=False) + check_database_integrity(context) @when("updating places") def update_place_table(context): - context.nominatim.run_setup_script( - 'create-functions', 'create-partition-functions', 'enable-diff-updates') + """ Update the place table with the given data. Also runs all triggers + related to updates and reindexes the new data. + """ + context.nominatim.run_nominatim('refresh', '--functions') with context.db.cursor() as cur: for row in context.table: PlaceColumn(context).add_row(row, False).db_insert(cur) @@ -97,12 +128,17 @@ def update_place_table(context): @when("updating postcodes") def update_postcodes(context): - context.nominatim.run_update_script('calculate-postcodes') + """ Rerun the calculation of postcodes. + """ + context.nominatim.run_nominatim('refresh', '--postcodes') @when("marking for delete (?P.*)") def delete_places(context, oids): - context.nominatim.run_setup_script( - 'create-functions', 'create-partition-functions', 'enable-diff-updates') + """ Remove entries from the place table. Multiple ids may be given + separated by commas. Also runs all triggers + related to updates and reindexes the new data. + """ + context.nominatim.run_nominatim('refresh', '--functions') with context.db.cursor() as cur: for oid in oids.split(','): NominatimID(oid).query_osm_id(cur, 'DELETE FROM place WHERE {}') @@ -113,6 +149,15 @@ def delete_places(context, oids): @then("(?Pplacex|place) contains(?P exactly)?") def check_place_contents(context, table, exact): + """ Check contents of place/placex tables. Each row represents a table row + and all data must match. Data not present in the expected table, may + be arbitry. The rows are identified via the 'object' column which must + have an identifier of the form '[:]'. When multiple + rows match (for example because 'class' was left out and there are + multiple entries for the given OSM object) then all must match. All + expected rows are expected to be present with at least one database row. + When 'exactly' is given, there must not be additional rows in the database. + """ with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur: expected_content = set() for row in context.table: @@ -137,6 +182,9 @@ def check_place_contents(context, table, exact): @then("(?P
placex|place) has no entry for (?P.*)") def check_place_has_entry(context, table, oid): + """ Ensure that no database row for the given object exists. The ID + must be of the form '[:]'. + """ with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur: NominatimID(oid).query_osm_id(cur, "SELECT * FROM %s where {}" % table) assert cur.rowcount == 0, \ @@ -145,50 +193,65 @@ def check_place_has_entry(context, table, oid): @then("search_name contains(?P not)?") def check_search_name_contents(context, exclude): - with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur: - for row in context.table: - nid = NominatimID(row['object']) - nid.row_by_place_id(cur, 'search_name', - ['ST_X(centroid) as cx', 'ST_Y(centroid) as cy']) - assert cur.rowcount > 0, "No rows found for " + row['object'] + """ Check contents of place/placex tables. Each row represents a table row + and all data must match. Data not present in the expected table, may + be arbitry. The rows are identified via the 'object' column which must + have an identifier of the form '[:]'. All + expected rows are expected to be present with at least one database row. + """ + tokenizer = tokenizer_factory.get_tokenizer_for_db(context.nominatim.get_test_config()) + + with tokenizer.name_analyzer() as analyzer: + with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur: + for row in context.table: + nid = NominatimID(row['object']) + nid.row_by_place_id(cur, 'search_name', + ['ST_X(centroid) as cx', 'ST_Y(centroid) as cy']) + assert cur.rowcount > 0, "No rows found for " + row['object'] + + for res in cur: + db_row = DBRow(nid, res, context) + for name, value in zip(row.headings, row.cells): + if name in ('name_vector', 'nameaddress_vector'): + items = [x.strip() for x in value.split(',')] + tokens = analyzer.get_word_token_info(items) - for res in cur: - db_row = DBRow(nid, res, context) - for h in row.headings: - if h in ('name_vector', 'nameaddress_vector'): - terms = [x.strip() for x in row[h].split(',') if not x.strip().startswith('#')] - words = [x.strip()[1:] for x in row[h].split(',') if x.strip().startswith('#')] - with context.db.cursor() as subcur: - subcur.execute(""" SELECT word_id, word_token - FROM word, (SELECT unnest(%s::TEXT[]) as term) t - WHERE word_token = make_standard_name(t.term) - and class is null and country_code is null - and operator is null - UNION - SELECT word_id, word_token - FROM word, (SELECT unnest(%s::TEXT[]) as term) t - WHERE word_token = ' ' || make_standard_name(t.term) - and class is null and country_code is null - and operator is null - """, - (terms, words)) if not exclude: - assert subcur.rowcount >= len(terms) + len(words), \ - "No word entry found for " + row[h] + ". Entries found: " + str(subcur.rowcount) - for wid in subcur: + assert len(tokens) >= len(items), \ + "No word entry found for {}. 
Entries found: {!s}".format(value, len(tokens)) + for word, token, wid in tokens: if exclude: - assert wid[0] not in res[h], "Found term for %s/%s: %s" % (row['object'], h, wid[1]) + assert wid not in res[name], \ + "Found term for {}/{}: {}".format(nid, name, wid) else: - assert wid[0] in res[h], "Missing term for %s/%s: %s" % (row['object'], h, wid[1]) - elif h != 'object': - assert db_row.contains(h, row[h]), db_row.assert_msg(h, row[h]) + assert wid in res[name], \ + "Missing term for {}/{}: {}".format(nid, name, wid) + elif name != 'object': + assert db_row.contains(name, value), db_row.assert_msg(name, value) + +@then("search_name has no entry for (?P.*)") +def check_search_name_has_entry(context, oid): + """ Check that there is noentry in the search_name table for the given + objects. IDs are in format '[:]'. + """ + with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur: + NominatimID(oid).row_by_place_id(cur, 'search_name') + + assert cur.rowcount == 0, \ + "Found {} entries for ID {}".format(cur.rowcount, oid) @then("location_postcode contains exactly") def check_location_postcode(context): + """ Check full contents for location_postcode table. Each row represents a table row + and all data must match. Data not present in the expected table, may + be arbitry. The rows are identified via 'country' and 'postcode' columns. + All rows must be present as excepted and there must not be additional + rows. + """ with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur: cur.execute("SELECT *, ST_AsText(geometry) as geomtxt FROM location_postcode") assert cur.rowcount == len(list(context.table)), \ - "Postcode table has %d rows, expected %d rows." % (cur.rowcount, len(list(context.table))) + "Postcode table has {} rows, expected {}.".format(cur.rowcount, len(list(context.table))) results = {} for row in cur: @@ -199,27 +262,66 @@ def check_location_postcode(context): for row in context.table: db_row = results.get((row['country'],row['postcode'])) assert db_row is not None, \ - "Missing row for country '{}' postcode '{}'.".format(r['country'],['postcode']) + "Missing row for country '{r['country']}' postcode '{r['postcode']}'.".format(r=row) db_row.assert_row(row, ('country', 'postcode')) @then("word contains(?P not)?") def check_word_table(context, exclude): + """ Check the contents of the word table. Each row represents a table row + and all data must match. Data not present in the expected table, may + be arbitry. The rows are identified via all given columns. + """ with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur: for row in context.table: - wheres = [] - values = [] - for h in row.headings: - wheres.append("%s = %%s" % h) - values.append(row[h]) - cur.execute("SELECT * from word WHERE %s" % ' AND '.join(wheres), values) + wheres = ' AND '.join(["{} = %s".format(h) for h in row.headings]) + cur.execute("SELECT * from word WHERE " + wheres, list(row.cells)) if exclude: assert cur.rowcount == 0, "Row still in word table: %s" % '/'.join(values) else: assert cur.rowcount > 0, "Row not in word table: %s" % '/'.join(values) + +@then("there are(?P no)? word tokens for postcodes (?P.*)") +def check_word_table_for_postcodes(context, exclude, postcodes): + """ Check that the tokenizer produces postcode tokens for the given + postcodes. The postcodes are a comma-separated list of postcodes. + Whitespace matters. 
+ """ + nctx = context.nominatim + tokenizer = tokenizer_factory.get_tokenizer_for_db(nctx.get_test_config()) + with tokenizer.name_analyzer() as ana: + plist = [ana.normalize_postcode(p) for p in postcodes.split(',')] + + plist.sort() + + with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur: + if nctx.tokenizer == 'legacy_icu': + cur.execute("""SELECT info->>'postcode' FROM word + WHERE type = 'P' and info->>'postcode' = any(%s)""", + (plist,)) + else: + cur.execute("""SELECT word FROM word WHERE word = any(%s) + and class = 'place' and type = 'postcode'""", + (plist,)) + + found = [row[0] for row in cur] + assert len(found) == len(set(found)), f"Duplicate rows for postcodes: {found}" + + if exclude: + assert len(found) == 0, f"Unexpected postcodes: {found}" + else: + assert set(found) == set(plist), \ + f"Missing postcodes {set(plist) - set(found)}. Found: {found}" + @then("place_addressline contains") def check_place_addressline(context): + """ Check the contents of the place_addressline table. Each row represents + a table row and all data must match. Data not present in the expected + table, may be arbitry. The rows are identified via the 'object' column, + representing the addressee and the 'address' column, representing the + address item. + """ with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur: for row in context.table: nid = NominatimID(row['object']) @@ -236,6 +338,9 @@ def check_place_addressline(context): @then("place_addressline doesn't contain") def check_place_addressline_exclude(context): + """ Check that the place_addressline doesn't contain any entries for the + given addressee/address item pairs. + """ with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur: for row in context.table: pid = NominatimID(row['object']).get_place_id(cur) @@ -246,20 +351,18 @@ def check_place_addressline_exclude(context): assert cur.rowcount == 0, \ "Row found for place %s and address %s" % (row['object'], row['address']) -@then("(?P\w+) expands to(?P no)? interpolation") +@then("W(?P\d+) expands to(?P no)? interpolation") def check_location_property_osmline(context, oid, neg): - nid = NominatimID(oid) - - assert 'W' == nid.typ, "interpolation must be a way" - + """ Check that the given way is present in the interpolation table. + """ with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur: cur.execute("""SELECT *, ST_AsText(linegeo) as geomtxt FROM location_property_osmline WHERE osm_id = %s AND startnumber IS NOT NULL""", - (nid.oid, )) + (oid, )) if neg: - assert cur.rowcount == 0 + assert cur.rowcount == 0, "Interpolation found for way {}.".format(oid) return todo = list(range(len(list(context.table)))) @@ -271,17 +374,10 @@ def check_location_property_osmline(context, oid, neg): todo.remove(i) break else: - assert False, "Unexpected row %s" % (str(res)) + assert False, "Unexpected row " + str(res) - DBRow(nid, res, context).assert_row(row, ('start', 'end')) + DBRow(oid, res, context).assert_row(row, ('start', 'end')) assert not todo -@then("search_name has no entry for (?P.*)") -def check_search_name_has_entry(context, oid): - with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur: - NominatimID(oid).row_by_place_id(cur, 'search_name') - - assert cur.rowcount == 0, \ - "Found {} entries for ID {}".format(cur.rowcount, oid)