+    return 1
+
+
+def import_importance_csv(dsn: str, data_file: Path) -> int:
+ """ Replace wikipedia importance table with data from a
+ single CSV file.
+
+ The file must be a gzipped CSV and have the following columns:
+ language, title, importance, wikidata_id
+
+ Other columns may be present but will be ignored.
+ """
+ if not data_file.exists():
+ return 1
+
+    # Only import the first occurrence of a wikidata ID.
+    # This keeps indexes and table small.
+    wd_done = set()
+
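+    # Drop any previous wikipedia/wikimedia importance tables and recreate
+    # wikimedia_importance from scratch.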
+    with connect(dsn) as conn:
+        with conn.cursor() as cur:
+            cur.drop_table('wikipedia_article')
+            cur.drop_table('wikipedia_redirect')
+            cur.drop_table('wikimedia_importance')
+            cur.execute("""CREATE TABLE wikimedia_importance (
+                             language TEXT NOT NULL,
+                             title TEXT NOT NULL,
+                             importance double precision NOT NULL,
+                             wikidata TEXT
+                           ) """)
+
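+        # Stream the gzipped, tab-separated file into the new table through a
+        # copy buffer. Wikidata IDs are deduplicated on their numeric part
+        # (the leading 'Q' is stripped); repeated IDs are stored as NULL.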
+        with gzip.open(str(data_file), 'rt') as fd, CopyBuffer() as buf:
+            for row in csv.DictReader(fd, delimiter='\t', quotechar='|'):
+                wd_id = int(row['wikidata_id'][1:])
+                buf.add(row['language'], row['title'], row['importance'],
+                        None if wd_id in wd_done else row['wikidata_id'])
+                wd_done.add(wd_id)
+
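+                # Flush the buffer to the table in chunks so that the whole
+                # file never needs to be held in memory.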
+                if buf.size() > 10000000:
+                    with conn.cursor() as cur:
+                        buf.copy_out(cur, 'wikimedia_importance',
+                                     columns=['language', 'title', 'importance',
+                                              'wikidata'])
+
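+            # Write out whatever is left in the buffer after the last chunk.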
+            with conn.cursor() as cur:
+                buf.copy_out(cur, 'wikimedia_importance',
+                             columns=['language', 'title', 'importance', 'wikidata'])
+
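+        # Title lookups get a plain index; the wikidata index can be partial
+        # because duplicate IDs were stored as NULL above.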
+        with conn.cursor() as cur:
+            cur.execute("""CREATE INDEX IF NOT EXISTS idx_wikimedia_importance_title
+                           ON wikimedia_importance (title)""")
+            cur.execute("""CREATE INDEX IF NOT EXISTS idx_wikimedia_importance_wikidata
+                           ON wikimedia_importance (wikidata)
+                           WHERE wikidata is not null""")
+
+        conn.commit()
+
+    return 0
+
+
+def import_importance_sql(dsn: str, data_file: Path, ignore_errors: bool) -> int:
+ """ Replace wikipedia importance table with data from an SQL file.
+ """
+ if not data_file.exists():