Improved CSV import speed by several orders of magnitude.

Author: Eevee
Date:   2009-07-31 00:03:02 -07:00
Parent: 7566351ce1
Commit: e8ed55c297


@@ -27,8 +27,7 @@ def command_csvimport(engine_uri, directory='.'):
     from sqlalchemy.orm.attributes import instrumentation_registry
 
-    # Use autocommit in case rows fail due to foreign key incest
-    session = connect(engine_uri, autocommit=True, autoflush=False)
+    session = connect(engine_uri)
 
     metadata.create_all()
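The removed session was opened with autocommit=True so every per-row flush became its own tiny transaction, letting individual rows fail on forward references. The new code opens a plain session and, as the later hunks show, commits in large batches instead, which is where the speedup comes from. A rough, self-contained sketch of the cost difference, using a hypothetical Item table and the modern SQLAlchemy ORM API rather than the 0.5-era one in this diff:

    import time

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class Item(Base):
        # Hypothetical table, standing in for any of the imported CSV tables
        __tablename__ = 'item'
        id = Column(Integer, primary_key=True)
        name = Column(String)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)

    def time_import(label, flush_every_row):
        with Session(engine) as session:
            start = time.time()
            for i in range(5000):
                session.add(Item(name='row %d' % i))
                if flush_every_row:
                    # Roughly the old per-row cost: one round trip (and, on
                    # a real database, one transaction) for every row
                    session.flush()
            session.commit()
            print('%s: %.2fs' % (label, time.time() - start))

    time_import('flush per row', True)
    time_import('one commit', False)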
@@ -74,11 +73,18 @@ def command_csvimport(engine_uri, directory='.'):
         reader = csv.reader(csvfile, lineterminator='\n')
         column_names = [unicode(column) for column in reader.next()]
 
-        # Self-referential tables may contain rows with foreign keys of
-        # other rows in the same table that do not yet exist.  We'll keep
-        # a running list of these and try inserting them again after the
-        # rest are done
-        failed_rows = []
+        # Self-referential tables may contain rows with foreign keys of other
+        # rows in the same table that do not yet exist.  Pull these out and add
+        # them to the session last
+        # ASSUMPTION: Self-referential tables have a single PK called "id"
+        deferred_rows = []  # ( row referring to id, [foreign ids we need] )
+        seen_ids = {}       # primary key we've seen => 1
+
+        # Fetch foreign key columns that point at this table, if any
+        self_ref_columns = []
+        for column in table_obj.c:
+            if any(_.references(table_obj) for _ in column.foreign_keys):
+                self_ref_columns.append(column)
 
         for csvs in reader:
             row = table_class()
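The new self_ref_columns scan asks each column whether any of its foreign keys point back at the table the column belongs to. A standalone sketch of the same check, with a hypothetical species table whose evolves_from column refers to its own id:

    from sqlalchemy import Column, ForeignKey, Integer, MetaData, String, Table

    metadata = MetaData()
    species = Table('species', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String),
        # A row may point at another row in the same table:
        Column('evolves_from', Integer, ForeignKey('species.id')),
    )

    # Same logic as the diff: a column is self-referential if any of its
    # foreign keys references the table it lives in
    self_ref_columns = [
        column for column in species.c
        if any(fk.references(species) for fk in column.foreign_keys)
    ]
    print([c.name for c in self_ref_columns])   # ['evolves_from']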
@@ -101,33 +107,44 @@ def command_csvimport(engine_uri, directory='.'):
                 setattr(row, column_name, value)
 
-            try:
-                session.add(row)
-                session.flush()
-            except IntegrityError, e:
-                failed_rows.append(row)
-
-        # Loop over the failed rows and keep trying to insert them.  If a loop
-        # doesn't manage to insert any rows, bail.
-        do_another_loop = True
-        while failed_rows and do_another_loop:
-            do_another_loop = False
-
-            for i, row in enumerate(failed_rows):
-                try:
-                    session.add(row)
-                    session.flush()
-
-                    # Success!
-                    del failed_rows[i]
-                    do_another_loop = True
-                except IntegrityError, e:
-                    pass
-
-        if failed_rows:
-            print len(failed_rows), "rows failed"
-        else:
-            print 'loaded'
+            # May need to stash this row and add it later if it refers to a
+            # later row in this table
+            if self_ref_columns:
+                foreign_ids = [getattr(row, _.name) for _ in self_ref_columns]
+                foreign_ids = [_ for _ in foreign_ids if _]  # remove NULL ids
+
+                if not foreign_ids:
+                    # NULL key.  Remember this row and add as usual.
+                    seen_ids[row.id] = 1
+
+                elif all(_ in seen_ids for _ in foreign_ids):
+                    # Non-NULL key we've already seen.  Remember it and commit
+                    # so we know the old row exists when we add the new one
+                    session.commit()
+                    seen_ids[row.id] = 1
+
+                else:
+                    # Non-NULL future id.  Save this and insert it later!
+                    deferred_rows.append((row, foreign_ids))
+                    continue
+
+            session.add(row)
+
+        session.commit()
+
+        # Attempt to add any spare rows we've collected
+        for row, foreign_ids in deferred_rows:
+            if not all(_ in seen_ids for _ in foreign_ids):
+                # Could happen if row A refers to B which refers to C.
+                # This is ridiculous and doesn't happen in my data so far
+                raise ValueError("Too many levels of self-reference! "
+                                 "Row was: " + str(row.__dict__))
+
+            session.add(row)
+            seen_ids[row.id] = 1
+            session.commit()
+
+        print 'loaded'
 
 
 def command_csvexport(engine_uri, directory='.'):
     import csv
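Stripped of SQLAlchemy, the new strategy is a single pass with a one-level deferral queue: rows are added in file order, any row whose self-reference points at an id not yet seen is set aside, and the queue is drained at the end. A reference that is still unsatisfied then raises, mirroring the ValueError above. A minimal sketch with hypothetical (id, parent_id) rows:

    def import_rows(rows):
        seen_ids = set()
        deferred = []
        ordered = []

        for row_id, parent_id in rows:
            if parent_id is not None and parent_id not in seen_ids:
                # Points at a row we haven't inserted yet; try again later
                deferred.append((row_id, parent_id))
                continue
            ordered.append((row_id, parent_id))
            seen_ids.add(row_id)

        for row_id, parent_id in deferred:
            if parent_id not in seen_ids:
                # Mirrors the ValueError in the diff: only one level of
                # forward reference is handled
                raise ValueError('Too many levels of self-reference!')
            ordered.append((row_id, parent_id))
            seen_ids.add(row_id)

        return ordered

    # Row 1 refers forward to row 3, so it is inserted after its target:
    print(import_rows([(1, 3), (2, None), (3, 2)]))
    # [(2, None), (3, 2), (1, 3)]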