"""CSV to database or vice versa."""

import csv
import pkg_resources
import sys

from sqlalchemy.orm.attributes import instrumentation_registry
import sqlalchemy.types

from pokedex.db import metadata
import pokedex.db.tables as tables
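
# Typical usage, as a rough sketch.  The engine URL and session setup below
# are illustrative only; this module assumes `pokedex.db.metadata` is already
# bound to the target engine before load() or dump() is called:
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#
#     engine = create_engine('sqlite:///pokedex.sqlite')
#     session = sessionmaker(bind=engine)()
#     load(session, drop_tables=True, verbose=True)   # CSV files -> database
#     dump(session, verbose=True)                     # database -> CSV files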


def _get_verbose_prints(verbose):
    """If `verbose` is true, returns two functions: one for printing a
    starting message, and the other for printing a success or failure message
    when finished.

    If `verbose` is false, returns two no-op functions.
    """
    if verbose:
        import sys

        def print_start(thing):
            # Truncate to 66 characters, leaving 10 characters for a success
            # or failure message
            truncated_thing = thing[0:66]

            # Also, space-pad to keep the cursor in a known column
            num_spaces = 66 - len(truncated_thing)

            print "%s...%s" % (truncated_thing, ' ' * num_spaces),
            sys.stdout.flush()

        def print_done(msg='ok'):
            print msg
            sys.stdout.flush()

        return print_start, print_done

    # Not verbose; return dummies
    def dummy(*args, **kwargs):
        pass

    return dummy, dummy


def load(session, directory=None, drop_tables=False, verbose=False):
    """Load data from CSV files into the given database session.

    Tables are created automatically.

    `session`
        SQLAlchemy session to use.

    `directory`
        Directory the CSV files reside in. Defaults to the `pokedex` data
        directory.

    `drop_tables`
        If set to True, existing `pokedex`-related tables will be dropped.

    `verbose`
        If set to True, status messages will be printed to stdout.
    """

    # First take care of verbosity
    print_start, print_done = _get_verbose_prints(verbose)

    if not directory:
        directory = pkg_resources.resource_filename('pokedex', 'data/csv')

    # Drop all tables if requested
    if drop_tables:
        print_start('Dropping tables')
        metadata.drop_all()
        print_done()

    metadata.create_all()
    connection = session.connection()

    # Okay, run through the tables and actually load the data now
    for table_obj in metadata.sorted_tables:
        table_name = table_obj.name
        insert_stmt = table_obj.insert()

        print_start(table_name)

        try:
            csvfile = open("%s/%s.csv" % (directory, table_name), 'rb')
        except IOError:
            # File doesn't exist; don't load anything!
            print_done('missing?')
            continue

        reader = csv.reader(csvfile, lineterminator='\n')
        column_names = [unicode(column) for column in reader.next()]

        # Self-referential tables may contain rows with foreign keys of other
        # rows in the same table that do not yet exist. Pull these out and
        # add them to the session last.
        # ASSUMPTION: Self-referential tables have a single PK called "id"
        deferred_rows = []  # ( row referring to id, [foreign ids we need] )
        seen_ids = {}       # primary key we've seen => 1

        # Fetch foreign key columns that point at this table, if any
        self_ref_columns = []
        for column in table_obj.c:
            if any(_.references(table_obj) for _ in column.foreign_keys):
                self_ref_columns.append(column)
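
        # Illustrative example (the column name here is hypothetical): if a
        # table had an `evolves_from_id` column pointing back at its own `id`,
        # a row whose parent appears later in the CSV file would be deferred
        # here and only inserted after its parent row exists.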

        new_rows = []
        def insert_and_commit():
            session.connection().execute(insert_stmt, new_rows)
            session.commit()
            new_rows[:] = []
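
        # Passing a list of parameter dicts to Connection.execute() lets
        # SQLAlchemy issue an executemany-style INSERT, which is much faster
        # than one statement per row; rows are therefore buffered in new_rows
        # and flushed in batches (see the commit-every-1000-rows check below).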
        for csvs in reader:
            row_data = {}

            for column_name, value in zip(column_names, csvs):
                column = table_obj.c[column_name]
                if column.nullable and value == '':
                    # Empty string in a nullable column really means NULL
                    value = None
                elif isinstance(column.type, sqlalchemy.types.Boolean):
                    # Boolean values are stored as string values 0/1, but both
                    # of those evaluate as true; SQLA wants True/False
                    if value == '0':
                        value = False
                    else:
                        value = True
                else:
                    # Otherwise, unflatten from bytes
                    value = value.decode('utf-8')

                # nb: Dictionaries flattened with ** have to have string keys
                row_data[str(column_name)] = value
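
            # Illustrative example with made-up columns: a CSV row of
            # ('abra', '', '0') for columns (identifier, habitat_id, is_baby)
            # would become
            # {'identifier': u'abra', 'habitat_id': None, 'is_baby': False},
            # assuming habitat_id is nullable and is_baby is a Boolean column.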

            # May need to stash this row and add it later if it refers to a
            # later row in this table
            if self_ref_columns:
                foreign_ids = [row_data[_.name] for _ in self_ref_columns]
                foreign_ids = [_ for _ in foreign_ids if _]  # remove NULL ids

                if not foreign_ids:
                    # NULL key. Remember this row and add as usual.
                    seen_ids[row_data['id']] = 1

                elif all(_ in seen_ids for _ in foreign_ids):
                    # Non-NULL key we've already seen. Remember it and commit
                    # so we know the old row exists when we add the new one
                    insert_and_commit()
                    seen_ids[row_data['id']] = 1

                else:
                    # Non-NULL future id. Save this and insert it later!
                    deferred_rows.append((row_data, foreign_ids))
                    continue

            # Insert row!
            new_rows.append(row_data)

            # Remembering some zillion rows in the session consumes a lot of
            # RAM. Let's not do that. Commit every 1000 rows
            if len(new_rows) > 1000:
                insert_and_commit()

        insert_and_commit()

        # Attempt to add any spare rows we've collected
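        # Deferred rows are inserted one at a time, in their original file
        # order; each id is recorded in seen_ids as it goes, so a deferred
        # row may safely refer to another deferred row that came before it.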
        for row_data, foreign_ids in deferred_rows:
            if not all(_ in seen_ids for _ in foreign_ids):
                # Could happen if row A refers to B which refers to C.
                # This is ridiculous and doesn't happen in my data so far
                raise ValueError("Too many levels of self-reference! "
                                 "Row was: " + str(row_data))

            session.connection().execute(
                insert_stmt.values(**row_data)
            )
            seen_ids[row_data['id']] = 1
            session.commit()

        print_done()


def dump(session, directory=None, verbose=False):
    """Dumps the contents of a database to a set of CSV files. Probably not
    useful to anyone besides a developer.

    `session`
        SQLAlchemy session to use.

    `directory`
        Directory the CSV files should be put in. Defaults to the `pokedex`
        data directory.

    `verbose`
        If set to True, status messages will be printed to stdout.
    """

    # First take care of verbosity
    print_start, print_done = _get_verbose_prints(verbose)

    if not directory:
        directory = pkg_resources.resource_filename('pokedex', 'data/csv')

    for table_name in sorted(metadata.tables.keys()):
        print_start(table_name)
        table = metadata.tables[table_name]

        writer = csv.writer(open("%s/%s.csv" % (directory, table_name), 'wb'),
                            lineterminator='\n')
        columns = [col.name for col in table.columns]
        writer.writerow(columns)

        primary_key = table.primary_key
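        # Ordering by primary key keeps the row order deterministic, so the
        # generated CSV files stay stable (and diff cleanly) between dumps.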
        for row in session.query(table).order_by(*primary_key).all():
            csvs = []
            for col in columns:
                # Convert Pythony values to something more universal
                val = getattr(row, col)
                if val is None:
                    val = ''
                elif val is True:
                    val = '1'
                elif val is False:
                    val = '0'
                else:
                    val = unicode(val).encode('utf-8')

                csvs.append(val)

            writer.writerow(csvs)

        print_done()