# orm/persistence.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.

The functions here are called only by the unit of work functions
in unitofwork.py.

"""

import operator
from itertools import groupby

from sqlalchemy import sql, util, exc as sa_exc
from sqlalchemy.orm import attributes, sync, \
                        exc as orm_exc
from sqlalchemy.orm.util import _state_mapper, state_str


def save_obj(base_mapper, states, uowtransaction, single=False):
    """Issue ``INSERT`` and/or ``UPDATE`` statements for a list
    of objects.

    This is called within the context of a UOWTransaction during a
    flush operation, given a list of states to be flushed.  The
    base mapper in an inheritance hierarchy handles the inserts/
    updates for all descendant mappers.

    """

    # if batch=false, call _save_obj separately for each object
    if not single and not base_mapper.batch:
        for state in _sort_states(states):
            save_obj(base_mapper, [state], uowtransaction, single=True)
        return

    states_to_insert, states_to_update = _organize_states_for_save(
                                                base_mapper,
                                                states,
                                                uowtransaction)

    cached_connections = _cached_connection_dict(base_mapper)

    for table, mapper in base_mapper._sorted_tables.iteritems():
        insert = _collect_insert_commands(base_mapper, uowtransaction,
                                table, states_to_insert)

        update = _collect_update_commands(base_mapper, uowtransaction,
                                table, states_to_update)

        if update:
            _emit_update_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    mapper, table, update)

        if insert:
            _emit_insert_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    table, insert)

    _finalize_insert_update_commands(base_mapper, uowtransaction,
                                    states_to_insert, states_to_update)
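
# save_obj() collects the INSERT and UPDATE parameter sets for every table in
# base_mapper._sorted_tables before emitting them; delete_obj() below walks
# the same tables in reverse, so dependent rows are removed before the rows
# they reference.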


def post_update(base_mapper, states, uowtransaction, post_update_cols):
    """Issue UPDATE statements on behalf of a relationship() which
    specifies post_update.

    """
    cached_connections = _cached_connection_dict(base_mapper)

    states_to_update = _organize_states_for_post_update(
                                    base_mapper,
                                    states, uowtransaction)

    for table, mapper in base_mapper._sorted_tables.iteritems():
        update = _collect_post_update_commands(base_mapper, uowtransaction,
                                            table, states_to_update,
                                            post_update_cols)

        if update:
            _emit_post_update_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    mapper, table, update)


def delete_obj(base_mapper, states, uowtransaction):
    """Issue ``DELETE`` statements for a list of objects.

    This is called within the context of a UOWTransaction during a
    flush operation.

    """
    cached_connections = _cached_connection_dict(base_mapper)

    states_to_delete = _organize_states_for_delete(
                                    base_mapper,
                                    states,
                                    uowtransaction)

    table_to_mapper = base_mapper._sorted_tables

    for table in reversed(table_to_mapper.keys()):
        delete = _collect_delete_commands(base_mapper, uowtransaction,
                                table, states_to_delete)

        mapper = table_to_mapper[table]

        _emit_delete_statements(base_mapper, uowtransaction,
                    cached_connections, mapper, table, delete)

    for state, state_dict, mapper, has_identity, connection \
                        in states_to_delete:
        mapper.dispatch.after_delete(mapper, connection, state)


def _organize_states_for_save(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for INSERT or
    UPDATE.

    This includes splitting out into distinct lists for
    each, calling before_insert/before_update, obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state,
    and the identity flag.

    """

    states_to_insert = []
    states_to_update = []

    for state, dict_, mapper, connection in _connections_for_states(
                                            base_mapper, uowtransaction,
                                            states):

        has_identity = bool(state.key)
        instance_key = state.key or mapper._identity_key_from_state(state)

        row_switch = None

        # call before_XXX extensions
        if not has_identity:
            mapper.dispatch.before_insert(mapper, connection, state)
        else:
            mapper.dispatch.before_update(mapper, connection, state)

        # detect if we have a "pending" instance (i.e. has
        # no instance_key attached to it), and another instance
        # with the same identity key already exists as persistent.
        # convert to an UPDATE if so.
        if not has_identity and \
            instance_key in uowtransaction.session.identity_map:
            instance = \
                uowtransaction.session.identity_map[instance_key]
            existing = attributes.instance_state(instance)
            if not uowtransaction.is_deleted(existing):
                raise orm_exc.FlushError(
                    "New instance %s with identity key %s conflicts "
                    "with persistent instance %s" %
                    (state_str(state), instance_key,
                     state_str(existing)))

            base_mapper._log_debug(
                "detected row switch for identity %s. "
                "will update %s, remove %s from "
                "transaction", instance_key,
                state_str(state), state_str(existing))

            # remove the "delete" flag from the existing element
            uowtransaction.remove_state_actions(existing)
            row_switch = existing

        if not has_identity and not row_switch:
            states_to_insert.append(
                (state, dict_, mapper, connection,
                has_identity, instance_key, row_switch)
            )
        else:
            states_to_update.append(
                (state, dict_, mapper, connection,
                has_identity, instance_key, row_switch)
            )

    return states_to_insert, states_to_update
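
# Each entry in states_to_insert / states_to_update is a tuple of the form
# (state, dict_, mapper, connection, has_identity, instance_key, row_switch);
# the same layout is unpacked by _collect_insert_commands(),
# _collect_update_commands() and _finalize_insert_update_commands().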


def _organize_states_for_post_update(base_mapper, states,
                                                uowtransaction):
    """Make an initial pass across a set of states for UPDATE
    corresponding to post_update.

    This includes obtaining key information for each state
    including its dictionary, mapper, the connection to use for
    the execution per state.

    """
    return list(_connections_for_states(base_mapper, uowtransaction,
                                                    states))


def _organize_states_for_delete(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for DELETE.

    This includes calling out before_delete and obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state.

    """
    states_to_delete = []

    for state, dict_, mapper, connection in _connections_for_states(
                                            base_mapper, uowtransaction,
                                            states):

        mapper.dispatch.before_delete(mapper, connection, state)

        states_to_delete.append((state, dict_, mapper,
                bool(state.key), connection))
    return states_to_delete


def _collect_insert_commands(base_mapper, uowtransaction, table,
                                                states_to_insert):
    """Identify sets of values to use in INSERT statements for a
    list of states.

    """
    insert = []
    for state, state_dict, mapper, connection, has_identity, \
                            instance_key, row_switch in states_to_insert:
        if table not in mapper._pks_by_table:
            continue

        pks = mapper._pks_by_table[table]

        params = {}
        value_params = {}

        has_all_pks = True
        for col in mapper._cols_by_table[table]:
            if col is mapper.version_id_col:
                params[col.key] = mapper.version_id_generator(None)
            else:
                # pull straight from the dict for
                # pending objects
                prop = mapper._columntoproperty[col]
                value = state_dict.get(prop.key, None)

                if value is None:
                    if col in pks:
                        has_all_pks = False
                    elif col.default is None and \
                            col.server_default is None:
                        params[col.key] = value

                elif isinstance(value, sql.ClauseElement):
                    value_params[col] = value
                else:
                    params[col.key] = value

        insert.append((state, state_dict, params, mapper,
                        connection, value_params, has_all_pks))
    return insert
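
# Each collected INSERT record is (state, state_dict, params, mapper,
# connection, value_params, has_all_pks).  value_params holds SQL expressions
# (ClauseElements) that must be rendered into the statement itself rather
# than passed as bound parameters, which is why _emit_insert_statements()
# excludes such records from executemany() batching.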


def _collect_update_commands(base_mapper, uowtransaction,
                                table, states_to_update):
    """Identify sets of values to use in UPDATE statements for a
    list of states.

    This function works intricately with the history system
    to determine exactly what values should be updated
    as well as how the row should be matched within an UPDATE
    statement.  Includes some tricky scenarios where the primary
    key of an object might have been changed.

    """

    update = []
    for state, state_dict, mapper, connection, has_identity, \
                            instance_key, row_switch in states_to_update:
        if table not in mapper._pks_by_table:
            continue

        pks = mapper._pks_by_table[table]

        params = {}
        value_params = {}

        hasdata = hasnull = False
        for col in mapper._cols_by_table[table]:
            if col is mapper.version_id_col:
                params[col._label] = \
                    mapper._get_committed_state_attr_by_column(
                                    row_switch or state,
                                    row_switch and row_switch.dict
                                                or state_dict,
                                    col)

                prop = mapper._columntoproperty[col]
                history = attributes.get_state_history(
                    state, prop.key,
                    attributes.PASSIVE_NO_INITIALIZE
                )
                if history.added:
                    params[col.key] = history.added[0]
                    hasdata = True
                else:
                    params[col.key] = mapper.version_id_generator(
                                            params[col._label])

                    # HACK: check for history, in case the
                    # history is only
                    # in a different table than the one
                    # where the version_id_col is.
                    for prop in mapper._columntoproperty.itervalues():
                        history = attributes.get_state_history(
                                state, prop.key,
                                attributes.PASSIVE_NO_INITIALIZE)
                        if history.added:
                            hasdata = True
            else:
                prop = mapper._columntoproperty[col]
                history = attributes.get_state_history(
                                state, prop.key,
                                attributes.PASSIVE_NO_INITIALIZE)
                if history.added:
                    if isinstance(history.added[0],
                                    sql.ClauseElement):
                        value_params[col] = history.added[0]
                    else:
                        value = history.added[0]
                        params[col.key] = value

                    if col in pks:
                        if history.deleted and \
                                not row_switch:
                            # if passive_updates and sync detected
                            # this was a pk->pk sync, use the new
                            # value to locate the row, since the
                            # DB would already have set this
                            if ("pk_cascaded", state, col) in \
                                            uowtransaction.attributes:
                                value = history.added[0]
                                params[col._label] = value
                            else:
                                # use the old value to
                                # locate the row
                                value = history.deleted[0]
                                params[col._label] = value
                            hasdata = True
                        else:
                            # row switch logic can reach us here
                            # remove the pk from the update params
                            # so the update doesn't
                            # attempt to include the pk in the
                            # update statement
                            del params[col.key]
                            value = history.added[0]
                            params[col._label] = value
                        if value is None:
                            hasnull = True
                    else:
                        hasdata = True
                elif col in pks:
                    value = state.manager[prop.key].impl.get(
                        state, state_dict)
                    if value is None:
                        hasnull = True
                    params[col._label] = value

        if hasdata:
            if hasnull:
                raise sa_exc.FlushError(
                            "Can't update table "
                            "using NULL for primary "
                            "key value")
            update.append((state, state_dict, params, mapper,
                            connection, value_params))
    return update
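
# Note the bind parameter naming convention used above: values destined for
# the SET clause are keyed by col.key, while values used to locate the row in
# the WHERE clause are keyed by col._label, matching the
# bindparam(col._label) criteria built in _emit_update_statements().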


def _collect_post_update_commands(base_mapper, uowtransaction, table,
                        states_to_update, post_update_cols):
    """Identify sets of values to use in UPDATE statements for a
    list of states within a post_update operation.

    """

    update = []
    for state, state_dict, mapper, connection in states_to_update:
        if table not in mapper._pks_by_table:
            continue
        pks = mapper._pks_by_table[table]
        params = {}
        hasdata = False

        for col in mapper._cols_by_table[table]:
            if col in pks:
                params[col._label] = \
                        mapper._get_state_attr_by_column(
                                        state,
                                        state_dict, col)
            elif col in post_update_cols:
                prop = mapper._columntoproperty[col]
                history = attributes.get_state_history(
                            state, prop.key,
                            attributes.PASSIVE_NO_INITIALIZE)
                if history.added:
                    value = history.added[0]
                    params[col.key] = value
                    hasdata = True
        if hasdata:
            update.append((state, state_dict, params, mapper,
                            connection))
    return update


def _collect_delete_commands(base_mapper, uowtransaction, table,
                                                states_to_delete):
    """Identify values to use in DELETE statements for a list of
    states to be deleted."""

    delete = util.defaultdict(list)

    for state, state_dict, mapper, has_identity, connection \
                                        in states_to_delete:
        if not has_identity or table not in mapper._pks_by_table:
            continue

        params = {}
        delete[connection].append(params)
        for col in mapper._pks_by_table[table]:
            params[col.key] = \
                    value = \
                    mapper._get_state_attr_by_column(
                                    state, state_dict, col)
            if value is None:
                raise sa_exc.FlushError(
                            "Can't delete from table "
                            "using NULL for primary "
                            "key value")

        if mapper.version_id_col is not None and \
                    table.c.contains_column(mapper.version_id_col):
            params[mapper.version_id_col.key] = \
                    mapper._get_committed_state_attr_by_column(
                            state, state_dict,
                            mapper.version_id_col)
    return delete
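
# The returned "delete" structure maps each connection to a list of parameter
# dictionaries containing primary key values (plus the version id column when
# versioning is in effect), ready for executemany() in
# _emit_delete_statements().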


def _emit_update_statements(base_mapper, uowtransaction,
                        cached_connections, mapper, table, update):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_update_commands()."""

    needs_version_id = mapper.version_id_col is not None and \
                table.c.contains_column(mapper.version_id_col)

    def update_stmt():
        clause = sql.and_()

        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col._label,
                                            type_=col.type))

        if needs_version_id:
            clause.clauses.append(mapper.version_id_col == \
                    sql.bindparam(mapper.version_id_col._label,
                                    type_=mapper.version_id_col.type))

        return table.update(clause)

    statement = base_mapper._memo(('update', table), update_stmt)

    rows = 0
    for state, state_dict, params, mapper, \
                connection, value_params in update:

        if value_params:
            c = connection.execute(
                                statement.values(value_params),
                                params)
        else:
            c = cached_connections[connection].\
                                execute(statement, params)

        _postfetch(
                mapper,
                uowtransaction,
                table,
                state,
                state_dict,
                c.context.prefetch_cols,
                c.context.postfetch_cols,
                c.context.compiled_parameters[0],
                value_params)
        rows += c.rowcount

    if connection.dialect.supports_sane_rowcount:
        if rows != len(update):
            raise orm_exc.StaleDataError(
                    "UPDATE statement on table '%s' expected to "
                    "update %d row(s); %d were matched." %
                    (table.description, len(update), rows))

    elif needs_version_id:
        util.warn("Dialect %s does not support updated rowcount "
                "- versioning cannot be verified." %
                c.dialect.dialect_description,
                stacklevel=12)
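
# The rowcount check above is what raises StaleDataError on optimistic
# concurrency failures: with a version_id_col configured, the UPDATE's WHERE
# clause includes the previously committed version value, so a row changed
# elsewhere matches zero rows.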


def _emit_insert_statements(base_mapper, uowtransaction,
                        cached_connections, table, insert):
    """Emit INSERT statements corresponding to value lists collected
    by _collect_insert_commands()."""

    statement = base_mapper._memo(('insert', table), table.insert)

    for (connection, pkeys, hasvalue, has_all_pks), \
        records in groupby(insert,
                            lambda rec: (rec[4],
                                    rec[2].keys(),
                                    bool(rec[5]),
                                    rec[6])
                            ):
        if has_all_pks and not hasvalue:
            records = list(records)
            multiparams = [rec[2] for rec in records]
            c = cached_connections[connection].\
                                execute(statement, multiparams)

            for (state, state_dict, params, mapper,
                    conn, value_params, has_all_pks), \
                    last_inserted_params in \
                    zip(records, c.context.compiled_parameters):
                _postfetch(
                        mapper,
                        uowtransaction,
                        table,
                        state,
                        state_dict,
                        c.context.prefetch_cols,
                        c.context.postfetch_cols,
                        last_inserted_params,
                        value_params)

        else:
            for state, state_dict, params, mapper, \
                        connection, value_params, \
                        has_all_pks in records:

                if value_params:
                    result = connection.execute(
                                        statement.values(value_params),
                                        params)
                else:
                    result = cached_connections[connection].\
                                        execute(statement, params)

                primary_key = result.context.inserted_primary_key

                if primary_key is not None:
                    # set primary key attributes
                    for pk, col in zip(primary_key,
                                    mapper._pks_by_table[table]):
                        prop = mapper._columntoproperty[col]
                        if state_dict.get(prop.key) is None:
                            # TODO: would rather say:
                            #state_dict[prop.key] = pk
                            mapper._set_state_attr_by_column(
                                        state,
                                        state_dict,
                                        col, pk)

                _postfetch(
                        mapper,
                        uowtransaction,
                        table,
                        state,
                        state_dict,
                        result.context.prefetch_cols,
                        result.context.postfetch_cols,
                        result.context.compiled_parameters[0],
                        value_params)
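
# Design note: records that already carry all primary key values and no SQL
# expression values are batched through a single executemany() call;
# everything else is executed one row at a time so that
# result.context.inserted_primary_key can be read back and assigned onto the
# state.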


def _emit_post_update_statements(base_mapper, uowtransaction,
                        cached_connections, mapper, table, update):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_post_update_commands()."""

    def update_stmt():
        clause = sql.and_()

        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col._label,
                                            type_=col.type))

        return table.update(clause)

    statement = base_mapper._memo(('post_update', table), update_stmt)

    # execute each UPDATE in the order according to the original
    # list of states to guarantee row access order, but
    # also group them into common (connection, cols) sets
    # to support executemany().
    for key, grouper in groupby(
        update, lambda rec: (rec[4], rec[2].keys())
    ):
        connection = key[0]
        multiparams = [params for state, state_dict,
                                params, mapper, conn in grouper]
        cached_connections[connection].\
                            execute(statement, multiparams)


def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
                                    mapper, table, delete):
    """Emit DELETE statements corresponding to value lists collected
    by _collect_delete_commands()."""

    need_version_id = mapper.version_id_col is not None and \
        table.c.contains_column(mapper.version_id_col)

    def delete_stmt():
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                    col == sql.bindparam(col.key, type_=col.type))

        if need_version_id:
            clause.clauses.append(
                mapper.version_id_col ==
                sql.bindparam(
                        mapper.version_id_col.key,
                        type_=mapper.version_id_col.type
                )
            )

        return table.delete(clause)

    for connection, del_objects in delete.iteritems():
        statement = base_mapper._memo(('delete', table), delete_stmt)

        connection = cached_connections[connection]

        if need_version_id:
            # TODO: need test coverage for this [ticket:1761]
            if connection.dialect.supports_sane_rowcount:
                rows = 0
                # execute deletes individually so that versioned
                # rows can be verified
                for params in del_objects:
                    c = connection.execute(statement, params)
                    rows += c.rowcount
                if rows != len(del_objects):
                    raise orm_exc.StaleDataError(
                        "DELETE statement on table '%s' expected to "
                        "delete %d row(s); %d were matched." %
                        (table.description, len(del_objects), rows)
                    )
            else:
                util.warn(
                    "Dialect %s does not support deleted rowcount "
                    "- versioning cannot be verified." %
                    connection.dialect.dialect_description,
                    stacklevel=12)
                connection.execute(statement, del_objects)
        else:
            connection.execute(statement, del_objects)


def _finalize_insert_update_commands(base_mapper, uowtransaction,
                            states_to_insert, states_to_update):
    """finalize state on states that have been inserted or updated,
    including calling after_insert/after_update events.

    """
    for state, state_dict, mapper, connection, has_identity, \
                    instance_key, row_switch in states_to_insert + \
                                                    states_to_update:

        if mapper._readonly_props:
            readonly = state.unmodified_intersection(
                [p.key for p in mapper._readonly_props
                    if p.expire_on_flush or p.key not in state.dict]
            )
            if readonly:
                state.expire_attributes(state.dict, readonly)

        # if eager_defaults option is enabled,
        # refresh whatever has been expired.
        if base_mapper.eager_defaults and state.unloaded:
            state.key = base_mapper._identity_key_from_state(state)
            uowtransaction.session.query(base_mapper)._load_on_ident(
                state.key, refresh_state=state,
                only_load_props=state.unloaded)

        # call after_XXX extensions
        if not has_identity:
            mapper.dispatch.after_insert(mapper, connection, state)
        else:
            mapper.dispatch.after_update(mapper, connection, state)


def _postfetch(mapper, uowtransaction, table,
                state, dict_, prefetch_cols, postfetch_cols,
                            params, value_params):
    """Expire attributes in need of newly persisted database state,
    after an INSERT or UPDATE statement has proceeded for that
    state."""

    if mapper.version_id_col is not None:
        prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]

    for c in prefetch_cols:
        if c.key in params and c in mapper._columntoproperty:
            mapper._set_state_attr_by_column(state, dict_, c, params[c.key])

    if postfetch_cols:
        state.expire_attributes(state.dict,
                            [mapper._columntoproperty[c].key
                            for c in postfetch_cols if c in
                            mapper._columntoproperty]
                            )

    # synchronize newly inserted ids from one table to the next
    # TODO: this still goes a little too often.  would be nice to
    # have definitive list of "columns that changed" here
    for m, equated_pairs in mapper._table_to_equated[table]:
        sync.populate(state, m, state, m,
                                        equated_pairs,
                                        uowtransaction,
                                        mapper.passive_updates)
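
# prefetch_cols are columns whose values were already present in the
# statement's compiled parameters (e.g. Python-side defaults) and can be
# copied directly onto the instance; postfetch_cols (e.g. server-side
# defaults) are expired instead, so they are loaded from the database on next
# access.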


def _connections_for_states(base_mapper, uowtransaction, states):
    """Return an iterator of (state, state.dict, mapper, connection).

    The states are sorted according to _sort_states, then paired
    with the connection they should be using for the given
    unit of work transaction.

    """
    # if session has a connection callable,
    # organize individual states with the connection
    # to use for update
    if uowtransaction.session.connection_callable:
        connection_callable = \
                uowtransaction.session.connection_callable
    else:
        connection = None
        connection_callable = None

    for state in _sort_states(states):
        if connection_callable:
            connection = connection_callable(base_mapper, state.obj())
        elif not connection:
            connection = uowtransaction.transaction.connection(
                                base_mapper)

        mapper = _state_mapper(state)

        yield state, state.dict, mapper, connection


def _cached_connection_dict(base_mapper):
    # dictionary of connection->connection_with_cache_options.
    return util.PopulateDict(
        lambda conn: conn.execution_options(
            compiled_cache=base_mapper._compiled_cache
        ))


def _sort_states(states):
    pending = set(states)
    persistent = set(s for s in pending if s.key is not None)
    pending.difference_update(persistent)
    return sorted(pending, key=operator.attrgetter("insert_order")) + \
        sorted(persistent, key=lambda q: q.key[1])
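
# _sort_states() orders pending (not yet persisted) states by their insert
# order and persistent states by primary key identity, keeping the order of
# emitted statements deterministic from one flush to the next.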