You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
559 lines
20 KiB
559 lines
20 KiB
14 years ago
|
# orm/unitofwork.py
|
||
|
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
|
||
|
#
|
||
|
# This module is part of SQLAlchemy and is released under
|
||
|
# the MIT License: http://www.opensource.org/licenses/mit-license.php
|
||
|
|
||
|
"""The internals for the unit of work system.
|
||
|
|
||
|
The session's flush() process passes objects to a contextual object
|
||
|
here, which assembles flush tasks based on mappers and their properties,
|
||
|
organizes them in order of dependency, and executes.
|
||
|
|
||
|
"""
|
||
|
|
||
|
from sqlalchemy import util, topological
|
||
|
from sqlalchemy.orm import attributes, interfaces
|
||
|
from sqlalchemy.orm import util as mapperutil
|
||
|
from sqlalchemy.orm.util import _state_mapper
|
||
|
session = util.importlater("sqlalchemy.orm", "session")
|
||
|
|
||
|
class UOWEventHandler(interfaces.AttributeExtension):
    """Attribute extension installed on all relationship attributes.

    Applies session cascade rules ("save-update", "delete-orphan")
    as append/remove/set events occur on those attributes.
    """

    active_history = False

    def __init__(self, key):
        self.key = key

    def append(self, state, item, initiator):
        # "save-update" cascade: an instance appended to the collection
        # of a session-bound parent is added to that session as well.
        owning_session = session._state_session(state)
        if not owning_session:
            return item

        prop = _state_mapper(state).get_property(self.key)
        should_add = (
            prop.cascade.save_update
            and (prop.cascade_backrefs or self.key == initiator.key)
            and item not in owning_session
        )
        if should_add:
            owning_session.add(item)
        return item

    def remove(self, state, item, initiator):
        # "delete-orphan" cascade: a pending item removed from the
        # collection which thereby becomes an orphan is expunged.
        owning_session = session._state_session(state)
        if not owning_session:
            return

        prop = _state_mapper(state).get_property(self.key)
        if (prop.cascade.delete_orphan
                and item in owning_session.new
                and prop.mapper._is_orphan(attributes.instance_state(item))):
            owning_session.expunge(item)

    def set(self, state, newvalue, oldvalue, initiator):
        # "save-update" cascade for a scalar attribute set; also expunge
        # a pending old value which becomes an orphan.
        if oldvalue is newvalue:
            return newvalue

        owning_session = session._state_session(state)
        if not owning_session:
            return newvalue

        prop = _state_mapper(state).get_property(self.key)
        should_add = (
            newvalue is not None
            and prop.cascade.save_update
            and (prop.cascade_backrefs or self.key == initiator.key)
            and newvalue not in owning_session
        )
        if should_add:
            owning_session.add(newvalue)

        if (prop.cascade.delete_orphan
                and oldvalue in owning_session.new
                and prop.mapper._is_orphan(
                    attributes.instance_state(oldvalue))):
            owning_session.expunge(oldvalue)
        return newvalue
|
||
|
|
||
|
|
||
|
class UOWTransaction(object):
    """Represents the internal state of a single flush() operation.

    Collects pending InstanceStates and per-mapper/per-property flush
    actions, organizes the actions into a dependency-sorted sequence,
    and executes them.
    """

    def __init__(self, session):
        # the Session which initiated this flush
        self.session = session
        self.mapper_flush_opts = session._mapper_flush_opts

        # dictionary used by external actors to
        # store arbitrary state information.
        self.attributes = {}

        # dictionary of mappers to sets of
        # DependencyProcessors, which are also
        # set to be part of the sorted flush actions,
        # which have that mapper as a parent.
        self.deps = util.defaultdict(set)

        # dictionary of mappers to sets of InstanceState
        # items pending for flush which have that mapper
        # as a parent.
        self.mappers = util.defaultdict(set)

        # a dictionary of Preprocess objects, which gather
        # additional states impacted by the flush
        # and determine if a flush action is needed
        self.presort_actions = {}

        # dictionary of PostSortRec objects, each
        # one issues work during the flush within
        # a certain ordering.
        self.postsort_actions = {}

        # a set of 2-tuples, each containing two
        # PostSortRec objects where the second
        # is dependent on the first being executed
        # first
        self.dependencies = set()

        # dictionary of InstanceState-> (isdelete, listonly)
        # tuples, indicating if this state is to be deleted
        # or insert/updated, or just refreshed
        self.states = {}

        # tracks InstanceStates which will be receiving
        # a "post update" call.  Keys are mappers,
        # values are a set of states and a set of the
        # columns which should be included in the update.
        self.post_update_states = util.defaultdict(lambda: (set(), set()))

    @property
    def has_work(self):
        # True if any state has been registered with this transaction.
        return bool(self.states)

    def is_deleted(self, state):
        """return true if the given state is marked as deleted
        within this uowtransaction."""

        return state in self.states and self.states[state][0]

    def memo(self, key, callable_):
        """Return the value for *key* in self.attributes, computing and
        caching it via *callable_* on first access."""
        if key in self.attributes:
            return self.attributes[key]
        else:
            self.attributes[key] = ret = callable_()
            return ret

    def remove_state_actions(self, state):
        """remove pending actions for a state from the uowtransaction."""

        isdelete = self.states[state][0]

        # mark the state "listonly" so that flush actions skip it
        self.states[state] = (isdelete, True)

    def get_attribute_history(self, state, key, passive=attributes.PASSIVE_NO_INITIALIZE):
        """facade to attributes.get_state_history(), including caching of results."""

        hashkey = ("history", state, key)

        # cache the objects, not the states; the strong reference here
        # prevents newly loaded objects from being dereferenced during the
        # flush process
        if hashkey in self.attributes:
            (history, cached_passive) = self.attributes[hashkey]
            # if the cached lookup was "passive" and now we want non-passive, do a non-passive
            # lookup and re-cache
            if cached_passive and not passive:
                history = state.get_history(key, passive=False)
                self.attributes[hashkey] = (history, passive)
        else:
            history = state.get_history(key, passive=passive)
            self.attributes[hashkey] = (history, passive)

        # object-holding attributes are converted to a state-based
        # representation; scalar history is returned as-is
        if not history or not state.get_impl(key).uses_objects:
            return history
        else:
            return history.as_state()

    def has_dep(self, processor):
        # "fromparent" registrations use key (processor, True)
        return (processor, True) in self.presort_actions

    def register_preprocessor(self, processor, fromparent):
        """Register a Preprocess action for the given DependencyProcessor,
        keyed on the (processor, fromparent) combination; no-op if already
        registered."""
        key = (processor, fromparent)
        if key not in self.presort_actions:
            self.presort_actions[key] = Preprocess(processor, fromparent)

    def register_object(self, state, isdelete=False,
                            listonly=False, cancel_delete=False):
        """Add an InstanceState to this flush, establishing per-mapper
        flush actions the first time each mapper is seen.

        States not present in the owning Session are ignored.  A state
        already registered is upgraded from "listonly" (or has its
        delete flag replaced) when a stronger registration arrives.
        """
        if not self.session._contains_state(state):
            return

        if state not in self.states:
            mapper = _state_mapper(state)

            # first state seen for this mapper; set up its
            # per-mapper flush actions
            if mapper not in self.mappers:
                mapper._per_mapper_flush_actions(self)

            self.mappers[mapper].add(state)
            self.states[state] = (isdelete, listonly)
        else:
            # upgrade an existing registration to non-listonly
            if not listonly and (isdelete or cancel_delete):
                self.states[state] = (isdelete, False)

    def issue_post_update(self, state, post_update_cols):
        """Mark *state* to receive a "post update" UPDATE against the
        given columns, aggregated per base mapper."""
        mapper = state.manager.mapper.base_mapper
        states, cols = self.post_update_states[mapper]
        states.add(state)
        cols.update(post_update_cols)

    @util.memoized_property
    def _mapper_for_dep(self):
        """return a dynamic mapping of (Mapper, DependencyProcessor) to
        True or False, indicating if the DependencyProcessor operates
        on objects of that Mapper.

        The result is stored in the dictionary persistently once
        calculated.

        """
        return util.PopulateDict(
                    lambda tup:tup[0]._props.get(tup[1].key) is tup[1].prop
                )

    def filter_states_for_dep(self, dep, states):
        """Filter the given list of InstanceStates to those relevant to the
        given DependencyProcessor.

        """
        mapper_for_dep = self._mapper_for_dep
        return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]]

    def states_for_mapper_hierarchy(self, mapper, isdelete, listonly):
        """Yield registered states throughout the given mapper's base
        hierarchy whose (isdelete, listonly) flags match exactly."""
        checktup = (isdelete, listonly)
        for mapper in mapper.base_mapper.self_and_descendants:
            for state in self.mappers[mapper]:
                if self.states[state] == checktup:
                    yield state

    def _generate_actions(self):
        """Generate the full, unsorted collection of PostSortRecs as
        well as dependency pairs for this UOWTransaction.

        """
        # execute presort_actions, until all states
        # have been processed.   a presort_action might
        # add new states to the uow.
        while True:
            ret = False
            for action in list(self.presort_actions.values()):
                if action.execute(self):
                    ret = True
            if not ret:
                break

        # see if the graph of mapper dependencies has cycles.
        self.cycles = cycles = topological.find_cycles(
                        self.dependencies,
                        self.postsort_actions.values())

        if cycles:
            # if yes, break the per-mapper actions into
            # per-state actions
            convert = dict(
                (rec, set(rec.per_state_flush_actions(self)))
                for rec in cycles
            )

            # rewrite the existing dependencies to point to
            # the per-state actions for those per-mapper actions
            # that were broken up.
            for edge in list(self.dependencies):
                if None in edge or \
                    edge[0].disabled or edge[1].disabled or \
                    cycles.issuperset(edge):
                    # drop edges that are disabled, incomplete, or
                    # entirely within the cycle set
                    self.dependencies.remove(edge)
                elif edge[0] in cycles:
                    self.dependencies.remove(edge)
                    for dep in convert[edge[0]]:
                        self.dependencies.add((dep, edge[1]))
                elif edge[1] in cycles:
                    self.dependencies.remove(edge)
                    for dep in convert[edge[1]]:
                        self.dependencies.add((edge[0], dep))

        # the cycle members were replaced by their per-state versions,
        # so exclude them from the returned action set
        return set([a for a in self.postsort_actions.values()
                    if not a.disabled
                ]
                ).difference(cycles)

    def execute(self):
        """Run the full set of flush actions in dependency order."""
        postsort_actions = self._generate_actions()

        #sort = topological.sort(self.dependencies, postsort_actions)
        #print "--------------"
        #print self.dependencies
        #print list(sort)
        #print "COUNT OF POSTSORT ACTIONS", len(postsort_actions)

        # execute
        if self.cycles:
            # cycles present: sort into batches of mutually-independent
            # actions and let each record aggregate with its siblings
            for set_ in topological.sort_as_subsets(
                                            self.dependencies,
                                            postsort_actions):
                while set_:
                    n = set_.pop()
                    n.execute_aggregate(self, set_)
        else:
            for rec in topological.sort(
                                    self.dependencies,
                                    postsort_actions):
                rec.execute(self)


    def finalize_flush_changes(self):
        """mark processed objects as clean / deleted after a successful flush().

        this method is called within the flush() method after the
        execute() method has succeeded and the transaction has been committed.

        """
        for state, (isdelete, listonly) in self.states.iteritems():
            if isdelete:
                self.session._remove_newly_deleted(state)
            else:
                # if listonly:
                #   debug... would like to see how many do this
                self.session._register_newly_persistent(state)
|
||
|
|
||
|
class IterateMappersMixin(object):
    """Mixin providing iteration over the mappers relevant to a
    DependencyProcessor, from either its parent or child side."""

    def _mappers(self, uow):
        dep = self.dependency_processor
        if not self.fromparent:
            # child side: all mappers in the child hierarchy
            return dep.mapper.self_and_descendants
        # parent side: restrict to those mappers the
        # DependencyProcessor actually operates upon
        relevant = uow._mapper_for_dep
        return iter(
            m
            for m in dep.parent.self_and_descendants
            if relevant[(m, dep)]
        )
|
||
|
|
||
|
class Preprocess(IterateMappersMixin):
    """Gathers flush-pending states relevant to a DependencyProcessor,
    invokes its presort hooks, and establishes per-property flush
    actions once changes are detected."""

    def __init__(self, dependency_processor, fromparent):
        self.dependency_processor = dependency_processor
        self.fromparent = fromparent
        # states already handed to the presort hooks
        self.processed = set()
        # set True once per_property_flush_actions() has been called
        self.setup_flush_actions = False

    def execute(self, uow):
        """Process newly-registered states; return True if any were
        handled (the caller loops until no Preprocess reports work)."""
        pending_deletes = set()
        pending_saves = set()

        # partition not-yet-processed, non-listonly states into
        # deletes vs. saves
        for mapper in self._mappers(uow):
            for state in uow.mappers[mapper].difference(self.processed):
                isdelete, listonly = uow.states[state]
                if listonly:
                    continue
                (pending_deletes if isdelete else pending_saves).add(state)

        if pending_deletes:
            self.dependency_processor.presort_deletes(uow, pending_deletes)
            self.processed.update(pending_deletes)
        if pending_saves:
            self.dependency_processor.presort_saves(uow, pending_saves)
            self.processed.update(pending_saves)

        if not (pending_deletes or pending_saves):
            return False

        # on first sign of actual changes, establish the
        # per-property flush actions
        if not self.setup_flush_actions and (
            self.dependency_processor.prop_has_changes(
                uow, pending_deletes, True)
            or self.dependency_processor.prop_has_changes(
                uow, pending_saves, False)
        ):
            self.dependency_processor.per_property_flush_actions(uow)
            self.setup_flush_actions = True
        return True
|
||
|
|
||
|
class PostSortRec(object):
    """Base for records which issue work during the sorted phase of the
    flush.

    Instances are interned per-UOWTransaction: the class plus the
    remaining constructor arguments form a key, and an existing record
    is returned when one was already created for that key.
    """

    disabled = False

    def __new__(cls, uow, *args):
        key = (cls, ) + args
        existing = uow.postsort_actions.get(key)
        if existing is not None:
            return existing
        rec = object.__new__(cls)
        uow.postsort_actions[key] = rec
        return rec

    def execute_aggregate(self, uow, recs):
        # default aggregation: just execute individually
        self.execute(uow)

    def __repr__(self):
        return "%s(%s)" % (
            self.__class__.__name__,
            ",".join(str(x) for x in self.__dict__.values())
        )
|
||
|
|
||
|
class ProcessAll(IterateMappersMixin, PostSortRec):
    """Flush action which runs a DependencyProcessor against all
    applicable states, on either the delete or the save path."""

    def __init__(self, uow, dependency_processor, delete, fromparent):
        self.dependency_processor = dependency_processor
        self.delete = delete
        self.fromparent = fromparent
        # register this processor under its parent's base mapper so
        # per-state conversion can locate it
        uow.deps[dependency_processor.parent.base_mapper].add(dependency_processor)

    def execute(self, uow):
        handler = (
            self.dependency_processor.process_deletes
            if self.delete
            else self.dependency_processor.process_saves
        )
        handler(uow, self._elements(uow))

    def per_state_flush_actions(self, uow):
        # this is handled by SaveUpdateAll and DeleteAll,
        # since a ProcessAll should unconditionally be pulled
        # into per-state if either the parent/child mappers
        # are part of a cycle
        return iter([])

    def __repr__(self):
        return "%s(%s, delete=%s)" % (
            self.__class__.__name__,
            self.dependency_processor,
            self.delete
        )

    def _elements(self, uow):
        # yield non-listonly states matching our delete flag
        for mapper in self._mappers(uow):
            for state in uow.mappers[mapper]:
                isdelete, listonly = uow.states[state]
                if isdelete == self.delete and not listonly:
                    yield state
|
||
|
|
||
|
class IssuePostUpdate(PostSortRec):
    """Emits the "post update" UPDATE for states flagged via
    UOWTransaction.issue_post_update()."""

    def __init__(self, uow, mapper, isdelete):
        self.mapper = mapper
        self.isdelete = isdelete

    def execute(self, uow):
        states, cols = uow.post_update_states[self.mapper]
        # restrict to states whose delete flag matches this action
        matching = [
            state for state in states
            if uow.states[state][0] == self.isdelete
        ]

        self.mapper._post_update(matching, uow, cols)
|
||
|
|
||
|
class SaveUpdateAll(PostSortRec):
    """Issues INSERT/UPDATE for all pending save states of an entire
    mapper hierarchy."""

    def __init__(self, uow, mapper):
        self.mapper = mapper
        assert mapper is mapper.base_mapper

    def execute(self, uow):
        pending = uow.states_for_mapper_hierarchy(self.mapper, False, False)
        self.mapper._save_obj(pending, uow)

    def per_state_flush_actions(self, uow):
        # break this per-mapper action into per-state actions,
        # used when the dependency graph contains cycles
        states = list(
            uow.states_for_mapper_hierarchy(self.mapper, False, False))
        for rec in self.mapper._per_state_flush_actions(uow, states, False):
            yield rec

        # also convert the dependency processors attached to this mapper
        for dep in uow.deps[self.mapper]:
            dep.per_state_flush_actions(
                uow,
                uow.filter_states_for_dep(dep, states),
                False)
|
||
|
|
||
|
class DeleteAll(PostSortRec):
    """Issues DELETE for all pending delete states of an entire mapper
    hierarchy."""

    def __init__(self, uow, mapper):
        self.mapper = mapper
        assert mapper is mapper.base_mapper

    def execute(self, uow):
        pending = uow.states_for_mapper_hierarchy(self.mapper, True, False)
        self.mapper._delete_obj(pending, uow)

    def per_state_flush_actions(self, uow):
        # break this per-mapper action into per-state actions,
        # used when the dependency graph contains cycles
        states = list(
            uow.states_for_mapper_hierarchy(self.mapper, True, False))
        for rec in self.mapper._per_state_flush_actions(uow, states, True):
            yield rec

        # also convert the dependency processors attached to this mapper
        for dep in uow.deps[self.mapper]:
            dep.per_state_flush_actions(
                uow,
                uow.filter_states_for_dep(dep, states),
                True)
|
||
|
|
||
|
class ProcessState(PostSortRec):
    """Per-state counterpart to ProcessAll, produced when the
    dependency graph contains cycles."""

    def __init__(self, uow, dependency_processor, delete, state):
        self.dependency_processor = dependency_processor
        self.delete = delete
        self.state = state

    def execute_aggregate(self, uow, recs):
        # pull every sibling rec with the same processor and delete flag
        # out of the remaining set, then process all states in one batch
        dependency_processor = self.dependency_processor
        delete = self.delete
        siblings = [
            r for r in recs
            if r.__class__ is self.__class__
            and r.dependency_processor is dependency_processor
            and r.delete is delete
        ]
        recs.difference_update(siblings)
        states = [self.state] + [r.state for r in siblings]
        if delete:
            dependency_processor.process_deletes(uow, states)
        else:
            dependency_processor.process_saves(uow, states)

    def __repr__(self):
        return "%s(%s, %s, delete=%s)" % (
            self.__class__.__name__,
            self.dependency_processor,
            mapperutil.state_str(self.state),
            self.delete
        )
|
||
|
|
||
|
class SaveUpdateState(PostSortRec):
    """Per-state INSERT/UPDATE action, produced when the dependency
    graph contains cycles."""

    def __init__(self, uow, state, mapper):
        self.state = state
        self.mapper = mapper

    def execute_aggregate(self, uow, recs):
        # batch all remaining SaveUpdateState recs for the same mapper
        # into a single _save_obj() call
        mapper = self.mapper
        siblings = [
            r for r in recs
            if r.__class__ is self.__class__ and r.mapper is mapper
        ]
        recs.difference_update(siblings)
        mapper._save_obj(
            [self.state] + [r.state for r in siblings],
            uow)

    def __repr__(self):
        return "%s(%s)" % (
            self.__class__.__name__,
            mapperutil.state_str(self.state)
        )
|
||
|
|
||
|
class DeleteState(PostSortRec):
    """Per-state DELETE action, produced when the dependency graph
    contains cycles."""

    def __init__(self, uow, state, mapper):
        self.state = state
        self.mapper = mapper

    def execute_aggregate(self, uow, recs):
        # batch all remaining DeleteState recs for the same mapper
        # into a single _delete_obj() call
        mapper = self.mapper
        siblings = [
            r for r in recs
            if r.__class__ is self.__class__ and r.mapper is mapper
        ]
        recs.difference_update(siblings)
        all_states = [self.state] + [r.state for r in siblings]
        # only states still flagged isdelete are actually deleted
        mapper._delete_obj(
            [s for s in all_states if uow.states[s][0]],
            uow)

    def __repr__(self):
        return "%s(%s)" % (
            self.__class__.__name__,
            mapperutil.state_str(self.state)
        )
|
||
|
|