Updated SqlAlchemy

2017-04-15 16:27:12 +00:00
parent 2c790e1fe1
commit e3267d4bda
59 changed files with 30236 additions and 26049 deletions

View File

@@ -1 +1,11 @@
# ext/__init__.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import util as _sa_util
_sa_util.dependencies.resolve_all("sqlalchemy.ext")

View File

@@ -1,3 +1,10 @@
# ext/associationproxy.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Contain the ``AssociationProxy`` class.
The ``AssociationProxy`` is a Python property object which provides
@@ -9,43 +16,37 @@ See the example ``examples/association/proxied_association.py``.
import itertools
import operator
import weakref
from sqlalchemy import exceptions
from sqlalchemy import orm
from sqlalchemy import util
from sqlalchemy.orm import collections
from sqlalchemy.sql import not_
from .. import exc, orm, util
from ..orm import collections, interfaces
from ..sql import not_, or_
def association_proxy(target_collection, attr, **kw):
"""Return a Python property implementing a view of *attr* over a collection.
r"""Return a Python property implementing a view of a target
attribute which references an attribute on members of the
target.
Implements a read/write view over an instance's *target_collection*,
extracting *attr* from each member of the collection. The property acts
somewhat like this list comprehension::
The returned value is an instance of :class:`.AssociationProxy`.
[getattr(member, *attr*)
for member in getattr(instance, *target_collection*)]
Implements a Python property representing a relationship as a collection
of simpler values, or a scalar value. The proxied property will mimic
the collection type of the target (list, dict or set), or, in the case of
a one to one relationship, a simple scalar value.
Unlike the list comprehension, the collection returned by the property is
always in sync with *target_collection*, and mutations made to either
collection will be reflected in both.
:param target_collection: Name of the attribute we'll proxy to.
This attribute is typically mapped by
:func:`~sqlalchemy.orm.relationship` to link to a target collection, but
can also be a many-to-one or non-scalar relationship.
Implements a Python property representing a relationship as a collection of
simpler values. The proxied property will mimic the collection type of
the target (list, dict or set), or, in the case of a one to one relationship,
a simple scalar value.
:param target_collection: Name of the relationship attribute we'll proxy to,
usually created with :func:`~sqlalchemy.orm.relationship`.
:param attr: Attribute on the associated instances we'll proxy for.
:param attr: Attribute on the associated instance or instances we'll
proxy for.
For example, given a target collection of [obj1, obj2], a list created
by this proxy property would look like [getattr(obj1, *attr*),
getattr(obj2, *attr*)]
If the relationship is one-to-one or otherwise uselist=False, then simply:
getattr(obj, *attr*)
If the relationship is one-to-one or otherwise uselist=False, then
simply: getattr(obj, *attr*)
:param creator: optional.
@@ -69,59 +70,78 @@ def association_proxy(target_collection, attr, **kw):
situation.
:param \*\*kw: Passes along any other keyword arguments to
:class:`AssociationProxy`.
:class:`.AssociationProxy`.
"""
return AssociationProxy(target_collection, attr, **kw)
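As a concrete illustration of the behavior described above (the mapping here is hypothetical, not part of this changeset): a ``User.keywords`` proxy exposing the ``keyword`` string of each related ``Keyword``::

    from sqlalchemy import Column, ForeignKey, Integer, String
    from sqlalchemy.ext.associationproxy import association_proxy
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        kw = relationship("Keyword")
        # read/write view of the 'keyword' attribute across the 'kw' collection
        keywords = association_proxy('kw', 'keyword')

    class Keyword(Base):
        __tablename__ = 'keyword'
        id = Column(Integer, primary_key=True)
        user_id = Column(Integer, ForeignKey('user.id'))
        keyword = Column(String(64))

        def __init__(self, keyword):
            self.keyword = keyword

    user = User()
    # with no 'creator' given, appending a plain string calls Keyword(value)
    user.keywords.append('cheese inspector')
    print(user.keywords)   # ['cheese inspector']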
class AssociationProxy(object):
ASSOCIATION_PROXY = util.symbol('ASSOCIATION_PROXY')
"""Symbol indicating an :class:`InspectionAttr` that's
of type :class:`.AssociationProxy`.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.
"""
class AssociationProxy(interfaces.InspectionAttrInfo):
"""A descriptor that presents a read/write view of an object attribute."""
is_attribute = False
extension_type = ASSOCIATION_PROXY
def __init__(self, target_collection, attr, creator=None,
getset_factory=None, proxy_factory=None, proxy_bulk_set=None):
"""Arguments are:
getset_factory=None, proxy_factory=None,
proxy_bulk_set=None, info=None):
"""Construct a new :class:`.AssociationProxy`.
target_collection
Name of the collection we'll proxy to, usually created with
'relationship()' in a mapper setup.
The :func:`.association_proxy` function is provided as the usual
entrypoint here, though :class:`.AssociationProxy` can be instantiated
and/or subclassed directly.
attr
Attribute on the collected instances we'll proxy for. For example,
given a target collection of [obj1, obj2], a list created by this
proxy property would look like [getattr(obj1, attr), getattr(obj2,
attr)]
:param target_collection: Name of the collection we'll proxy to,
usually created with :func:`.relationship`.
creator
Optional. When new items are added to this proxied collection, new
instances of the class collected by the target collection will be
created. For list and set collections, the target class constructor
will be called with the 'value' for the new instance. For dict
types, two arguments are passed: key and value.
:param attr: Attribute on the collected instances we'll proxy
for. For example, given a target collection of [obj1, obj2], a
list created by this proxy property would look like
[getattr(obj1, attr), getattr(obj2, attr)]
:param creator: Optional. When new items are added to this proxied
collection, new instances of the class collected by the target
collection will be created. For list and set collections, the
target class constructor will be called with the 'value' for the
new instance. For dict types, two arguments are passed:
key and value.
If you want to construct instances differently, supply a 'creator'
function that takes arguments as above and returns instances.
getset_factory
Optional. Proxied attribute access is automatically handled by
routines that get and set values based on the `attr` argument for
this proxy.
:param getset_factory: Optional. Proxied attribute access is
automatically handled by routines that get and set values based on
the `attr` argument for this proxy.
If you would like to customize this behavior, you may supply a
`getset_factory` callable that produces a tuple of `getter` and
`setter` functions. The factory is called with two arguments, the
abstract type of the underlying collection and this proxy instance.
proxy_factory
Optional. The type of collection to emulate is determined by
sniffing the target collection. If your collection type can't be
determined by duck typing or you'd like to use a different
collection implementation, you may supply a factory function to
produce those collections. Only applicable to non-scalar relationships.
:param proxy_factory: Optional. The type of collection to emulate is
determined by sniffing the target collection. If your collection
type can't be determined by duck typing or you'd like to use a
different collection implementation, you may supply a factory
function to produce those collections. Only applicable to
non-scalar relationships.
proxy_bulk_set
Optional, use with proxy_factory. See the _set() method for
details.
:param proxy_bulk_set: Optional, use with proxy_factory. See
the _set() method for details.
:param info: optional, will be assigned to
:attr:`.AssociationProxy.info` if present.
.. versionadded:: 1.0.9
"""
self.target_collection = target_collection
@@ -131,36 +151,107 @@ class AssociationProxy(object):
self.proxy_factory = proxy_factory
self.proxy_bulk_set = proxy_bulk_set
self.scalar = None
self.owning_class = None
self.key = '_%s_%s_%s' % (
type(self).__name__, target_collection, id(self))
self.collection_class = None
if info:
self.info = info
@property
def remote_attr(self):
"""The 'remote' :class:`.MapperProperty` referenced by this
:class:`.AssociationProxy`.
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.attr`
:attr:`.AssociationProxy.local_attr`
"""
return getattr(self.target_class, self.value_attr)
@property
def local_attr(self):
"""The 'local' :class:`.MapperProperty` referenced by this
:class:`.AssociationProxy`.
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.attr`
:attr:`.AssociationProxy.remote_attr`
"""
return getattr(self.owning_class, self.target_collection)
@property
def attr(self):
"""Return a tuple of ``(local_attr, remote_attr)``.
This attribute is convenient when specifying a join
using :meth:`.Query.join` across two relationships::
sess.query(Parent).join(*Parent.proxied.attr)
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.local_attr`
:attr:`.AssociationProxy.remote_attr`
"""
return (self.local_attr, self.remote_attr)
def _get_property(self):
return (orm.class_mapper(self.owning_class).
get_property(self.target_collection))
@property
@util.memoized_property
def target_class(self):
"""The class the proxy is attached to."""
"""The intermediary class handled by this :class:`.AssociationProxy`.
Intercepted append/set/assignment events will result
in the generation of new instances of this class.
"""
return self._get_property().mapper.class_
def _target_is_scalar(self):
return not self._get_property().uselist
@util.memoized_property
def scalar(self):
"""Return ``True`` if this :class:`.AssociationProxy` proxies a scalar
relationship on the local side."""
scalar = not self._get_property().uselist
if scalar:
self._initialize_scalar_accessors()
return scalar
@util.memoized_property
def _value_is_scalar(self):
return not self._get_property().\
mapper.get_property(self.value_attr).uselist
@util.memoized_property
def _target_is_object(self):
return getattr(self.target_class, self.value_attr).impl.uses_objects
def __get__(self, obj, class_):
if self.owning_class is None:
self.owning_class = class_ and class_ or type(obj)
if obj is None:
return self
elif self.scalar is None:
self.scalar = self._target_is_scalar()
if self.scalar:
self._initialize_scalar_accessors()
if self.scalar:
return self._scalar_get(getattr(obj, self.target_collection))
target = getattr(obj, self.target_collection)
return self._scalar_get(target)
else:
try:
# If the owning instance is reborn (orm session resurrect,
@@ -173,14 +264,10 @@ class AssociationProxy(object):
proxy = self._new(_lazy_collection(obj, self.target_collection))
setattr(obj, self.key, (id(obj), proxy))
return proxy
def __set__(self, obj, values):
if self.owning_class is None:
self.owning_class = type(obj)
if self.scalar is None:
self.scalar = self._target_is_scalar()
if self.scalar:
self._initialize_scalar_accessors()
if self.scalar:
creator = self.creator and self.creator or self.target_class
@@ -209,7 +296,8 @@ class AssociationProxy(object):
def _default_getset(self, collection_class):
attr = self.value_attr
getter = operator.attrgetter(attr)
_getter = operator.attrgetter(attr)
getter = lambda target: _getter(target) if target is not None else None
if collection_class is dict:
setter = lambda o, k, v: setattr(o, attr, v)
else:
@@ -221,21 +309,25 @@ class AssociationProxy(object):
self.collection_class = util.duck_type_collection(lazy_collection())
if self.proxy_factory:
return self.proxy_factory(lazy_collection, creator, self.value_attr, self)
return self.proxy_factory(
lazy_collection, creator, self.value_attr, self)
if self.getset_factory:
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
if self.collection_class is list:
return _AssociationList(lazy_collection, creator, getter, setter, self)
return _AssociationList(
lazy_collection, creator, getter, setter, self)
elif self.collection_class is dict:
return _AssociationDict(lazy_collection, creator, getter, setter, self)
return _AssociationDict(
lazy_collection, creator, getter, setter, self)
elif self.collection_class is set:
return _AssociationSet(lazy_collection, creator, getter, setter, self)
return _AssociationSet(
lazy_collection, creator, getter, setter, self)
else:
raise exceptions.ArgumentError(
raise exc.ArgumentError(
'could not guess which interface to use for '
'collection_class "%s" backing "%s"; specify a '
'proxy_factory and proxy_bulk_set manually' %
@@ -248,7 +340,7 @@ class AssociationProxy(object):
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
proxy.creator = creator
proxy.getter = getter
proxy.setter = setter
@@ -263,28 +355,102 @@ class AssociationProxy(object):
elif self.collection_class is set:
proxy.update(values)
else:
raise exceptions.ArgumentError(
'no proxy_bulk_set supplied for custom '
'collection_class implementation')
raise exc.ArgumentError(
'no proxy_bulk_set supplied for custom '
'collection_class implementation')
@property
def _comparator(self):
return self._get_property().comparator
def any(self, criterion=None, **kwargs):
return self._comparator.any(getattr(self.target_class, self.value_attr).has(criterion, **kwargs))
"""Produce a proxied 'any' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
if self._target_is_object:
if self._value_is_scalar:
value_expr = getattr(
self.target_class, self.value_attr).has(
criterion, **kwargs)
else:
value_expr = getattr(
self.target_class, self.value_attr).any(
criterion, **kwargs)
else:
value_expr = criterion
# check _value_is_scalar here, otherwise
# we're scalar->scalar - call .any() so that
# the "can't call any() on a scalar" msg is raised.
if self.scalar and not self._value_is_scalar:
return self._comparator.has(
value_expr
)
else:
return self._comparator.any(
value_expr
)
def has(self, criterion=None, **kwargs):
return self._comparator.has(getattr(self.target_class, self.value_attr).has(criterion, **kwargs))
"""Produce a proxied 'has' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
if self._target_is_object:
return self._comparator.has(
getattr(self.target_class, self.value_attr).
has(criterion, **kwargs)
)
else:
if criterion is not None or kwargs:
raise exc.ArgumentError(
"Non-empty has() not allowed for "
"column-targeted association proxy; use ==")
return self._comparator.has()
def contains(self, obj):
return self._comparator.any(**{self.value_attr: obj})
"""Produce a proxied 'contains' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
, :meth:`.RelationshipProperty.Comparator.has`,
and/or :meth:`.RelationshipProperty.Comparator.contains`
operators of the underlying proxied attributes.
"""
if self.scalar and not self._value_is_scalar:
return self._comparator.has(
getattr(self.target_class, self.value_attr).contains(obj)
)
else:
return self._comparator.any(**{self.value_attr: obj})
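For orientation, a hedged sketch of how these EXISTS-producing comparators read in queries, reusing the hypothetical ``User``/``Keyword`` mapping sketched earlier::

    # column-targeted proxy: contains() and any() compose EXISTS
    session.query(User).filter(User.keywords.contains('cheese inspector'))
    session.query(User).filter(
        User.keywords.any(Keyword.keyword.like('%cheese%')))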
def __eq__(self, obj):
return self._comparator.has(**{self.value_attr: obj})
# note the has() here will fail for collections; eq_()
# is only allowed with a scalar.
if obj is None:
return or_(
self._comparator.has(**{self.value_attr: obj}),
self._comparator == None
)
else:
return self._comparator.has(**{self.value_attr: obj})
def __ne__(self, obj):
return not_(self.__eq__(obj))
# note the has() here will fail for collections; eq_()
# is only allowed with a scalar.
return self._comparator.has(
getattr(self.target_class, self.value_attr) != obj)
class _lazy_collection(object):
@@ -295,22 +461,23 @@ class _lazy_collection(object):
def __call__(self):
obj = self.ref()
if obj is None:
raise exceptions.InvalidRequestError(
"stale association proxy, parent object has gone out of "
"scope")
raise exc.InvalidRequestError(
"stale association proxy, parent object has gone out of "
"scope")
return getattr(obj, self.target)
def __getstate__(self):
return {'obj':self.ref(), 'target':self.target}
return {'obj': self.ref(), 'target': self.target}
def __setstate__(self, state):
self.ref = weakref.ref(state['obj'])
self.target = state['target']
class _AssociationCollection(object):
def __init__(self, lazy_collection, creator, getter, setter, parent):
"""Constructs an _AssociationCollection.
"""Constructs an _AssociationCollection.
This will always be a subclass of either _AssociationList,
_AssociationSet, or _AssociationDict.
@@ -344,17 +511,20 @@ class _AssociationCollection(object):
def __len__(self):
return len(self.col)
def __nonzero__(self):
def __bool__(self):
return bool(self.col)
__nonzero__ = __bool__
def __getstate__(self):
return {'parent':self.parent, 'lazy_collection':self.lazy_collection}
return {'parent': self.parent, 'lazy_collection': self.lazy_collection}
def __setstate__(self, state):
self.parent = state['parent']
self.lazy_collection = state['lazy_collection']
self.parent._inflate(self)
class _AssociationList(_AssociationCollection):
"""Generic, converting, list-to-list proxy."""
@@ -368,7 +538,10 @@ class _AssociationList(_AssociationCollection):
return self.setter(object, value)
def __getitem__(self, index):
return self._get(self.col[index])
if not isinstance(index, slice):
return self._get(self.col[index])
else:
return [self._get(member) for member in self.col[index]]
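With slice support added, slicing the proxy returns a plain list of proxied values (again using the hypothetical ``User`` mapping)::

    user.keywords[0:2]   # e.g. ['cheese inspector', 'rocket scientist']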
def __setitem__(self, index, value):
if not isinstance(index, slice):
@@ -382,11 +555,12 @@ class _AssociationList(_AssociationCollection):
stop = index.stop
step = index.step or 1
rng = range(index.start or 0, stop, step)
start = index.start or 0
rng = list(range(index.start or 0, stop, step))
if step == 1:
for i in rng:
del self[index.start]
i = index.start
del self[start]
i = start
for item in value:
self.insert(i, item)
i += 1
@@ -429,7 +603,7 @@ class _AssociationList(_AssociationCollection):
for member in self.col:
yield self._get(member)
raise StopIteration
return
def append(self, value):
item = self._create(value)
@@ -437,7 +611,7 @@ class _AssociationList(_AssociationCollection):
def count(self, value):
return sum([1 for _ in
itertools.ifilter(lambda v: v == value, iter(self))])
util.itertools_filter(lambda v: v == value, iter(self))])
def extend(self, values):
for v in values:
@@ -536,14 +710,16 @@ class _AssociationList(_AssociationCollection):
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in locals().items():
if (util.callable(func) and func.func_name == func_name and
not func.__doc__ and hasattr(list, func_name)):
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(list, func_name)):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
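The loop just above is a small idiom repeated for each proxy type in this module: any method that shadows a builtin ``list`` method and has no docstring of its own inherits the builtin's. In isolation the pattern looks roughly like::

    def append(self, value):
        pass   # proxying implementation, intentionally undocumented

    if not append.__doc__ and hasattr(list, 'append'):
        append.__doc__ = list.append.__doc__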
_NotProvided = util.symbol('_NotProvided')
class _AssociationDict(_AssociationCollection):
"""Generic, converting, dict-to-dict proxy."""
@@ -577,7 +753,7 @@ class _AssociationDict(_AssociationCollection):
return key in self.col
def __iter__(self):
return self.col.iterkeys()
return iter(self.col.keys())
def clear(self):
self.col.clear()
@@ -622,24 +798,27 @@ class _AssociationDict(_AssociationCollection):
def keys(self):
return self.col.keys()
def iterkeys(self):
return self.col.iterkeys()
if util.py2k:
def iteritems(self):
return ((key, self._get(self.col[key])) for key in self.col)
def values(self):
return [ self._get(member) for member in self.col.values() ]
def itervalues(self):
return (self._get(self.col[key]) for key in self.col)
def itervalues(self):
for key in self.col:
yield self._get(self.col[key])
raise StopIteration
def iterkeys(self):
return self.col.iterkeys()
def items(self):
return [(k, self._get(self.col[k])) for k in self]
def values(self):
return [self._get(member) for member in self.col.values()]
def iteritems(self):
for key in self.col:
yield (key, self._get(self.col[key]))
raise StopIteration
def items(self):
return [(k, self._get(self.col[k])) for k in self]
else:
def items(self):
return ((key, self._get(self.col[key])) for key in self.col)
def values(self):
return (self._get(self.col[key]) for key in self.col)
def pop(self, key, default=_NotProvided):
if default is _NotProvided:
@@ -658,11 +837,20 @@ class _AssociationDict(_AssociationCollection):
len(a))
elif len(a) == 1:
seq_or_map = a[0]
for item in seq_or_map:
if isinstance(item, tuple):
self[item[0]] = item[1]
else:
# discern dict from sequence - took the advice from
# http://www.voidspace.org.uk/python/articles/duck_typing.shtml
# still not perfect :(
if hasattr(seq_or_map, 'keys'):
for item in seq_or_map:
self[item] = seq_or_map[item]
else:
try:
for k, v in seq_or_map:
self[k] = v
except ValueError:
raise ValueError(
"dictionary update sequence "
"requires 2-element tuples")
for key, value in kw.items():
self[key] = value
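Both branches above can be exercised through a dict-based proxy with the two standard ``dict.update`` call forms (the proxy name is illustrative)::

    proxy.update({'color': 'blue'})                        # mapping form
    proxy.update([('color', 'blue'), ('size', 'large')])   # sequence form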
@@ -673,9 +861,9 @@ class _AssociationDict(_AssociationCollection):
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in locals().items():
if (util.callable(func) and func.func_name == func_name and
not func.__doc__ and hasattr(dict, func_name)):
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(dict, func_name)):
func.__doc__ = getattr(dict, func_name).__doc__
del func_name, func
@@ -695,12 +883,14 @@ class _AssociationSet(_AssociationCollection):
def __len__(self):
return len(self.col)
def __nonzero__(self):
def __bool__(self):
if self.col:
return True
else:
return False
__nonzero__ = __bool__
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
@@ -717,7 +907,7 @@ class _AssociationSet(_AssociationCollection):
"""
for member in self.col:
yield self._get(member)
raise StopIteration
return
def add(self, value):
if value not in self:
@@ -871,8 +1061,8 @@ class _AssociationSet(_AssociationCollection):
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in locals().items():
if (util.callable(func) and func.func_name == func_name and
not func.__doc__ and hasattr(set, func_name)):
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(set, func_name)):
func.__doc__ = getattr(set, func_name).__doc__
del func_name, func

View File

@@ -1,31 +1,39 @@
"""Provides an API for creation of custom ClauseElements and compilers.
# ext/compiler.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""Provides an API for creation of custom ClauseElements and compilers.
Synopsis
========
Usage involves the creation of one or more :class:`~sqlalchemy.sql.expression.ClauseElement`
subclasses and one or more callables defining its compilation::
Usage involves the creation of one or more
:class:`~sqlalchemy.sql.expression.ClauseElement` subclasses and one or
more callables defining its compilation::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import ColumnClause
class MyColumn(ColumnClause):
pass
@compiles(MyColumn)
def compile_mycolumn(element, compiler, **kw):
return "[%s]" % element.name
Above, ``MyColumn`` extends :class:`~sqlalchemy.sql.expression.ColumnClause`,
the base expression element for named column objects. The ``compiles``
decorator registers itself with the ``MyColumn`` class so that it is invoked
when the object is compiled to a string::
from sqlalchemy import select
s = select([MyColumn('x'), MyColumn('y')])
print str(s)
Produces::
SELECT [x], [y]
@@ -50,22 +58,25 @@ invoked for the dialect in use::
@compiles(AlterColumn, 'postgresql')
def visit_alter_column(element, compiler, **kw):
return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name, element.column.name)
return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name,
element.column.name)
The second ``visit_alter_table`` will be invoked when any ``postgresql`` dialect is used.
The second ``visit_alter_table`` will be invoked when any ``postgresql``
dialect is used.
Compiling sub-elements of a custom expression construct
=======================================================
The ``compiler`` argument is the :class:`~sqlalchemy.engine.base.Compiled`
object in use. This object can be inspected for any information about the
in-progress compilation, including ``compiler.dialect``,
``compiler.statement`` etc. The :class:`~sqlalchemy.sql.compiler.SQLCompiler`
and :class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()``
The ``compiler`` argument is the
:class:`~sqlalchemy.engine.interfaces.Compiled` object in use. This object
can be inspected for any information about the in-progress compilation,
including ``compiler.dialect``, ``compiler.statement`` etc. The
:class:`~sqlalchemy.sql.compiler.SQLCompiler` and
:class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()``
method which can be used for compilation of embedded attributes::
from sqlalchemy.sql.expression import Executable, ClauseElement
class InsertFromSelect(Executable, ClauseElement):
def __init__(self, table, select):
self.table = table
@@ -80,36 +91,110 @@ method which can be used for compilation of embedded attributes::
insert = InsertFromSelect(t1, select([t1]).where(t1.c.x>5))
print insert
Produces::
"INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z FROM mytable WHERE mytable.x > :x_1)"
"INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z
FROM mytable WHERE mytable.x > :x_1)"
.. note::
The above ``InsertFromSelect`` construct is only an example; this
functionality is already available using the
:meth:`.Insert.from_select` method.
.. note::
The above ``InsertFromSelect`` construct probably wants to have "autocommit"
enabled. See :ref:`enabling_compiled_autocommit` for this step.
Cross Compiling between SQL and DDL compilers
---------------------------------------------
SQL and DDL constructs are each compiled using different base compilers - ``SQLCompiler``
and ``DDLCompiler``. A common need is to access the compilation rules of SQL expressions
from within a DDL expression. The ``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as below where we generate a CHECK
constraint that embeds a SQL expression::
SQL and DDL constructs are each compiled using different base compilers -
``SQLCompiler`` and ``DDLCompiler``. A common need is to access the
compilation rules of SQL expressions from within a DDL expression. The
``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as
below where we generate a CHECK constraint that embeds a SQL expression::
@compiles(MyConstraint)
def compile_my_constraint(constraint, ddlcompiler, **kw):
return "CONSTRAINT %s CHECK (%s)" % (
constraint.name,
ddlcompiler.sql_compiler.process(constraint.expression)
ddlcompiler.sql_compiler.process(
constraint.expression, literal_binds=True)
)
Above, we add an additional flag to the process step as called by
:meth:`.SQLCompiler.process`, which is the ``literal_binds`` flag. This
indicates that any SQL expression which refers to a :class:`.BindParameter`
object or other "literal" object such as those which refer to strings or
integers should be rendered **in-place**, rather than being referred to as
a bound parameter; when emitting DDL, bound parameters are typically not
supported.
.. _enabling_compiled_autocommit:
Enabling Autocommit on a Construct
==================================
Recall from the section :ref:`autocommit` that the :class:`.Engine`, when
asked to execute a construct in the absence of a user-defined transaction,
detects if the given construct represents DML or DDL, that is, a data
modification or data definition statement, which requires (or may require,
in the case of DDL) that the transaction generated by the DBAPI be committed
(recall that DBAPI always has a transaction going on regardless of what
SQLAlchemy does). Checking for this is actually accomplished by checking for
the "autocommit" execution option on the construct. When building a
construct like an INSERT derivation, a new DDL type, or perhaps a stored
procedure that alters data, the "autocommit" option needs to be set in order
for the statement to function with "connectionless" execution
(as described in :ref:`dbengine_implicit`).
Currently a quick way to do this is to subclass :class:`.Executable`, then
add the "autocommit" flag to the ``_execution_options`` dictionary (note this
is a "frozen" dictionary which supplies a generative ``union()`` method)::
from sqlalchemy.sql.expression import Executable, ClauseElement
class MyInsertThing(Executable, ClauseElement):
_execution_options = \
Executable._execution_options.union({'autocommit': True})
More succinctly, if the construct is truly similar to an INSERT, UPDATE, or
DELETE, :class:`.UpdateBase` can be used, which already is a subclass
of :class:`.Executable`, :class:`.ClauseElement` and includes the
``autocommit`` flag::
from sqlalchemy.sql.expression import UpdateBase
class MyInsertThing(UpdateBase):
def __init__(self, ...):
...
DDL elements that subclass :class:`.DDLElement` already have the
"autocommit" flag turned on.
Changing the default compilation of existing constructs
=======================================================
The compiler extension applies just as well to the existing constructs. When overriding
the compilation of a built in SQL construct, the @compiles decorator is invoked upon
the appropriate class (be sure to use the class, i.e. ``Insert`` or ``Select``, instead of the creation function such as ``insert()`` or ``select()``).
The compiler extension applies just as well to the existing constructs. When
overriding the compilation of a built in SQL construct, the @compiles
decorator is invoked upon the appropriate class (be sure to use the class,
i.e. ``Insert`` or ``Select``, instead of the creation function such
as ``insert()`` or ``select()``).
Within the new compilation function, to get at the "original" compilation routine,
use the appropriate visit_XXX method - this because compiler.process() will call upon the
overriding routine and cause an endless loop. Such as, to add "prefix" to all insert statements::
Within the new compilation function, to get at the "original" compilation
routine, use the appropriate visit_XXX method - this is because
compiler.process() will call upon the overriding routine and cause
an endless loop. For example, to add "prefix" to all insert statements::
from sqlalchemy.sql.expression import Insert
@@ -117,38 +202,77 @@ overriding routine and cause an endless loop. Such as, to add "prefix" to all
def prefix_inserts(insert, compiler, **kw):
return compiler.visit_insert(insert.prefix_with("some prefix"), **kw)
The above compiler will prefix all INSERT statements with "some prefix" when compiled.
The above compiler will prefix all INSERT statements with "some prefix" when
compiled.
.. _type_compilation_extension:
Changing Compilation of Types
=============================
``compiler`` works for types, too, such as below where we implement the
MS-SQL specific 'max' keyword for ``String``/``VARCHAR``::
@compiles(String, 'mssql')
@compiles(VARCHAR, 'mssql')
def compile_varchar(element, compiler, **kw):
if element.length == 'max':
return "VARCHAR('max')"
else:
return compiler.visit_VARCHAR(element, **kw)
foo = Table('foo', metadata,
Column('data', VARCHAR('max'))
)
Subclassing Guidelines
======================
A big part of using the compiler extension is subclassing SQLAlchemy expression constructs. To make this easier, the expression and schema packages feature a set of "bases" intended for common tasks. A synopsis is as follows:
A big part of using the compiler extension is subclassing SQLAlchemy
expression constructs. To make this easier, the expression and
schema packages feature a set of "bases" intended for common tasks.
A synopsis is as follows:
* :class:`~sqlalchemy.sql.expression.ClauseElement` - This is the root
expression class. Any SQL expression can be derived from this base, and is
probably the best choice for longer constructs such as specialized INSERT
statements.
* :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all
"column-like" elements. Anything that you'd place in the "columns" clause of
a SELECT statement (as well as order by and group by) can derive from this -
the object will automatically have Python "comparison" behavior.
:class:`~sqlalchemy.sql.expression.ColumnElement` classes want to have a
``type`` member which is expression's return type. This can be established
at the instance level in the constructor, or at the class level if its
generally constant::
class timestamp(ColumnElement):
type = TIMESTAMP()
* :class:`~sqlalchemy.sql.expression.FunctionElement` - This is a hybrid of a
* :class:`~sqlalchemy.sql.functions.FunctionElement` - This is a hybrid of a
``ColumnElement`` and a "from clause" like object, and represents a SQL
function or stored procedure type of call. Since most databases support
statements along the line of "SELECT FROM <some function>"
``FunctionElement`` adds in the ability to be used in the FROM clause of a
``select()`` construct.
``select()`` construct::
from sqlalchemy.sql.expression import FunctionElement
class coalesce(FunctionElement):
name = 'coalesce'
@compiles(coalesce)
def compile(element, compiler, **kw):
return "coalesce(%s)" % compiler.process(element.clauses)
@compiles(coalesce, 'oracle')
def compile(element, compiler, **kw):
if len(element.clauses) > 2:
raise TypeError("coalesce only supports two arguments on Oracle")
return "nvl(%s)" % compiler.process(element.clauses)
* :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions,
like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement``
subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``.
@@ -156,39 +280,195 @@ A big part of using the compiler extension is subclassing SQLAlchemy expression
``execute_at()`` method, allowing the construct to be invoked during CREATE
TABLE and DROP TABLE sequences.
* :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which should be
used with any expression class that represents a "standalone" SQL statement that
can be passed directly to an ``execute()`` method. It is already implicit
within ``DDLElement`` and ``FunctionElement``.
* :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which
should be used with any expression class that represents a "standalone"
SQL statement that can be passed directly to an ``execute()`` method. It
is already implicit within ``DDLElement`` and ``FunctionElement``.
Further Examples
================
"UTC timestamp" function
-------------------------
A function that works like "CURRENT_TIMESTAMP" except applies the
appropriate conversions so that the time is in UTC time. Timestamps are best
stored in relational databases as UTC, without time zones. UTC so that your
database doesn't think time has gone backwards in the hour when daylight
savings ends, without timezones because timezones are like character
encodings - they're best applied only at the endpoints of an application
(i.e. convert to UTC upon user input, re-apply desired timezone upon display).
For PostgreSQL and Microsoft SQL Server::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import DateTime
class utcnow(expression.FunctionElement):
type = DateTime()
@compiles(utcnow, 'postgresql')
def pg_utcnow(element, compiler, **kw):
return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
@compiles(utcnow, 'mssql')
def ms_utcnow(element, compiler, **kw):
return "GETUTCDATE()"
Example usage::
from sqlalchemy import (
Table, Column, Integer, String, DateTime, MetaData
)
metadata = MetaData()
event = Table("event", metadata,
Column("id", Integer, primary_key=True),
Column("description", String(50), nullable=False),
Column("timestamp", DateTime, server_default=utcnow())
)
"GREATEST" function
-------------------
The "GREATEST" function is given any number of arguments and returns the one
that is of the highest value - its equivalent to Python's ``max``
function. A SQL standard version versus a CASE based version which only
accommodates two arguments::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import Numeric
class greatest(expression.FunctionElement):
type = Numeric()
name = 'greatest'
@compiles(greatest)
def default_greatest(element, compiler, **kw):
return compiler.visit_function(element)
@compiles(greatest, 'sqlite')
@compiles(greatest, 'mssql')
@compiles(greatest, 'oracle')
def case_greatest(element, compiler, **kw):
arg1, arg2 = list(element.clauses)
return "CASE WHEN %s > %s THEN %s ELSE %s END" % (
compiler.process(arg1),
compiler.process(arg2),
compiler.process(arg1),
compiler.process(arg2),
)
Example usage::
Session.query(Account).\
filter(
greatest(
Account.checking_balance,
Account.savings_balance) > 10000
)
"false" expression
------------------
Render a "false" constant expression, rendering as "0" on platforms that
don't have a "false" constant::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
class sql_false(expression.ColumnElement):
pass
@compiles(sql_false)
def default_false(element, compiler, **kw):
return "false"
@compiles(sql_false, 'mssql')
@compiles(sql_false, 'mysql')
@compiles(sql_false, 'oracle')
def int_false(element, compiler, **kw):
return "0"
Example usage::
from sqlalchemy import select, union_all
exp = union_all(
select([users.c.name, sql_false().label("enrolled")]),
select([customers.c.name, customers.c.enrolled])
)
"""
from .. import exc
from ..sql import visitors
def compiles(class_, *specs):
"""Register a function as a compiler for a
given :class:`.ClauseElement` type."""
def decorate(fn):
existing = getattr(class_, '_compiler_dispatcher', None)
# get an existing @compiles handler
existing = class_.__dict__.get('_compiler_dispatcher', None)
# get the original handler. All ClauseElement classes have one
# of these, but some TypeEngine classes will not.
existing_dispatch = getattr(class_, '_compiler_dispatch', None)
if not existing:
existing = _dispatcher()
if existing_dispatch:
def _wrap_existing_dispatch(element, compiler, **kw):
try:
return existing_dispatch(element, compiler, **kw)
except exc.UnsupportedCompilationError:
raise exc.CompileError(
"%s construct has no default "
"compilation handler." % type(element))
existing.specs['default'] = _wrap_existing_dispatch
# TODO: why is the lambda needed ?
setattr(class_, '_compiler_dispatch', lambda *arg, **kw: existing(*arg, **kw))
setattr(class_, '_compiler_dispatch',
lambda *arg, **kw: existing(*arg, **kw))
setattr(class_, '_compiler_dispatcher', existing)
if specs:
for s in specs:
existing.specs[s] = fn
else:
existing.specs['default'] = fn
return fn
return decorate
def deregister(class_):
"""Remove all custom compilers associated with a given
:class:`.ClauseElement` type."""
if hasattr(class_, '_compiler_dispatcher'):
# regenerate default _compiler_dispatch
visitors._generate_dispatch(class_)
# remove custom directive
del class_._compiler_dispatcher
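A brief illustration of the register/deregister cycle, reusing the ``MyColumn`` construct from the synopsis above::

    from sqlalchemy.ext.compiler import compiles, deregister
    from sqlalchemy.sql.expression import ColumnClause

    class MyColumn(ColumnClause):
        pass

    @compiles(MyColumn)
    def compile_mycolumn(element, compiler, **kw):
        return "[%s]" % element.name

    # ...later, restore the default ClauseElement compilation:
    deregister(MyColumn)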
class _dispatcher(object):
def __init__(self):
self.specs = {}
def __call__(self, element, compiler, **kw):
# TODO: yes, this could also switch off of DBAPI in use.
fn = self.specs.get(compiler.dialect.name, None)
if not fn:
fn = self.specs['default']
try:
fn = self.specs['default']
except KeyError:
raise exc.CompileError(
"%s construct has no default "
"compilation handler." % type(element))
return fn(element, compiler, **kw)

View File

@@ -1,5 +1,6 @@
# horizontal_shard.py
# Copyright (C) the SQLAlchemy authors and contributors
# ext/horizontal_shard.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -9,105 +10,54 @@
Defines a rudimental 'horizontal sharding' system which allows a Session to
distribute queries and persistence operations across multiple databases.
For a usage example, see the :ref:`examples_sharding` example included in
the source distrbution.
For a usage example, see the :ref:`examples_sharding` example included in
the source distribution.
"""
import sqlalchemy.exceptions as sa_exc
from sqlalchemy import util
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.query import Query
from .. import util
from ..orm.session import Session
from ..orm.query import Query
__all__ = ['ShardedSession', 'ShardedQuery']
class ShardedSession(Session):
def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None, **kwargs):
"""Construct a ShardedSession.
:param shard_chooser: A callable which, passed a Mapper, a mapped instance, and possibly a
SQL clause, returns a shard ID. This id may be based off of the
attributes present within the object, or on some round-robin
scheme. If the scheme is based on a selection, it should set
whatever state on the instance to mark it in the future as
participating in that shard.
:param id_chooser: A callable, passed a query and a tuple of identity values, which
should return a list of shard ids where the ID might reside. The
databases will be queried in the order of this listing.
:param query_chooser: For a given Query, returns the list of shard_ids where the query
should be issued. Results from all shards returned will be combined
together into a single listing.
:param shards: A dictionary of string shard names to :class:`~sqlalchemy.engine.base.Engine`
objects.
"""
super(ShardedSession, self).__init__(**kwargs)
self.shard_chooser = shard_chooser
self.id_chooser = id_chooser
self.query_chooser = query_chooser
self.__binds = {}
self._mapper_flush_opts = {'connection_callable':self.connection}
self._query_cls = ShardedQuery
if shards is not None:
for k in shards:
self.bind_shard(k, shards[k])
def connection(self, mapper=None, instance=None, shard_id=None, **kwargs):
if shard_id is None:
shard_id = self.shard_chooser(mapper, instance)
if self.transaction is not None:
return self.transaction.connection(mapper, shard_id=shard_id)
else:
return self.get_bind(mapper,
shard_id=shard_id,
instance=instance).contextual_connect(**kwargs)
def get_bind(self, mapper, shard_id=None, instance=None, clause=None, **kw):
if shard_id is None:
shard_id = self.shard_chooser(mapper, instance, clause=clause)
return self.__binds[shard_id]
def bind_shard(self, shard_id, bind):
self.__binds[shard_id] = bind
class ShardedQuery(Query):
def __init__(self, *args, **kwargs):
super(ShardedQuery, self).__init__(*args, **kwargs)
self.id_chooser = self.session.id_chooser
self.query_chooser = self.session.query_chooser
self._shard_id = None
def set_shard(self, shard_id):
"""return a new query, limited to a single shard ID.
all subsequent operations with the returned query will
be against the single shard regardless of other state.
"""
q = self._clone()
q._shard_id = shard_id
return q
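Illustrative use of ``set_shard``, assuming a ``ShardedSession`` is in play (the entity and shard name are hypothetical)::

    # pin the query to one shard, bypassing query_chooser
    q = session.query(WeatherLocation).set_shard("north_america")
    results = q.all()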
def _execute_and_instances(self, context):
if self._shard_id is not None:
result = self.session.connection(
mapper=self._mapper_zero(),
shard_id=self._shard_id).execute(context.statement, self._params)
def iter_for_shard(shard_id):
context.attributes['shard_id'] = shard_id
result = self._connection_from_session(
mapper=self._mapper_zero(),
shard_id=shard_id).execute(
context.statement,
self._params)
return self.instances(result, context)
if self._shard_id is not None:
return iter_for_shard(self._shard_id)
else:
partial = []
for shard_id in self.query_chooser(self):
result = self.session.connection(
mapper=self._mapper_zero(),
shard_id=shard_id).execute(context.statement, self._params)
partial = partial + list(self.instances(result, context))
# if some kind of in memory 'sorting'
partial.extend(iter_for_shard(shard_id))
# if some kind of in memory 'sorting'
# were done, this is where it would happen
return iter(partial)
@@ -122,4 +72,60 @@ class ShardedQuery(Query):
return o
else:
return None
class ShardedSession(Session):
def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None,
query_cls=ShardedQuery, **kwargs):
"""Construct a ShardedSession.
:param shard_chooser: A callable which, passed a Mapper, a mapped
instance, and possibly a SQL clause, returns a shard ID. This id
may be based off of the attributes present within the object, or on
some round-robin scheme. If the scheme is based on a selection, it
should set whatever state on the instance to mark it in the future as
participating in that shard.
:param id_chooser: A callable, passed a query and a tuple of identity
values, which should return a list of shard ids where the ID might
reside. The databases will be queried in the order of this listing.
:param query_chooser: For a given Query, returns the list of shard_ids
where the query should be issued. Results from all shards returned
will be combined together into a single listing.
:param shards: A dictionary of string shard names
to :class:`~sqlalchemy.engine.Engine` objects.
"""
super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs)
self.shard_chooser = shard_chooser
self.id_chooser = id_chooser
self.query_chooser = query_chooser
self.__binds = {}
self.connection_callable = self.connection
if shards is not None:
for k in shards:
self.bind_shard(k, shards[k])
def connection(self, mapper=None, instance=None, shard_id=None, **kwargs):
if shard_id is None:
shard_id = self.shard_chooser(mapper, instance)
if self.transaction is not None:
return self.transaction.connection(mapper, shard_id=shard_id)
else:
return self.get_bind(
mapper,
shard_id=shard_id,
instance=instance
).contextual_connect(**kwargs)
def get_bind(self, mapper, shard_id=None,
instance=None, clause=None, **kw):
if shard_id is None:
shard_id = self.shard_chooser(mapper, instance, clause=clause)
return self.__binds[shard_id]
def bind_shard(self, shard_id, bind):
self.__binds[shard_id] = bind
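A minimal construction sketch against the reworked signature, with deliberately trivial chooser callables (the two in-memory engines are assumed purely for illustration)::

    from sqlalchemy import create_engine
    from sqlalchemy.ext.horizontal_shard import ShardedSession

    shards = {
        "shard1": create_engine("sqlite://"),
        "shard2": create_engine("sqlite://"),
    }

    def shard_chooser(mapper, instance, clause=None):
        # a real scheme would inspect the instance's attributes
        return "shard1"

    def id_chooser(query, ident):
        # a primary-key lookup may need to search every shard
        return ["shard1", "shard2"]

    def query_chooser(query):
        # issue SELECTs against all shards; results are combined
        return ["shard1", "shard2"]

    session = ShardedSession(
        shard_chooser=shard_chooser,
        id_chooser=id_chooser,
        query_chooser=query_chooser,
        shards=shards,
    )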

View File

@@ -1,58 +1,78 @@
"""A custom list that manages index/position information for its children.
# ext/orderinglist.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""A custom list that manages index/position information for contained
elements.
:author: Jason Kirtland
``orderinglist`` is a helper for mutable ordered relationships. It will intercept
list operations performed on a relationship collection and automatically
synchronize changes in list position with an attribute on the related objects.
(See :ref:`advdatamapping_entitycollections` for more information on the general pattern.)
``orderinglist`` is a helper for mutable ordered relationships. It will
intercept list operations performed on a :func:`.relationship`-managed
collection and
automatically synchronize changes in list position onto a target scalar
attribute.
Example: Two tables that store slides in a presentation. Each slide
has a number of bullet points, displayed in order by the 'position'
column on the bullets table. These bullets can be inserted and re-ordered
by your end users, and you need to update the 'position' column of all
affected rows when changes are made.
Example: A ``slide`` table, where each row refers to zero or more entries
in a related ``bullet`` table. The bullets within a slide are
displayed in order based on the value of the ``position`` column in the
``bullet`` table. As entries are reordered in memory, the value of the
``position`` attribute should be updated to reflect the new sort order::
.. sourcecode:: python+sql
slides_table = Table('Slides', metadata,
Column('id', Integer, primary_key=True),
Column('name', String))
Base = declarative_base()
bullets_table = Table('Bullets', metadata,
Column('id', Integer, primary_key=True),
Column('slide_id', Integer, ForeignKey('Slides.id')),
Column('position', Integer),
Column('text', String))
class Slide(Base):
__tablename__ = 'slide'
class Slide(object):
pass
class Bullet(object):
pass
id = Column(Integer, primary_key=True)
name = Column(String)
mapper(Slide, slides_table, properties={
'bullets': relationship(Bullet, order_by=[bullets_table.c.position])
})
mapper(Bullet, bullets_table)
bullets = relationship("Bullet", order_by="Bullet.position")
The standard relationship mapping will produce a list-like attribute on each Slide
containing all related Bullets, but coping with changes in ordering is totally
your responsibility. If you insert a Bullet into that list, there is no
magic- it won't have a position attribute unless you assign it it one, and
you'll need to manually renumber all the subsequent Bullets in the list to
accommodate the insert.
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
An ``orderinglist`` can automate this and manage the 'position' attribute on all
related bullets for you.
The standard relationship mapping will produce a list-like attribute on each
``Slide`` containing all related ``Bullet`` objects,
but coping with changes in ordering is not handled automatically.
When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position``
attribute will remain unset until manually assigned. When the ``Bullet``
is inserted into the middle of the list, the following ``Bullet`` objects
will also need to be renumbered.
.. sourcecode:: python+sql
mapper(Slide, slides_table, properties={
'bullets': relationship(Bullet,
collection_class=ordering_list('position'),
order_by=[bullets_table.c.position])
})
mapper(Bullet, bullets_table)
The :class:`.OrderingList` object automates this task, managing the
``position`` attribute on all ``Bullet`` objects in the collection. It is
constructed using the :func:`.ordering_list` factory::
from sqlalchemy.ext.orderinglist import ordering_list
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
With the above mapping the ``Bullet.position`` attribute is managed::
s = Slide()
s.bullets.append(Bullet())
@@ -63,71 +83,98 @@ related bullets for you.
s.bullets[2].position
>>> 2
Use the ``ordering_list`` function to set up the ``collection_class`` on relationships
(as in the mapper example above). This implementation depends on the list
starting in the proper order, so be SURE to put an order_by on your relationship.
The :class:`.OrderingList` construct only works with **changes** to a
collection, and not the initial load from the database, and requires that the
list be sorted when loaded. Therefore, be sure to specify ``order_by`` on the
:func:`.relationship` against the target ordering attribute, so that the
ordering is correct when first loaded.
.. warning:: ``ordering_list`` only provides limited functionality when a primary
key column or unique column is the target of the sort. Since changing the order of
entries often means that two rows must trade values, this is not possible when
the value is constrained by a primary key or unique constraint, since one of the rows
would temporarily have to point to a third available value so that the other row
could take its old value. ``ordering_list`` doesn't do any of this for you,
nor does SQLAlchemy itself.
.. warning::
``ordering_list`` takes the name of the related object's ordering attribute as
an argument. By default, the zero-based integer index of the object's
position in the ``ordering_list`` is synchronized with the ordering attribute:
index 0 will get position 0, index 1 position 1, etc. To start numbering at 1
or some other integer, provide ``count_from=1``.
:class:`.OrderingList` only provides limited functionality when a primary
key column or unique column is the target of the sort. Operations
that are unsupported or are problematic include:
Ordering values are not limited to incrementing integers. Almost any scheme
can implemented by supplying a custom ``ordering_func`` that maps a Python list
index to any value you require.
* two entries must trade values. This is not supported directly in the
case of a primary key or unique constraint because it means at least
one row would need to be temporarily removed first, or changed to
a third, neutral value while the switch occurs.
* an entry must be deleted in order to make room for a new entry.
SQLAlchemy's unit of work performs all INSERTs before DELETEs within a
single flush. In the case of a primary key, it will trade
an INSERT/DELETE of the same primary key for an UPDATE statement in order
to lessen the impact of this limitation, however this does not take place
for a UNIQUE column.
A future feature will allow the "DELETE before INSERT" behavior to be
possible, alleviating this limitation, though this feature will require
explicit configuration at the mapper level for sets of columns that
are to be handled in this way.
:func:`.ordering_list` takes the name of the related object's ordering
attribute as an argument. By default, the zero-based integer index of the
object's position in the :func:`.ordering_list` is synchronized with the
ordering attribute: index 0 will get position 0, index 1 position 1, etc. To
start numbering at 1 or some other integer, provide ``count_from=1``.
"""
from sqlalchemy.orm.collections import collection
from sqlalchemy import util
from ..orm.collections import collection, collection_adapter
from .. import util
__all__ = [ 'ordering_list' ]
__all__ = ['ordering_list']
def ordering_list(attr, count_from=None, **kw):
"""Prepares an OrderingList factory for use in mapper definitions.
"""Prepares an :class:`OrderingList` factory for use in mapper definitions.
Returns an object suitable for use as an argument to a Mapper relationship's
``collection_class`` option. Arguments are:
Returns an object suitable for use as an argument to a Mapper
relationship's ``collection_class`` option. e.g.::
attr
from sqlalchemy.ext.orderinglist import ordering_list
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
:param attr:
Name of the mapped attribute to use for storage and retrieval of
ordering information
count_from (optional)
:param count_from:
Set up an integer-based ordering, starting at ``count_from``. For
example, ``ordering_list('pos', count_from=1)`` would create a 1-based
list in SQL, storing the value in the 'pos' column. Ignored if
``ordering_func`` is supplied.
Passes along any keyword arguments to ``OrderingList`` constructor.
Additional arguments are passed to the :class:`.OrderingList` constructor.
"""
kw = _unsugar_count_from(count_from=count_from, **kw)
return lambda: OrderingList(attr, **kw)
# Ordering utility functions
def count_from_0(index, collection):
"""Numbering function: consecutive integers starting at 0."""
return index
def count_from_1(index, collection):
"""Numbering function: consecutive integers starting at 1."""
return index + 1
def count_from_n_factory(start):
"""Numbering function: consecutive integers starting at arbitrary start."""
@@ -139,8 +186,9 @@ def count_from_n_factory(start):
pass
return f
def _unsugar_count_from(**kw):
"""Builds counting functions from keywrod arguments.
"""Builds counting functions from keyword arguments.
Keyword argument filter, prepares a simple ``ordering_func`` from a
``count_from`` argument, otherwise passes ``ordering_func`` on unchanged.
@@ -156,12 +204,13 @@ def _unsugar_count_from(**kw):
kw['ordering_func'] = count_from_n_factory(count_from)
return kw
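As the numbering helpers above suggest, ordering values need not be consecutive integers; a hedged sketch of a custom ``ordering_func`` that leaves gaps between positions (mapping names reuse the docstring example)::

    from sqlalchemy.orm import relationship
    from sqlalchemy.ext.orderinglist import ordering_list

    def stepped_numbering(index, collection):
        # store 0, 10, 20, ... so later inserts rarely force renumbering
        return index * 10

    bullets = relationship(
        "Bullet", order_by="Bullet.position",
        collection_class=ordering_list('position',
                                       ordering_func=stepped_numbering))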
class OrderingList(list):
"""A custom list that manages position information for its children.
See the module and __init__ documentation for more details. The
``ordering_list`` factory function is used to configure ``OrderingList``
collections in ``mapper`` relationship definitions.
The :class:`.OrderingList` object is normally set up using the
:func:`.ordering_list` factory function, used in conjunction with
the :func:`.relationship` function.
"""
@@ -176,14 +225,14 @@ class OrderingList(list):
This implementation relies on the list starting in the proper order,
so be **sure** to put an ``order_by`` on your relationship.
ordering_attr
:param ordering_attr:
Name of the attribute that stores the object's order in the
relationship.
ordering_func
Optional. A function that maps the position in the Python list to a
value to store in the ``ordering_attr``. Values returned are
usually (but need not be!) integers.
:param ordering_func: Optional. A function that maps the position in
the Python list to a value to store in the
``ordering_attr``. Values returned are usually (but need not be!)
integers.
An ``ordering_func`` is called with two positional parameters: the
index of the element in the list, and the list itself.
@@ -194,7 +243,7 @@ class OrderingList(list):
like stepped numbering, alphabetical and Fibonacci numbering, see
the unit tests.
reorder_on_append
:param reorder_on_append:
Default False. When appending an object with an existing (non-None)
ordering value, that value will be left untouched unless
``reorder_on_append`` is true. This is an optimization to avoid a
@@ -208,7 +257,7 @@ class OrderingList(list):
making changes, any of whom happen to load this collection even in
passing, all of the sessions would try to "clean up" the numbering
in their commits, possibly causing all but one to fail with a
concurrent modification error. Spooky action at a distance.
concurrent modification error.
Recommend leaving this with the default of False, and just call
``reorder()`` if you're doing ``append()`` operations with
@@ -270,7 +319,10 @@ class OrderingList(list):
def remove(self, entity):
super(OrderingList, self).remove(entity)
self._reorder()
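# renumber only if this list is still the collection referenced by
# its owning object, rather than a detached or replaced copy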
adapter = collection_adapter(self)
if adapter and adapter._referenced_by_owner:
self._reorder()
def pop(self, index=-1):
entity = super(OrderingList, self).pop(index)
@@ -286,8 +338,8 @@ class OrderingList(list):
stop = index.stop or len(self)
if stop < 0:
stop += len(self)
for i in xrange(start, stop, step):
for i in range(start, stop, step):
self.__setitem__(i, entity[i])
else:
self._order_entity(index, entity, True)
@@ -297,7 +349,6 @@ class OrderingList(list):
super(OrderingList, self).__delitem__(index)
self._reorder()
# Py2K
def __setslice__(self, start, end, values):
super(OrderingList, self).__setslice__(start, end, values)
self._reorder()
@@ -305,11 +356,25 @@ class OrderingList(list):
def __delslice__(self, start, end):
super(OrderingList, self).__delslice__(start, end)
self._reorder()
# end Py2K
for func_name, func in locals().items():
if (util.callable(func) and func.func_name == func_name and
not func.__doc__ and hasattr(list, func_name)):
def __reduce__(self):
return _reconstitute, (self.__class__, self.__dict__, list(self))
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(list, func_name)):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
def _reconstitute(cls, dict_, items):
""" Reconstitute an :class:`.OrderingList`.
This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for
unpickling :class:`.OrderingList` objects.
"""
obj = cls.__new__(cls)
obj.__dict__.update(dict_)
list.extend(obj, items)
return obj
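A hedged round-trip sketch (standard-library pickle only) of what
``__reduce__`` and ``_reconstitute`` enable::

    import pickle
    from sqlalchemy.ext.orderinglist import OrderingList

    ol = OrderingList('position')             # an empty list is enough here
    clone = pickle.loads(pickle.dumps(ol))
    assert isinstance(clone, OrderingList)
    assert clone.ordering_attr == 'position'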
View File
@@ -1,4 +1,11 @@
"""Serializer/Deserializer objects for usage with SQLAlchemy query structures,
# ext/serializer.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Serializer/Deserializer objects for usage with SQLAlchemy query structures,
allowing "contextual" deserialization.
Any SQLAlchemy query structure, either based on sqlalchemy.sql.*
@@ -12,82 +19,73 @@ Usage is nearly the same as that of the standard Python pickle module::
from sqlalchemy.ext.serializer import loads, dumps
metadata = MetaData(bind=some_engine)
Session = scoped_session(sessionmaker())
# ... define mappers
query = Session.query(MyClass).filter(MyClass.somedata=='foo').order_by(MyClass.sortkey)
query = Session.query(MyClass).\
filter(MyClass.somedata=='foo').order_by(MyClass.sortkey)
# pickle the query
serialized = dumps(query)
# unpickle. Pass in metadata + scoped_session
query2 = loads(serialized, metadata, Session)
print(query2.all())
Similar restrictions as when using raw pickle apply; mapped classes must
themselves be pickleable, meaning they are importable from a module-level
namespace.
The serializer module is only appropriate for query structures. It is not
needed for:
* instances of user-defined classes. These contain no references to engines,
sessions or expression constructs in the typical case and can be serialized directly.
* instances of user-defined classes. These contain no references to engines,
sessions or expression constructs in the typical case and can be serialized
directly.
* Table metadata that is to be loaded entirely from the serialized structure (i.e. is
not already declared in the application). Regular pickle.loads()/dumps() can
be used to fully dump any ``MetaData`` object, typically one which was reflected
from an existing database at some previous point in time. The serializer module
is specifically for the opposite case, where the Table metadata is already present
in memory.
* Table metadata that is to be loaded entirely from the serialized structure
(i.e. is not already declared in the application). Regular
pickle.loads()/dumps() can be used to fully dump any ``MetaData`` object,
typically one which was reflected from an existing database at some previous
point in time. The serializer module is specifically for the opposite case,
where the Table metadata is already present in memory.
"""
from sqlalchemy.orm import class_mapper, Query
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.orm.attributes import QueryableAttribute
from sqlalchemy import Table, Column
from sqlalchemy.engine import Engine
from sqlalchemy.util import pickle
from ..orm import class_mapper
from ..orm.session import Session
from ..orm.mapper import Mapper
from ..orm.interfaces import MapperProperty
from ..orm.attributes import QueryableAttribute
from .. import Table, Column
from ..engine import Engine
from ..util import pickle, byte_buffer, b64encode, b64decode, text_type
import re
import base64
# Py3K
#from io import BytesIO as byte_buffer
# Py2K
from cStringIO import StringIO as byte_buffer
# end Py2K
# Py3K
#def b64encode(x):
# return base64.b64encode(x).decode('ascii')
#def b64decode(x):
# return base64.b64decode(x.encode('ascii'))
# Py2K
b64encode = base64.b64encode
b64decode = base64.b64decode
# end Py2K
__all__ = ['Serializer', 'Deserializer', 'dumps', 'loads']
def Serializer(*args, **kw):
pickler = pickle.Pickler(*args, **kw)
def persistent_id(obj):
#print "serializing:", repr(obj)
# print "serializing:", repr(obj)
if isinstance(obj, QueryableAttribute):
cls = obj.impl.class_
key = obj.impl.key
id = "attribute:" + key + ":" + b64encode(pickle.dumps(cls))
elif isinstance(obj, Mapper) and not obj.non_primary:
id = "mapper:" + b64encode(pickle.dumps(obj.class_))
elif isinstance(obj, MapperProperty) and not obj.parent.non_primary:
id = "mapperprop:" + b64encode(pickle.dumps(obj.parent.class_)) + \
":" + obj.key
elif isinstance(obj, Table):
id = "table:" + str(obj)
id = "table:" + text_type(obj.key)
elif isinstance(obj, Column) and isinstance(obj.table, Table):
id = "column:" + str(obj.table) + ":" + obj.key
id = "column:" + \
text_type(obj.table.key) + ":" + text_type(obj.key)
elif isinstance(obj, Session):
id = "session:"
elif isinstance(obj, Engine):
@@ -95,15 +93,17 @@ def Serializer(*args, **kw):
else:
return None
return id
pickler.persistent_id = persistent_id
return pickler
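The hook above is pickle's documented persistent-ID protocol; a hedged,
self-contained sketch of the same mechanism using plain string ids::

    import pickle
    from io import BytesIO

    REGISTRY = {'table:users': object()}   # stands in for live metadata

    buf = BytesIO()
    pickler = pickle.Pickler(buf)
    pickler.persistent_id = (
        lambda obj: 'table:users'
        if obj is REGISTRY['table:users'] else None)
    pickler.dump({'target': REGISTRY['table:users']})

    buf.seek(0)
    unpickler = pickle.Unpickler(buf)
    unpickler.persistent_load = lambda pid: REGISTRY[pid]
    assert unpickler.load()['target'] is REGISTRY['table:users']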
our_ids = re.compile(r'(mapper|table|column|session|attribute|engine):(.*)')
our_ids = re.compile(
r'(mapperprop|mapper|table|column|session|attribute|engine):(.*)')
def Deserializer(file, metadata=None, scoped_session=None, engine=None):
unpickler = pickle.Unpickler(file)
def get_engine():
if engine:
return engine
@@ -113,9 +113,9 @@ def Deserializer(file, metadata=None, scoped_session=None, engine=None):
return metadata.bind
else:
return None
def persistent_load(id):
m = our_ids.match(id)
m = our_ids.match(text_type(id))
if not m:
return None
else:
@@ -127,6 +127,10 @@ def Deserializer(file, metadata=None, scoped_session=None, engine=None):
elif type_ == "mapper":
cls = pickle.loads(b64decode(args))
return class_mapper(cls)
elif type_ == "mapperprop":
mapper, keyname = args.split(':')
cls = pickle.loads(b64decode(mapper))
return class_mapper(cls).attrs[keyname]
elif type_ == "table":
return metadata.tables[args]
elif type_ == "column":
@@ -141,15 +145,15 @@ def Deserializer(file, metadata=None, scoped_session=None, engine=None):
unpickler.persistent_load = persistent_load
return unpickler
def dumps(obj, protocol=0):
buf = byte_buffer()
pickler = Serializer(buf, protocol)
pickler.dump(obj)
return buf.getvalue()
def loads(data, metadata=None, scoped_session=None, engine=None):
buf = byte_buffer(data)
unpickler = Deserializer(buf, metadata, scoped_session, engine)
return unpickler.load()
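A hedged end-to-end sketch (Core-only; no ORM session required) of the
contextual round trip::

    from sqlalchemy import MetaData, Table, Column, Integer, select
    from sqlalchemy.ext.serializer import dumps, loads

    metadata = MetaData()
    users = Table('users', metadata, Column('id', Integer, primary_key=True))

    stmt = select([users]).where(users.c.id == 1)
    payload = dumps(stmt)              # the Table rides along as "table:users"
    stmt2 = loads(payload, metadata)   # re-attached to the live metadata
    assert stmt2.froms[0] is users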