Updated SQLAlchemy + the new files

2017-04-15 16:33:29 +00:00
parent e3267d4bda
commit 4669737fe3
134 changed files with 66374 additions and 4528 deletions

View File

@@ -0,0 +1,314 @@
# postgresql/array.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .base import ischema_names
from ...sql import expression, operators
from ...sql.base import SchemaEventTarget
from ... import types as sqltypes
try:
from uuid import UUID as _python_UUID
except ImportError:
_python_UUID = None
def Any(other, arrexpr, operator=operators.eq):
"""A synonym for the :meth:`.ARRAY.Comparator.any` method.
    This function is legacy and is here for backwards compatibility.
.. seealso::
:func:`.expression.any_`
"""
return arrexpr.any(other, operator)
def All(other, arrexpr, operator=operators.eq):
"""A synonym for the :meth:`.ARRAY.Comparator.all` method.
    This function is legacy and is here for backwards compatibility.
.. seealso::
:func:`.expression.all_`
"""
return arrexpr.all(other, operator)
class array(expression.Tuple):
"""A PostgreSQL ARRAY literal.
This is used to produce ARRAY literals in SQL expressions, e.g.::
from sqlalchemy.dialects.postgresql import array
from sqlalchemy.dialects import postgresql
from sqlalchemy import select, func
stmt = select([
array([1,2]) + array([3,4,5])
])
        print(stmt.compile(dialect=postgresql.dialect()))
Produces the SQL::
        SELECT ARRAY[%(param_1)s, %(param_2)s] ||
            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1
An instance of :class:`.array` will always have the datatype
:class:`.ARRAY`. The "inner" type of the array is inferred from
the values present, unless the ``type_`` keyword argument is passed::
array(['foo', 'bar'], type_=CHAR)
.. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.
    .. seealso::
:class:`.postgresql.ARRAY`
"""
__visit_name__ = 'array'
def __init__(self, clauses, **kw):
super(array, self).__init__(*clauses, **kw)
self.type = ARRAY(self.type)
def _bind_param(self, operator, obj, _assume_scalar=False, type_=None):
if _assume_scalar or operator is operators.getitem:
# if getitem->slice were called, Indexable produces
# a Slice object from that
assert isinstance(obj, int)
return expression.BindParameter(
None, obj, _compared_to_operator=operator,
type_=type_,
_compared_to_type=self.type, unique=True)
else:
return array([
self._bind_param(operator, o, _assume_scalar=True, type_=type_)
for o in obj])
def self_group(self, against=None):
if (against in (
operators.any_op, operators.all_op, operators.getitem)):
return expression.Grouping(self)
else:
return self
CONTAINS = operators.custom_op("@>", precedence=5)
CONTAINED_BY = operators.custom_op("<@", precedence=5)
OVERLAP = operators.custom_op("&&", precedence=5)
class ARRAY(SchemaEventTarget, sqltypes.ARRAY):
"""PostgreSQL ARRAY type.
.. versionchanged:: 1.1 The :class:`.postgresql.ARRAY` type is now
a subclass of the core :class:`.types.ARRAY` type.
The :class:`.postgresql.ARRAY` type is constructed in the same way
as the core :class:`.types.ARRAY` type; a member type is required, and a
number of dimensions is recommended if the type is to be used for more
than one dimension::
from sqlalchemy.dialects import postgresql
mytable = Table("mytable", metadata,
Column("data", postgresql.ARRAY(Integer, dimensions=2))
)
The :class:`.postgresql.ARRAY` type provides all operations defined on the
core :class:`.types.ARRAY` type, including support for "dimensions", indexed
access, and simple matching such as :meth:`.types.ARRAY.Comparator.any`
    and :meth:`.types.ARRAY.Comparator.all`. The :class:`.postgresql.ARRAY`
    class also provides PostgreSQL-specific methods for containment
    operations, including :meth:`.postgresql.ARRAY.Comparator.contains`,
:meth:`.postgresql.ARRAY.Comparator.contained_by`,
and :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.::
mytable.c.data.contains([1, 2])
The :class:`.postgresql.ARRAY` type may not be supported on all
PostgreSQL DBAPIs; it is currently known to work on psycopg2 only.
Additionally, the :class:`.postgresql.ARRAY` type does not work directly in
conjunction with the :class:`.ENUM` type. For a workaround, see the
special type at :ref:`postgresql_array_of_enum`.
.. seealso::
:class:`.types.ARRAY` - base array type
:class:`.postgresql.array` - produces a literal array value.
"""
class Comparator(sqltypes.ARRAY.Comparator):
"""Define comparison operations for :class:`.ARRAY`.
Note that these operations are in addition to those provided
by the base :class:`.types.ARRAY.Comparator` class, including
:meth:`.types.ARRAY.Comparator.any` and
:meth:`.types.ARRAY.Comparator.all`.
"""
def contains(self, other, **kwargs):
"""Boolean expression. Test if elements are a superset of the
elements of the argument array expression.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if elements are a proper subset of the
elements of the argument array expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean)
def overlap(self, other):
"""Boolean expression. Test if array has elements in common with
an argument array expression.
"""
return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)
comparator_factory = Comparator
def __init__(self, item_type, as_tuple=False, dimensions=None,
zero_indexes=False):
"""Construct an ARRAY.
E.g.::
Column('myarray', ARRAY(Integer))
Arguments are:
:param item_type: The data type of items of this array. Note that
dimensionality is irrelevant here, so multi-dimensional arrays like
``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
``ARRAY(ARRAY(Integer))`` or such.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. DBAPIs such
as psycopg2 return lists by default. When tuples are
returned, the results are hashable.
:param dimensions: if non-None, the ARRAY will assume a fixed
number of dimensions. This will cause the DDL emitted for this
ARRAY to include the exact number of bracket clauses ``[]``,
and will also optimize the performance of the type overall.
Note that PG arrays are always implicitly "non-dimensioned",
meaning they can store any number of dimensions no matter how
they were declared.
:param zero_indexes=False: when True, index values will be converted
between Python zero-based and PostgreSQL one-based indexes, e.g.
a value of one will be added to all index values before passing
to the database.
.. versionadded:: 0.9.5
"""
if isinstance(item_type, ARRAY):
raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
"handles multi-dimensional arrays of basetype")
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
self.as_tuple = as_tuple
self.dimensions = dimensions
self.zero_indexes = zero_indexes
@property
def hashable(self):
return self.as_tuple
@property
def python_type(self):
return list
def compare_values(self, x, y):
return x == y
def _set_parent(self, column):
"""Support SchemaEventTarget"""
if isinstance(self.item_type, SchemaEventTarget):
self.item_type._set_parent(column)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEventTarget"""
if isinstance(self.item_type, SchemaEventTarget):
self.item_type._set_parent_with_dispatch(parent)
def _proc_array(self, arr, itemproc, dim, collection):
if dim is None:
arr = list(arr)
if dim == 1 or dim is None and (
# this has to be (list, tuple), or at least
# not hasattr('__iter__'), since Py3K strings
# etc. have __iter__
not arr or not isinstance(arr[0], (list, tuple))):
if itemproc:
return collection(itemproc(x) for x in arr)
else:
return collection(arr)
else:
return collection(
self._proc_array(
x, itemproc,
dim - 1 if dim is not None else None,
collection)
for x in arr
)
def bind_processor(self, dialect):
item_proc = self.item_type.dialect_impl(dialect).\
bind_processor(dialect)
def process(value):
if value is None:
return value
else:
return self._proc_array(
value,
item_proc,
self.dimensions,
list)
return process
def result_processor(self, dialect, coltype):
item_proc = self.item_type.dialect_impl(dialect).\
result_processor(dialect, coltype)
def process(value):
if value is None:
return value
else:
return self._proc_array(
value,
item_proc,
self.dimensions,
tuple if self.as_tuple else list)
return process
ischema_names['_array'] = ARRAY
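A hedged usage sketch for the containment operators defined above; the ``planets`` table and its ``moons`` column are hypothetical names introduced only for illustration::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects import postgresql

    metadata = MetaData()
    planets = Table(
        'planets', metadata,
        Column('id', Integer, primary_key=True),
        Column('moons', postgresql.ARRAY(Integer)),
    )

    # @> : rows whose array contains both elements
    stmt = select([planets]).where(planets.c.moons.contains([1, 2]))

    # <@ : rows whose array is contained by the given array
    stmt = select([planets]).where(
        planets.c.moons.contained_by([1, 2, 3]))

    # && : rows whose array shares at least one element
    stmt = select([planets]).where(planets.c.moons.overlap([2, 3]))

    print(stmt.compile(dialect=postgresql.dialect()))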

View File

@@ -0,0 +1,213 @@
# postgresql/on_conflict.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ...sql.elements import ClauseElement, _literal_as_binds
from ...sql.dml import Insert as StandardInsert
from ...sql.expression import alias
from ...sql import schema
from ...util.langhelpers import public_factory
from ...sql.base import _generative
from ... import util
from . import ext
__all__ = ('Insert', 'insert')
class Insert(StandardInsert):
"""PostgreSQL-specific implementation of INSERT.
Adds methods for PG-specific syntaxes such as ON CONFLICT.
.. versionadded:: 1.1
"""
@util.memoized_property
def excluded(self):
"""Provide the ``excluded`` namespace for an ON CONFLICT statement
PG's ON CONFLICT clause allows reference to the row that would
be inserted, known as ``excluded``. This attribute provides
        all columns in this row to be referenceable.
.. seealso::
:ref:`postgresql_insert_on_conflict` - example of how
to use :attr:`.Insert.excluded`
"""
return alias(self.table, name='excluded').columns
@_generative
def on_conflict_do_update(
self,
constraint=None, index_elements=None,
index_where=None, set_=None, where=None):
"""
Specifies a DO UPDATE SET action for ON CONFLICT clause.
Either the ``constraint`` or ``index_elements`` argument is
required, but only one of these can be specified.
:param constraint:
The name of a unique or exclusion constraint on the table,
or the constraint object itself if it has a .name attribute.
:param index_elements:
A sequence consisting of string column names, :class:`.Column`
objects, or other column expression objects that will be used
to infer a target index.
:param index_where:
Additional WHERE criterion that can be used to infer a
conditional target index.
:param set_:
Required argument. A dictionary or other mapping object
with column names as keys and expressions or literals as values,
specifying the ``SET`` actions to take.
If the target :class:`.Column` specifies a ".key" attribute distinct
from the column name, that key should be used.
.. warning:: This dictionary does **not** take into account
Python-specified default UPDATE values or generation functions,
e.g. those specified using :paramref:`.Column.onupdate`.
These values will not be exercised for an ON CONFLICT style of
UPDATE, unless they are manually specified in the
:paramref:`.Insert.on_conflict_do_update.set_` dictionary.
:param where:
Optional argument. If present, can be a literal SQL
string or an acceptable expression for a ``WHERE`` clause
that restricts the rows affected by ``DO UPDATE SET``. Rows
not meeting the ``WHERE`` condition will not be updated
(effectively a ``DO NOTHING`` for those rows).
.. versionadded:: 1.1
.. seealso::
:ref:`postgresql_insert_on_conflict`
"""
self._post_values_clause = OnConflictDoUpdate(
constraint, index_elements, index_where, set_, where)
return self
@_generative
def on_conflict_do_nothing(
self,
constraint=None, index_elements=None, index_where=None):
"""
Specifies a DO NOTHING action for ON CONFLICT clause.
The ``constraint`` and ``index_elements`` arguments
are optional, but only one of these can be specified.
:param constraint:
The name of a unique or exclusion constraint on the table,
or the constraint object itself if it has a .name attribute.
:param index_elements:
A sequence consisting of string column names, :class:`.Column`
objects, or other column expression objects that will be used
to infer a target index.
:param index_where:
Additional WHERE criterion that can be used to infer a
conditional target index.
.. versionadded:: 1.1
.. seealso::
:ref:`postgresql_insert_on_conflict`
"""
self._post_values_clause = OnConflictDoNothing(
constraint, index_elements, index_where)
return self
insert = public_factory(Insert, '.dialects.postgresql.insert')
class OnConflictClause(ClauseElement):
def __init__(
self,
constraint=None,
index_elements=None,
index_where=None):
if constraint is not None:
if not isinstance(constraint, util.string_types) and \
isinstance(constraint, (
schema.Index, schema.Constraint,
ext.ExcludeConstraint)):
constraint = getattr(constraint, 'name') or constraint
if constraint is not None:
if index_elements is not None:
raise ValueError(
"'constraint' and 'index_elements' are mutually exclusive")
if isinstance(constraint, util.string_types):
self.constraint_target = constraint
self.inferred_target_elements = None
self.inferred_target_whereclause = None
elif isinstance(constraint, schema.Index):
index_elements = constraint.expressions
index_where = \
constraint.dialect_options['postgresql'].get("where")
elif isinstance(constraint, ext.ExcludeConstraint):
index_elements = constraint.columns
index_where = constraint.where
else:
index_elements = constraint.columns
index_where = \
constraint.dialect_options['postgresql'].get("where")
if index_elements is not None:
self.constraint_target = None
self.inferred_target_elements = index_elements
self.inferred_target_whereclause = index_where
elif constraint is None:
self.constraint_target = self.inferred_target_elements = \
self.inferred_target_whereclause = None
class OnConflictDoNothing(OnConflictClause):
__visit_name__ = 'on_conflict_do_nothing'
class OnConflictDoUpdate(OnConflictClause):
__visit_name__ = 'on_conflict_do_update'
def __init__(
self,
constraint=None,
index_elements=None,
index_where=None,
set_=None,
where=None):
super(OnConflictDoUpdate, self).__init__(
constraint=constraint,
index_elements=index_elements,
index_where=index_where)
if self.inferred_target_elements is None and \
self.constraint_target is None:
raise ValueError(
"Either constraint or index_elements, "
"but not both, must be specified unless DO NOTHING")
        if not isinstance(set_, dict) or not set_:
raise ValueError("set parameter must be a non-empty dictionary")
self.update_values_to_set = [
(key, value)
for key, value in set_.items()
]
self.update_whereclause = where
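A hedged sketch of both generative methods defined above; ``my_table`` and its columns are hypothetical, following the pattern the docstrings describe::

    from sqlalchemy import Column, Integer, MetaData, String, Table
    from sqlalchemy.dialects.postgresql import insert

    metadata = MetaData()
    my_table = Table(
        'my_table', metadata,
        Column('id', Integer, primary_key=True),
        Column('data', String),
    )

    stmt = insert(my_table).values(id=1, data='inserted data')

    # DO UPDATE: on a primary-key collision, overwrite "data" with the
    # value that would have been inserted, via the "excluded" namespace
    do_update_stmt = stmt.on_conflict_do_update(
        index_elements=['id'],
        set_=dict(data=stmt.excluded.data),
    )

    # DO NOTHING: silently skip conflicting rows instead
    do_nothing_stmt = stmt.on_conflict_do_nothing(index_elements=['id'])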

View File

@@ -0,0 +1,218 @@
# postgresql/ext.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ...sql import expression
from ...sql import elements
from ...sql import functions
from ...sql.schema import ColumnCollectionConstraint
from .array import ARRAY
class aggregate_order_by(expression.ColumnElement):
"""Represent a PostgreSQL aggregate order by expression.
E.g.::
from sqlalchemy.dialects.postgresql import aggregate_order_by
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
stmt = select([expr])
would represent the expression::
SELECT array_agg(a ORDER BY b DESC) FROM table;
Similarly::
expr = func.string_agg(
table.c.a,
aggregate_order_by(literal_column("','"), table.c.a)
)
stmt = select([expr])
Would represent::
SELECT string_agg(a, ',' ORDER BY a) FROM table;
.. versionadded:: 1.1
.. seealso::
:class:`.array_agg`
"""
__visit_name__ = 'aggregate_order_by'
def __init__(self, target, order_by):
self.target = elements._literal_as_binds(target)
self.order_by = elements._literal_as_binds(order_by)
def self_group(self, against=None):
return self
def get_children(self, **kwargs):
return self.target, self.order_by
def _copy_internals(self, clone=elements._clone, **kw):
self.target = clone(self.target, **kw)
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return self.target._from_objects + self.order_by._from_objects
class ExcludeConstraint(ColumnCollectionConstraint):
"""A table-level EXCLUDE constraint.
Defines an EXCLUDE constraint as described in the `postgres
documentation`__.
__ http://www.postgresql.org/docs/9.0/\
static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
"""
__visit_name__ = 'exclude_constraint'
where = None
def __init__(self, *elements, **kw):
r"""
Create an :class:`.ExcludeConstraint` object.
E.g.::
const = ExcludeConstraint(
(Column('period'), '&&'),
(Column('group'), '='),
where=(Column('group') != 'some group')
)
The constraint is normally embedded into the :class:`.Table` construct
directly, or added later using :meth:`.append_constraint`::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('period', TSRANGE()),
Column('group', String)
)
some_table.append_constraint(
ExcludeConstraint(
(some_table.c.period, '&&'),
(some_table.c.group, '='),
where=some_table.c.group != 'some group',
name='some_table_excl_const'
)
)
:param \*elements:
A sequence of two tuples of the form ``(column, operator)`` where
"column" is a SQL expression element or a raw SQL string, most
typically a :class:`.Column` object,
and "operator" is a string containing the operator to use.
.. note::
A plain string passed for the value of "column" is interpreted
as an arbitrary SQL expression; when passing a plain string,
any necessary quoting and escaping syntaxes must be applied
manually. In order to specify a column name when a
:class:`.Column` object is not available, while ensuring that
any necessary quoting rules take effect, an ad-hoc
:class:`.Column` or :func:`.sql.expression.column` object may
be used.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param using:
Optional string. If set, emit USING <index_method> when issuing DDL
for this constraint. Defaults to 'gist'.
:param where:
Optional SQL expression construct or literal SQL string.
If set, emit WHERE <predicate> when issuing DDL
for this constraint.
.. note::
A plain string passed here is interpreted as an arbitrary SQL
expression; when passing a plain string, any necessary quoting
and escaping syntaxes must be applied manually.
"""
columns = []
render_exprs = []
self.operators = {}
expressions, operators = zip(*elements)
for (expr, column, strname, add_element), operator in zip(
self._extract_col_expression_collection(expressions),
operators
):
if add_element is not None:
columns.append(add_element)
name = column.name if column is not None else strname
if name is not None:
# backwards compat
self.operators[name] = operator
expr = expression._literal_as_text(expr)
render_exprs.append(
(expr, name, operator)
)
self._render_exprs = render_exprs
ColumnCollectionConstraint.__init__(
self,
*columns,
name=kw.get('name'),
deferrable=kw.get('deferrable'),
initially=kw.get('initially')
)
self.using = kw.get('using', 'gist')
where = kw.get('where')
if where is not None:
self.where = expression._literal_as_text(where)
def copy(self, **kw):
elements = [(col, self.operators[col])
for col in self.columns.keys()]
c = self.__class__(*elements,
name=self.name,
deferrable=self.deferrable,
initially=self.initially,
where=self.where,
using=self.using)
c.dispatch._update(self.dispatch)
return c
def array_agg(*arg, **kw):
"""PostgreSQL-specific form of :class:`.array_agg`, ensures
return type is :class:`.postgresql.ARRAY` and not
the plain :class:`.types.ARRAY`.
.. versionadded:: 1.1
"""
kw['type_'] = ARRAY(functions._type_from_args(arg))
return functions.func.array_agg(*arg, **kw)
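A hedged sketch combining the two constructs above; the table ``t`` is a hypothetical stand-in::

    from sqlalchemy import Column, Integer, MetaData, String, Table, select
    from sqlalchemy.dialects.postgresql import aggregate_order_by, array_agg

    metadata = MetaData()
    t = Table('t', metadata, Column('a', String), Column('b', Integer))

    # renders array_agg(a ORDER BY b DESC), typed as postgresql.ARRAY(String)
    stmt = select([array_agg(aggregate_order_by(t.c.a, t.c.b.desc()))])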

View File

@@ -0,0 +1,420 @@
# postgresql/hstore.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from .base import ischema_names
from .array import ARRAY
from ... import types as sqltypes
from ...sql import functions as sqlfunc
from ...sql import operators
from ... import util
__all__ = ('HSTORE', 'hstore')
idx_precedence = operators._PRECEDENCE[operators.json_getitem_op]
GETITEM = operators.custom_op(
"->", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
HAS_KEY = operators.custom_op(
"?", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
HAS_ALL = operators.custom_op(
"?&", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
HAS_ANY = operators.custom_op(
"?|", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
CONTAINS = operators.custom_op(
"@>", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
CONTAINED_BY = operators.custom_op(
"<@", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
"""Represent the PostgreSQL HSTORE type.
The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', HSTORE)
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
data = {"key1": "value1", "key2": "value2"}
)
:class:`.HSTORE` provides for a wide range of operations, including:
* Index operations::
data_table.c.data['some key'] == 'some value'
* Containment operations::
data_table.c.data.has_key('some key')
data_table.c.data.has_all(['one', 'two', 'three'])
* Concatenation::
data_table.c.data + {"k1": "v1"}
For a full list of special methods see
:class:`.HSTORE.comparator_factory`.
For usage with the SQLAlchemy ORM, it may be desirable to combine
    the usage of :class:`.HSTORE` with the :class:`.MutableDict` dictionary
    type, part of the :mod:`sqlalchemy.ext.mutable`
extension. This extension will allow "in-place" changes to the
dictionary, e.g. addition of new keys or replacement/removal of existing
keys to/from the current dictionary, to produce events which will be
detected by the unit of work::
from sqlalchemy.ext.mutable import MutableDict
class MyClass(Base):
__tablename__ = 'data_table'
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(HSTORE))
my_object = session.query(MyClass).one()
# in-place mutation, requires Mutable extension
# in order for the ORM to detect
my_object.data['some_key'] = 'some value'
session.commit()
When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM
will not be alerted to any changes to the contents of an existing
dictionary, unless that dictionary value is re-assigned to the
HSTORE-attribute itself, thus generating a change event.
.. versionadded:: 0.8
.. seealso::
:class:`.hstore` - render the PostgreSQL ``hstore()`` function.
"""
__visit_name__ = 'HSTORE'
hashable = False
text_type = sqltypes.Text()
def __init__(self, text_type=None):
"""Construct a new :class:`.HSTORE`.
:param text_type: the type that should be used for indexed values.
Defaults to :class:`.types.Text`.
.. versionadded:: 1.1.0
"""
if text_type is not None:
self.text_type = text_type
class Comparator(
sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator):
"""Define comparison operations for :class:`.HSTORE`."""
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""
return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
"""Boolean expression. Test for presence of all keys in jsonb
"""
return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
"""Boolean expression. Test for presence of any key in jsonb
"""
return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
"""Boolean expression. Test if keys (or array) are a superset
of/contained the keys of the argument jsonb expression.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
            keys of the argument hstore expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean)
def _setup_getitem(self, index):
return GETITEM, index, self.type.text_type
def defined(self, key):
"""Boolean expression. Test for presence of a non-NULL value for
the key. Note that the key may be a SQLA expression.
"""
return _HStoreDefinedFunction(self.expr, key)
def delete(self, key):
"""HStore expression. Returns the contents of this hstore with the
given key deleted. Note that the key may be a SQLA expression.
"""
if isinstance(key, dict):
key = _serialize_hstore(key)
return _HStoreDeleteFunction(self.expr, key)
def slice(self, array):
"""HStore expression. Returns a subset of an hstore defined by
array of keys.
"""
return _HStoreSliceFunction(self.expr, array)
def keys(self):
"""Text array expression. Returns array of keys."""
return _HStoreKeysFunction(self.expr)
def vals(self):
"""Text array expression. Returns array of values."""
return _HStoreValsFunction(self.expr)
def array(self):
"""Text array expression. Returns array of alternating keys and
values.
"""
return _HStoreArrayFunction(self.expr)
def matrix(self):
"""Text array expression. Returns array of [key, value] pairs."""
return _HStoreMatrixFunction(self.expr)
comparator_factory = Comparator
def bind_processor(self, dialect):
if util.py2k:
encoding = dialect.encoding
def process(value):
if isinstance(value, dict):
return _serialize_hstore(value).encode(encoding)
else:
return value
else:
def process(value):
if isinstance(value, dict):
return _serialize_hstore(value)
else:
return value
return process
def result_processor(self, dialect, coltype):
if util.py2k:
encoding = dialect.encoding
def process(value):
if value is not None:
return _parse_hstore(value.decode(encoding))
else:
return value
else:
def process(value):
if value is not None:
return _parse_hstore(value)
else:
return value
return process
ischema_names['hstore'] = HSTORE
class hstore(sqlfunc.GenericFunction):
"""Construct an hstore value within a SQL expression using the
PostgreSQL ``hstore()`` function.
The :class:`.hstore` function accepts one or two arguments as described
in the PostgreSQL documentation.
E.g.::
from sqlalchemy.dialects.postgresql import array, hstore
select([hstore('key1', 'value1')])
select([
hstore(
array(['key1', 'key2', 'key3']),
array(['value1', 'value2', 'value3'])
)
])
.. versionadded:: 0.8
.. seealso::
:class:`.HSTORE` - the PostgreSQL ``HSTORE`` datatype.
"""
type = HSTORE
name = 'hstore'
class _HStoreDefinedFunction(sqlfunc.GenericFunction):
type = sqltypes.Boolean
name = 'defined'
class _HStoreDeleteFunction(sqlfunc.GenericFunction):
type = HSTORE
name = 'delete'
class _HStoreSliceFunction(sqlfunc.GenericFunction):
type = HSTORE
name = 'slice'
class _HStoreKeysFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = 'akeys'
class _HStoreValsFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = 'avals'
class _HStoreArrayFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = 'hstore_to_array'
class _HStoreMatrixFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = 'hstore_to_matrix'
#
# parsing. note that none of this is used with the psycopg2 backend,
# which provides its own native extensions.
#
# My best guess at the parsing rules of hstore literals, since no formal
# grammar is given. This is mostly reverse engineered from PG's input parser
# behavior.
HSTORE_PAIR_RE = re.compile(r"""
(
"(?P<key> (\\ . | [^"])* )" # Quoted key
)
[ ]* => [ ]* # Pair operator, optional adjoining whitespace
(
(?P<value_null> NULL ) # NULL value
| "(?P<value> (\\ . | [^"])* )" # Quoted value
)
""", re.VERBOSE)
HSTORE_DELIMITER_RE = re.compile(r"""
[ ]* , [ ]*
""", re.VERBOSE)
def _parse_error(hstore_str, pos):
"""format an unmarshalling error."""
ctx = 20
hslen = len(hstore_str)
parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)]
residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)]
if len(parsed_tail) > ctx:
parsed_tail = '[...]' + parsed_tail[1:]
if len(residual) > ctx:
residual = residual[:-1] + '[...]'
return "After %r, could not parse residual at position %d: %r" % (
parsed_tail, pos, residual)
def _parse_hstore(hstore_str):
"""Parse an hstore from its literal string representation.
Attempts to approximate PG's hstore input parsing rules as closely as
possible. Although currently this is not strictly necessary, since the
current implementation of hstore's output syntax is stricter than what it
accepts as input, the documentation makes no guarantees that will always
be the case.
"""
result = {}
pos = 0
pair_match = HSTORE_PAIR_RE.match(hstore_str)
while pair_match is not None:
key = pair_match.group('key').replace(r'\"', '"').replace(
"\\\\", "\\")
if pair_match.group('value_null'):
value = None
else:
value = pair_match.group('value').replace(
r'\"', '"').replace("\\\\", "\\")
result[key] = value
pos += pair_match.end()
delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
if delim_match is not None:
pos += delim_match.end()
pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])
if pos != len(hstore_str):
raise ValueError(_parse_error(hstore_str, pos))
return result
def _serialize_hstore(val):
"""Serialize a dictionary into an hstore literal. Keys and values must
both be strings (except None for values).
"""
def esc(s, position):
if position == 'value' and s is None:
return 'NULL'
elif isinstance(s, util.string_types):
return '"%s"' % s.replace("\\", "\\\\").replace('"', r'\"')
else:
raise ValueError("%r in %s position is not a string." %
(s, position))
return ', '.join('%s=>%s' % (esc(k, 'key'), esc(v, 'value'))
for k, v in val.items())
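A hedged sketch of the column-level operators and the private parsing helpers above; ``data_table`` is hypothetical, and the final round-trip uses private API for demonstration only::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects.postgresql import HSTORE
    from sqlalchemy.dialects.postgresql.hstore import (
        _parse_hstore, _serialize_hstore)

    metadata = MetaData()
    data_table = Table(
        'data_table', metadata,
        Column('id', Integer, primary_key=True),
        Column('data', HSTORE),
    )

    # -> : indexed access; the result type is HSTORE.text_type (Text)
    stmt = select([data_table.c.data['key1']])

    # ? and @> : key presence and containment, both Boolean
    stmt = select([data_table]).where(data_table.c.data.has_key('key1'))
    stmt = select([data_table]).where(
        data_table.c.data.contains({'key1': 'value1'}))

    # round-trip through the private literal serializer/parser above
    assert _parse_hstore(_serialize_hstore({'a': '1'})) == {'a': '1'}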

View File

@@ -0,0 +1,301 @@
# postgresql/json.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
import json
import collections
from .base import ischema_names, colspecs
from ... import types as sqltypes
from ...sql import operators
from ...sql import elements
from ... import util
__all__ = ('JSON', 'JSONB')
idx_precedence = operators._PRECEDENCE[operators.json_getitem_op]
ASTEXT = operators.custom_op(
"->>", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
JSONPATH_ASTEXT = operators.custom_op(
"#>>", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
HAS_KEY = operators.custom_op(
"?", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
HAS_ALL = operators.custom_op(
"?&", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
HAS_ANY = operators.custom_op(
"?|", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
CONTAINS = operators.custom_op(
"@>", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
CONTAINED_BY = operators.custom_op(
"<@", precedence=idx_precedence, natural_self_precedent=True,
eager_grouping=True
)
class JSONPathType(sqltypes.JSON.JSONPathType):
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
assert isinstance(value, collections.Sequence)
            tokens = [util.text_type(elem) for elem in value]
value = "{%s}" % (", ".join(tokens))
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
assert isinstance(value, collections.Sequence)
            tokens = [util.text_type(elem) for elem in value]
value = "{%s}" % (", ".join(tokens))
if super_proc:
value = super_proc(value)
return value
return process
colspecs[sqltypes.JSON.JSONPathType] = JSONPathType
class JSON(sqltypes.JSON):
"""Represent the PostgreSQL JSON type.
This type is a specialization of the Core-level :class:`.types.JSON`
type. Be sure to read the documentation for :class:`.types.JSON` for
important tips regarding treatment of NULL values and ORM use.
.. versionchanged:: 1.1 :class:`.postgresql.JSON` is now a PostgreSQL-
specific specialization of the new :class:`.types.JSON` type.
The operators provided by the PostgreSQL version of :class:`.JSON`
include:
* Index operations (the ``->`` operator)::
data_table.c.data['some key']
data_table.c.data[5]
* Index operations returning text (the ``->>`` operator)::
data_table.c.data['some key'].astext == 'some value'
* Index operations with CAST
(equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::
data_table.c.data['some key'].astext.cast(Integer) == 5
* Path index operations (the ``#>`` operator)::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
* Path index operations returning text (the ``#>>`` operator)::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')].astext == \
'some value'
.. versionchanged:: 1.1 The :meth:`.ColumnElement.cast` operator on
JSON objects now requires that the :attr:`.JSON.Comparator.astext`
modifier be called explicitly, if the cast works only from a textual
string.
Index operations return an expression object whose type defaults to
    :class:`.JSON`, so that further JSON-oriented instructions
may be called upon the result type.
Custom serializers and deserializers are specified at the dialect level,
    that is, using :func:`.create_engine`. The reason for this is that when
using psycopg2, the DBAPI only allows serializers at the per-cursor
or per-connection level. E.g.::
engine = create_engine("postgresql://scott:tiger@localhost/test",
json_serializer=my_serialize_fn,
json_deserializer=my_deserialize_fn
)
    When using the psycopg2 dialect, the ``json_deserializer`` is registered
against the database using ``psycopg2.extras.register_default_json``.
.. seealso::
:class:`.types.JSON` - Core level JSON type
:class:`.JSONB`
"""
astext_type = sqltypes.Text()
def __init__(self, none_as_null=False, astext_type=None):
"""Construct a :class:`.JSON` type.
:param none_as_null: if True, persist the value ``None`` as a
SQL NULL value, not the JSON encoding of ``null``. Note that
when this flag is False, the :func:`.null` construct can still
be used to persist a NULL value::
from sqlalchemy import null
conn.execute(table.insert(), data=null())
.. versionchanged:: 0.9.8 - Added ``none_as_null``, and :func:`.null`
is now supported in order to persist a NULL value.
.. seealso::
:attr:`.JSON.NULL`
:param astext_type: the type to use for the
:attr:`.JSON.Comparator.astext`
accessor on indexed attributes. Defaults to :class:`.types.Text`.
.. versionadded:: 1.1
"""
super(JSON, self).__init__(none_as_null=none_as_null)
if astext_type is not None:
self.astext_type = astext_type
class Comparator(sqltypes.JSON.Comparator):
"""Define comparison operations for :class:`.JSON`."""
@property
def astext(self):
"""On an indexed expression, use the "astext" (e.g. "->>")
conversion when rendered in SQL.
E.g.::
select([data_table.c.data['some key'].astext])
.. seealso::
:meth:`.ColumnElement.cast`
"""
if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType):
return self.expr.left.operate(
JSONPATH_ASTEXT,
self.expr.right, result_type=self.type.astext_type)
else:
return self.expr.left.operate(
ASTEXT, self.expr.right, result_type=self.type.astext_type)
comparator_factory = Comparator
colspecs[sqltypes.JSON] = JSON
ischema_names['json'] = JSON
class JSONB(JSON):
"""Represent the PostgreSQL JSONB type.
The :class:`.JSONB` type stores arbitrary JSONB format data, e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', JSONB)
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
data = {"key1": "value1", "key2": "value2"}
)
The :class:`.JSONB` type includes all operations provided by
:class:`.JSON`, including the same behaviors for indexing operations.
It also adds additional operators specific to JSONB, including
:meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`,
:meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`,
and :meth:`.JSONB.Comparator.contained_by`.
Like the :class:`.JSON` type, the :class:`.JSONB` type does not detect
in-place changes when used with the ORM, unless the
:mod:`sqlalchemy.ext.mutable` extension is used.
Custom serializers and deserializers
are shared with the :class:`.JSON` class, using the ``json_serializer``
and ``json_deserializer`` keyword arguments. These must be specified
at the dialect level using :func:`.create_engine`. When using
psycopg2, the serializers are associated with the jsonb type using
``psycopg2.extras.register_default_jsonb`` on a per-connection basis,
in the same way that ``psycopg2.extras.register_default_json`` is used
to register these handlers with the json type.
.. versionadded:: 0.9.7
.. seealso::
:class:`.JSON`
"""
__visit_name__ = 'JSONB'
class Comparator(JSON.Comparator):
"""Define comparison operations for :class:`.JSON`."""
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""
return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
"""Boolean expression. Test for presence of all keys in jsonb
"""
return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
"""Boolean expression. Test for presence of any key in jsonb
"""
return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
"""Boolean expression. Test if keys (or array) are a superset
            of / contain the keys of the argument jsonb expression.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
keys of the argument jsonb expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean)
comparator_factory = Comparator
ischema_names['jsonb'] = JSONB
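A short, hedged sketch of the operators this module defines; the ``data_table`` name and its columns are hypothetical::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects.postgresql import JSONB

    metadata = MetaData()
    data_table = Table(
        'data_table', metadata,
        Column('id', Integer, primary_key=True),
        Column('data', JSONB),
    )

    # ->> : indexed access rendered as text via .astext
    stmt = select([data_table]).where(
        data_table.c.data['some key'].astext == 'some value')

    # CAST applied to the text form of the value
    stmt = select([data_table]).where(
        data_table.c.data['some key'].astext.cast(Integer) == 5)

    # @> : containment, available on JSONB (not plain JSON)
    stmt = select([data_table]).where(
        data_table.c.data.contains({'key1': 'value1'}))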

View File

@@ -0,0 +1,61 @@
# postgresql/psycopg2cffi.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+psycopg2cffi
:name: psycopg2cffi
:dbapi: psycopg2cffi
:connectstring: \
postgresql+psycopg2cffi://user:password@host:port/dbname\
[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2cffi/
``psycopg2cffi`` is an adaptation of ``psycopg2``, using CFFI for the C
layer. This makes it suitable for use in e.g. PyPy. Documentation
is as per ``psycopg2``.
.. versionadded:: 1.0.0
.. seealso::
:mod:`sqlalchemy.dialects.postgresql.psycopg2`
"""
from .psycopg2 import PGDialect_psycopg2
class PGDialect_psycopg2cffi(PGDialect_psycopg2):
driver = 'psycopg2cffi'
supports_unicode_statements = True
# psycopg2cffi's first release is 2.5.0, but reports
# __version__ as 2.4.4. Subsequent releases seem to have
# fixed this.
FEATURE_VERSION_MAP = dict(
native_json=(2, 4, 4),
native_jsonb=(2, 7, 1),
sane_multi_rowcount=(2, 4, 4),
array_oid=(2, 4, 4),
hstore_adapter=(2, 4, 4)
)
@classmethod
def dbapi(cls):
return __import__('psycopg2cffi')
@classmethod
def _psycopg2_extensions(cls):
root = __import__('psycopg2cffi', fromlist=['extensions'])
return root.extensions
@classmethod
def _psycopg2_extras(cls):
root = __import__('psycopg2cffi', fromlist=['extras'])
return root.extras
dialect = PGDialect_psycopg2cffi
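Selecting this dialect only requires the ``psycopg2cffi`` driver name in the URL; the credentials, host, and database below are placeholders::

    from sqlalchemy import create_engine

    engine = create_engine(
        "postgresql+psycopg2cffi://scott:tiger@localhost/test")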

View File

@@ -0,0 +1,243 @@
# postgresql/pygresql.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+pygresql
:name: pygresql
:dbapi: pgdb
:connectstring: postgresql+pygresql://user:password@host:port/dbname\
[?key=value&key=value...]
:url: http://www.pygresql.org/
"""
import decimal
import re
from ... import exc, processors, util
from ...types import Numeric, JSON as Json
from ...sql.elements import Null
from .base import PGDialect, PGCompiler, PGIdentifierPreparer, \
_DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES, UUID
from .hstore import HSTORE
from .json import JSON, JSONB
class _PGNumeric(Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if not isinstance(coltype, int):
coltype = coltype.oid
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
decimal.Decimal,
self._effective_decimal_return_scale)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# PyGreSQL returns Decimal natively for 1700 (numeric)
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype)
else:
if coltype in _FLOAT_TYPES:
# PyGreSQL returns float natively for 701 (float8)
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype)
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if not dialect.has_native_hstore:
return super(_PGHStore, self).bind_processor(dialect)
hstore = dialect.dbapi.Hstore
def process(value):
if isinstance(value, dict):
return hstore(value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_hstore:
return super(_PGHStore, self).result_processor(dialect, coltype)
class _PGJSON(JSON):
def bind_processor(self, dialect):
if not dialect.has_native_json:
return super(_PGJSON, self).bind_processor(dialect)
json = dialect.dbapi.Json
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, Null) or (
value is None and self.none_as_null):
return None
if value is None or isinstance(value, (dict, list)):
return json(value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_json:
return super(_PGJSON, self).result_processor(dialect, coltype)
class _PGJSONB(JSONB):
def bind_processor(self, dialect):
if not dialect.has_native_json:
return super(_PGJSONB, self).bind_processor(dialect)
json = dialect.dbapi.Json
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, Null) or (
value is None and self.none_as_null):
return None
if value is None or isinstance(value, (dict, list)):
return json(value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_json:
return super(_PGJSONB, self).result_processor(dialect, coltype)
class _PGUUID(UUID):
def bind_processor(self, dialect):
if not dialect.has_native_uuid:
return super(_PGUUID, self).bind_processor(dialect)
uuid = dialect.dbapi.Uuid
def process(value):
if value is None:
return None
if isinstance(value, (str, bytes)):
if len(value) == 16:
return uuid(bytes=value)
return uuid(value)
if isinstance(value, int):
return uuid(int=value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_uuid:
return super(_PGUUID, self).result_processor(dialect, coltype)
if not self.as_uuid:
def process(value):
if value is not None:
return str(value)
return process
class _PGCompiler(PGCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return self.process(binary.left, **kw) + " %% " + \
self.process(binary.right, **kw)
def post_process_text(self, text):
return text.replace('%', '%%')
class _PGIdentifierPreparer(PGIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
return value.replace('%', '%%')
class PGDialect_pygresql(PGDialect):
driver = 'pygresql'
statement_compiler = _PGCompiler
preparer = _PGIdentifierPreparer
@classmethod
def dbapi(cls):
import pgdb
return pgdb
colspecs = util.update_copy(
PGDialect.colspecs,
{
Numeric: _PGNumeric,
HSTORE: _PGHStore,
Json: _PGJSON,
JSON: _PGJSON,
JSONB: _PGJSONB,
UUID: _PGUUID,
}
)
def __init__(self, **kwargs):
super(PGDialect_pygresql, self).__init__(**kwargs)
try:
version = self.dbapi.version
m = re.match(r'(\d+)\.(\d+)', version)
version = (int(m.group(1)), int(m.group(2)))
except (AttributeError, ValueError, TypeError):
version = (0, 0)
self.dbapi_version = version
if version < (5, 0):
has_native_hstore = has_native_json = has_native_uuid = False
if version != (0, 0):
util.warn("PyGreSQL is only fully supported by SQLAlchemy"
" since version 5.0.")
else:
self.supports_unicode_statements = True
self.supports_unicode_binds = True
has_native_hstore = has_native_json = has_native_uuid = True
self.has_native_hstore = has_native_hstore
self.has_native_json = has_native_json
self.has_native_uuid = has_native_uuid
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if 'port' in opts:
opts['host'] = '%s:%s' % (
opts.get('host', '').rsplit(':', 1)[0], opts.pop('port'))
opts.update(url.query)
return [], opts
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
if not connection:
return False
try:
connection = connection.connection
except AttributeError:
pass
else:
if not connection:
return False
try:
return connection.closed
except AttributeError: # PyGreSQL < 5.0
return connection._cnx is None
return False
dialect = PGDialect_pygresql
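Likewise for PyGreSQL, a hedged connection sketch; the credentials are placeholders, and PyGreSQL 5.0 or later is assumed, per the version check above::

    from sqlalchemy import create_engine

    engine = create_engine(
        "postgresql+pygresql://scott:tiger@localhost:5432/test")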

View File

@@ -0,0 +1,168 @@
# Copyright (C) 2013-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .base import ischema_names
from ... import types as sqltypes
__all__ = ('INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE',
           'TSRANGE', 'TSTZRANGE')
class RangeOperators(object):
"""
This mixin provides functionality for the Range Operators
listed in Table 9-44 of the `postgres documentation`__ for Range
Functions and Operators. It is used by all the range types
    provided in the ``postgresql`` dialect and can likely be used for
any range types you create yourself.
__ http://www.postgresql.org/docs/devel/static/functions-range.html
No extra support is provided for the Range Functions listed in
Table 9-45 of the postgres documentation. For these, the normal
:func:`~sqlalchemy.sql.expression.func` object should be used.
.. versionadded:: 0.8.2 Support for PostgreSQL RANGE operations.
"""
class comparator_factory(sqltypes.Concatenable.Comparator):
"""Define comparison operations for range types."""
def __ne__(self, other):
"Boolean expression. Returns true if two ranges are not equal"
return self.expr.op('<>')(other)
def contains(self, other, **kw):
"""Boolean expression. Returns true if the right hand operand,
which can be an element or a range, is contained within the
column.
"""
return self.expr.op('@>')(other)
def contained_by(self, other):
"""Boolean expression. Returns true if the column is contained
within the right hand operand.
"""
return self.expr.op('<@')(other)
def overlaps(self, other):
"""Boolean expression. Returns true if the column overlaps
(has points in common with) the right hand operand.
"""
return self.expr.op('&&')(other)
def strictly_left_of(self, other):
"""Boolean expression. Returns true if the column is strictly
left of the right hand operand.
"""
return self.expr.op('<<')(other)
__lshift__ = strictly_left_of
def strictly_right_of(self, other):
"""Boolean expression. Returns true if the column is strictly
right of the right hand operand.
"""
return self.expr.op('>>')(other)
__rshift__ = strictly_right_of
def not_extend_right_of(self, other):
"""Boolean expression. Returns true if the range in the column
does not extend right of the range in the operand.
"""
return self.expr.op('&<')(other)
def not_extend_left_of(self, other):
"""Boolean expression. Returns true if the range in the column
does not extend left of the range in the operand.
"""
return self.expr.op('&>')(other)
def adjacent_to(self, other):
"""Boolean expression. Returns true if the range in the column
is adjacent to the range in the operand.
"""
return self.expr.op('-|-')(other)
def __add__(self, other):
"""Range expression. Returns the union of the two ranges.
Will raise an exception if the resulting range is not
            contiguous.
"""
return self.expr.op('+')(other)
class INT4RANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the PostgreSQL INT4RANGE type.
.. versionadded:: 0.8.2
"""
__visit_name__ = 'INT4RANGE'
ischema_names['int4range'] = INT4RANGE
class INT8RANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the PostgreSQL INT8RANGE type.
.. versionadded:: 0.8.2
"""
__visit_name__ = 'INT8RANGE'
ischema_names['int8range'] = INT8RANGE
class NUMRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the PostgreSQL NUMRANGE type.
.. versionadded:: 0.8.2
"""
__visit_name__ = 'NUMRANGE'
ischema_names['numrange'] = NUMRANGE
class DATERANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the PostgreSQL DATERANGE type.
.. versionadded:: 0.8.2
"""
__visit_name__ = 'DATERANGE'
ischema_names['daterange'] = DATERANGE
class TSRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the PostgreSQL TSRANGE type.
.. versionadded:: 0.8.2
"""
__visit_name__ = 'TSRANGE'
ischema_names['tsrange'] = TSRANGE
class TSTZRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the PostgreSQL TSTZRANGE type.
.. versionadded:: 0.8.2
"""
__visit_name__ = 'TSTZRANGE'
ischema_names['tstzrange'] = TSTZRANGE
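A hedged sketch of the mixin's operators against a hypothetical ``bookings`` table; range *functions* such as ``daterange()`` go through ``func``, per the note in the mixin docstring above::

    import datetime

    from sqlalchemy import Column, Integer, MetaData, Table, func, select
    from sqlalchemy.dialects.postgresql import DATERANGE

    metadata = MetaData()
    bookings = Table(
        'bookings', metadata,
        Column('id', Integer, primary_key=True),
        Column('during', DATERANGE),
    )

    # @> : the stored range contains the given date
    stmt = select([bookings]).where(
        bookings.c.during.contains(datetime.date(2017, 4, 15)))

    # && : the stored range overlaps a range built by the SQL-side
    # daterange() constructor function
    stmt = select([bookings]).where(
        bookings.c.during.overlaps(
            func.daterange(datetime.date(2017, 1, 1),
                           datetime.date(2018, 1, 1))))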