# mssql/base.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""
.. dialect:: mssql
    :name: Microsoft SQL Server


Auto Increment Behavior
-----------------------

SQL Server provides so-called "auto incrementing" behavior using the
``IDENTITY`` construct, which can be placed on an integer primary key.
SQLAlchemy considers ``IDENTITY`` within its default "autoincrement" behavior,
described at :paramref:`.Column.autoincrement`; this means
that by default, the first integer primary key column in a :class:`.Table`
will be considered to be the identity column and will generate DDL as such::

    from sqlalchemy import Table, MetaData, Column, Integer

    m = MetaData()
    t = Table('t', m,
              Column('id', Integer, primary_key=True),
              Column('x', Integer))
    m.create_all(engine)

The above example will generate DDL as:

.. sourcecode:: sql

    CREATE TABLE t (
        id INTEGER NOT NULL IDENTITY(1,1),
        x INTEGER NULL,
        PRIMARY KEY (id)
    )

For the case where this default generation of ``IDENTITY`` is not desired,
specify ``autoincrement=False`` on all integer primary key columns::

    m = MetaData()
    t = Table('t', m,
              Column('id', Integer, primary_key=True, autoincrement=False),
              Column('x', Integer))
    m.create_all(engine)

.. note::

    An INSERT statement which refers to an explicit value for such
    a column is prohibited by SQL Server, however SQLAlchemy will detect this
    and modify the ``IDENTITY_INSERT`` flag accordingly at statement execution
    time.  As this is not a high performing process, care should be taken to
    set the ``autoincrement`` flag appropriately for columns that will not
    actually require IDENTITY behavior.

Controlling "Start" and "Increment"
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Specific control over the parameters of the ``IDENTITY`` value is supported
using the :class:`.schema.Sequence` object.  While this object normally
represents an explicit "sequence" for supporting backends, on SQL Server it is
re-purposed to specify behavior regarding the identity column, including
support of the "start" and "increment" values::

    from sqlalchemy import Table, Integer, Sequence, Column

    Table('test', metadata,
          Column('id', Integer,
                 Sequence('blah', start=100, increment=10),
                 primary_key=True),
          Column('name', String(20))
          ).create(some_engine)

would yield:

.. sourcecode:: sql

   CREATE TABLE test (
     id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
     name VARCHAR(20) NULL
   )

Note that the ``start`` and ``increment`` values for sequences are
optional and will default to 1,1.

INSERT behavior
^^^^^^^^^^^^^^^^

Handling of the ``IDENTITY`` column at INSERT time involves two key
techniques.  The most common is being able to fetch the "last inserted value"
for a given ``IDENTITY`` column, a process which SQLAlchemy performs
implicitly in many cases, most importantly within the ORM.

The process for fetching this value has several variants:

* In the vast majority of cases, RETURNING is used in conjunction with INSERT
  statements on SQL Server in order to get newly generated primary key values:

  .. sourcecode:: sql

     INSERT INTO t (x) OUTPUT inserted.id VALUES (?)

* When RETURNING is not available or has been disabled via
  ``implicit_returning=False``, either the ``scope_identity()`` function or
  the ``@@identity`` variable is used; behavior varies by backend:

  * when using PyODBC, the phrase ``; select scope_identity()`` will be
    appended to the end of the INSERT statement; a second result set will be
    fetched in order to receive the value.  Given a table as::

        t = Table('t', m, Column('id', Integer, primary_key=True),
                Column('x', Integer),
                implicit_returning=False)

    an INSERT will look like:

    .. sourcecode:: sql

        INSERT INTO t (x) VALUES (?); select scope_identity()

  * Other dialects such as pymssql will call upon
    ``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
    statement.  If the flag ``use_scope_identity=False`` is passed to
    :func:`.create_engine`, the statement ``SELECT @@identity AS lastrowid``
    is used instead, as in the sketch below.

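As a minimal sketch (connection URL and credentials hypothetical), the
``use_scope_identity`` flag is passed to :func:`.create_engine` like any
other dialect argument::

    from sqlalchemy import create_engine

    # fall back to "SELECT @@identity" rather than "scope_identity()"
    # when fetching the last inserted IDENTITY value
    engine = create_engine(
        "mssql+pymssql://user:pass@host/db",
        use_scope_identity=False
    )
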
A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
that refers to the identity column explicitly.  The SQLAlchemy dialect will
detect when an INSERT construct, created using a core :func:`.insert`
construct (not a plain string SQL), refers to the identity column, and
in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution.  Given this example::

    m = MetaData()
    t = Table('t', m, Column('id', Integer, primary_key=True),
            Column('x', Integer))
    m.create_all(engine)

    engine.execute(t.insert(), {'id': 1, 'x': 1}, {'id': 2, 'x': 2})

The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values.  In the echo output we can see
how SQLAlchemy handles this:

.. sourcecode:: sql

    CREATE TABLE t (
        id INTEGER NOT NULL IDENTITY(1,1),
        x INTEGER NULL,
        PRIMARY KEY (id)
    )

    COMMIT
    SET IDENTITY_INSERT t ON
    INSERT INTO t (id, x) VALUES (?, ?)
    ((1, 1), (2, 2))
    SET IDENTITY_INSERT t OFF
    COMMIT

This is an auxiliary use case suitable for testing and bulk insert scenarios.

MAX on VARCHAR / NVARCHAR
-------------------------

SQL Server supports the special string "MAX" within the
:class:`.sqltypes.VARCHAR` and :class:`.sqltypes.NVARCHAR` datatypes,
to indicate "maximum length possible".  The dialect currently handles this as
a length of "None" in the base type, rather than supplying a
dialect-specific version of these types, so that a base type
specified such as ``VARCHAR(None)`` can assume "unlengthed" behavior on
more than one backend without using dialect-specific types.

To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::

    my_table = Table(
        'my_table', metadata,
        Column('my_data', VARCHAR(None)),
        Column('my_n_data', NVARCHAR(None))
    )

Collation Support
-----------------

Character collations are supported by the base string types,
specified by the string argument "collation"::

    from sqlalchemy import VARCHAR
    Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))

When such a column is associated with a :class:`.Table`, the
CREATE TABLE statement for this column will yield::

    login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL

.. versionadded:: 0.8 Character collations are now part of the base string
   types.

LIMIT/OFFSET Support
--------------------

MSSQL has no support for the LIMIT or OFFSET keywords.  LIMIT is
supported directly through the ``TOP`` Transact SQL keyword; a select
construct such as::

    select(...).limit(n)

will yield::

    SELECT TOP n

If using SQL Server 2005 or above, LIMIT with OFFSET
support is available through the ``ROW_NUMBER OVER`` construct.
For versions below 2005, LIMIT with OFFSET usage will fail.

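As a sketch (table and column names hypothetical), both forms can be seen
by compiling against the MSSQL dialect directly::

    from sqlalchemy import Table, MetaData, Column, Integer, select
    from sqlalchemy.dialects import mssql

    m = MetaData()
    t = Table('t', m, Column('id', Integer, primary_key=True))

    # renders: SELECT TOP 5 t.id FROM t
    print(select([t]).limit(5).compile(dialect=mssql.dialect()))

    # LIMIT with OFFSET requires an ORDER BY and renders the
    # ROW_NUMBER() OVER wrapping subquery described above
    print(select([t]).order_by(t.c.id).limit(5).offset(10).compile(
        dialect=mssql.dialect()))
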
.. _mssql_isolation_level:

Transaction Isolation Level
---------------------------

All SQL Server dialects support setting of transaction isolation level
both via a dialect-specific parameter
:paramref:`.create_engine.isolation_level`
accepted by :func:`.create_engine`,
as well as the :paramref:`.Connection.execution_options.isolation_level`
argument as passed to
:meth:`.Connection.execution_options`.  This feature works by issuing the
command ``SET TRANSACTION ISOLATION LEVEL <level>`` for
each new connection.

To set isolation level using :func:`.create_engine`::

    engine = create_engine(
        "mssql+pyodbc://scott:tiger@ms_2008",
        isolation_level="REPEATABLE READ"
    )

To set using per-connection execution options::

    connection = engine.connect()
    connection = connection.execution_options(
        isolation_level="READ COMMITTED"
    )

Valid values for ``isolation_level`` include:

* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``SNAPSHOT`` - specific to SQL Server

.. versionadded:: 1.1 support for isolation level setting on Microsoft
   SQL Server.

Nullability
-----------

MSSQL has support for three levels of column nullability.  The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::

    name VARCHAR(20) NULL

If ``nullable=None`` is specified then no specification is made.  In
other words the database's configured default is used.  This will
render::

    name VARCHAR(20)

If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL`` respectively.

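A minimal sketch (table and column names hypothetical) showing all three
settings side by side::

    from sqlalchemy import Table, MetaData, Column, String

    m = MetaData()
    t = Table('nulltest', m,
              Column('a', String(20), nullable=True),   # renders NULL
              Column('b', String(20), nullable=False),  # renders NOT NULL
              Column('c', String(20), nullable=None))   # no specification
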
Date / Time Handling
--------------------

DATE and TIME are supported.  Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.

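As a sketch of the bind-parameter coercion described above (the value is
hypothetical), a ``datetime.date`` is widened to a ``datetime.datetime``
before being sent to the driver, mirroring what the dialect's bind
processors do internally::

    import datetime

    value = datetime.date(2017, 4, 15)
    # what the dialect sends to the driver for a DATE bind
    coerced = datetime.datetime(value.year, value.month, value.day)
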
.. _mssql_large_type_deprecation:

Large Text/Binary Type Deprecation
----------------------------------

Per `SQL Server 2012/2014 Documentation <http://technet.microsoft.com/en-us/library/ms187993.aspx>`_,
the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL
Server in a future release.  SQLAlchemy normally relates these types to the
:class:`.UnicodeText`, :class:`.Text` and :class:`.LargeBinary` datatypes.

In order to accommodate this change, a new flag ``deprecate_large_types``
is added to the dialect, which will be automatically set based on detection
of the server version in use, if not otherwise set by the user.  The
behavior of this flag is as follows:

* When this flag is ``True``, the :class:`.UnicodeText`, :class:`.Text` and
  :class:`.LargeBinary` datatypes, when used to render DDL, will render the
  types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``,
  respectively.  This is a new behavior as of the addition of this flag.

* When this flag is ``False``, the :class:`.UnicodeText`, :class:`.Text` and
  :class:`.LargeBinary` datatypes, when used to render DDL, will render the
  types ``NTEXT``, ``TEXT``, and ``IMAGE``,
  respectively.  This is the long-standing behavior of these types.

* The flag begins with the value ``None``, before a database connection is
  established.  If the dialect is used to render DDL without the flag being
  set, it is interpreted the same as ``False``.

* On first connection, the dialect detects if SQL Server version 2012 or
  greater is in use; if the flag is still at ``None``, it sets it to ``True``
  or ``False`` based on whether 2012 or greater is detected.

* The flag can be set to either ``True`` or ``False`` when the dialect
  is created, typically via :func:`.create_engine`::

    eng = create_engine("mssql+pymssql://user:pass@host/db",
                        deprecate_large_types=True)

* Complete control over whether the "old" or "new" types are rendered is
  available in all SQLAlchemy versions by using the UPPERCASE type objects
  instead: :class:`.NVARCHAR`, :class:`.VARCHAR`, :class:`.types.VARBINARY`,
  :class:`.TEXT`, :class:`.mssql.NTEXT`, :class:`.mssql.IMAGE` will always
  remain fixed and always output exactly that type.

.. versionadded:: 1.0.0

.. _legacy_schema_rendering:

Legacy Schema Mode
------------------

Very old versions of the MSSQL dialect introduced the behavior such that a
schema-qualified table would be auto-aliased when used in a
SELECT statement; given a table::

    account_table = Table(
        'account', metadata,
        Column('id', Integer, primary_key=True),
        Column('info', String(100)),
        schema="customer_schema"
    )

this legacy mode of rendering would assume that "customer_schema.account"
would not be accepted by all parts of the SQL statement, as illustrated
below::

    >>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True)
    >>> print(account_table.select().compile(eng))
    SELECT account_1.id, account_1.info
    FROM customer_schema.account AS account_1

This mode of behavior is now off by default, as it appears to have served
no purpose; however in the case that legacy applications rely upon it,
it is available using the ``legacy_schema_aliasing`` argument to
:func:`.create_engine` as illustrated above.

.. versionchanged:: 1.1 the ``legacy_schema_aliasing`` flag introduced
   in version 1.0.5 to allow disabling of legacy mode for schemas now
   defaults to False.

.. _mssql_indexes:

Clustered Index Support
-----------------------

The MSSQL dialect supports clustered indexes (and primary keys) via the
``mssql_clustered`` option.  This option is available to :class:`.Index`,
:class:`.UniqueConstraint` and :class:`.PrimaryKeyConstraint`.

To generate a clustered index::

    Index("my_index", table.c.x, mssql_clustered=True)

which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.

To generate a clustered primary key use::

    Table('my_table', metadata,
          Column('x', ...),
          Column('y', ...),
          PrimaryKeyConstraint("x", "y", mssql_clustered=True))

which will render the table, for example, as::

    CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
                           PRIMARY KEY CLUSTERED (x, y))

Similarly, we can generate a clustered unique constraint using::

    Table('my_table', metadata,
          Column('x', ...),
          Column('y', ...),
          PrimaryKeyConstraint("x"),
          UniqueConstraint("y", mssql_clustered=True),
          )

To explicitly request a non-clustered primary key (for example, when
a separate clustered index is desired), use::

    Table('my_table', metadata,
          Column('x', ...),
          Column('y', ...),
          PrimaryKeyConstraint("x", "y", mssql_clustered=False))

which will render the table, for example, as::

    CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
                           PRIMARY KEY NONCLUSTERED (x, y))

.. versionchanged:: 1.1 the ``mssql_clustered`` option now defaults
   to None, rather than False.  ``mssql_clustered=False`` now explicitly
   renders the NONCLUSTERED clause, whereas None omits the CLUSTERED
   clause entirely, allowing SQL Server defaults to take effect.

MSSQL-Specific Index Options
-----------------------------

In addition to clustering, the MSSQL dialect supports other special options
for :class:`.Index`.

INCLUDE
^^^^^^^

The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::

    Index("my_index", table.c.x, mssql_include=['y'])

would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``.

.. versionadded:: 0.8

Index ordering
^^^^^^^^^^^^^^

Index ordering is available via functional expressions, such as::

    Index("my_index", table.c.x.desc())

would render the index as ``CREATE INDEX my_index ON table (x DESC)``.

.. versionadded:: 0.8

.. seealso::

    :ref:`schema_indexes_functional`

Compatibility Levels
--------------------

MSSQL supports the notion of setting compatibility levels at the
database level.  This allows, for instance, to run a database that
is compatible with SQL2000 while running on a SQL2005 database
server.  ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information.  Because of this, if running under
a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
statements that are unable to be parsed by the database server.

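As a sketch (DSN and credentials hypothetical), the reported server version
can be inspected from Python after the first connection is made::

    from sqlalchemy import create_engine

    engine = create_engine("mssql+pyodbc://scott:tiger@mydsn")
    with engine.connect():
        # reflects the actual server build, e.g. (9, ...) for SQL2005,
        # regardless of the database's compatibility level setting
        print(engine.dialect.server_version_info)
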
Triggers
--------

SQLAlchemy by default uses OUTPUT INSERTED to get at newly
generated primary key values via IDENTITY columns or other
server side defaults.  MS-SQL does not
allow the usage of OUTPUT INSERTED on tables that have triggers.
To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`.Table`
which has triggers::

    Table('mytable', metadata,
          Column('id', Integer, primary_key=True),
          # ...,
          implicit_returning=False
          )

Declarative form::

    class MyClass(Base):
        # ...
        __table_args__ = {'implicit_returning': False}

This option can also be specified engine-wide using the
``implicit_returning=False`` argument on :func:`.create_engine`.

.. _mssql_rowcount_versioning:

Rowcount Support / ORM Versioning
---------------------------------

The SQL Server drivers have very limited ability to return the number
of rows updated from an UPDATE or DELETE statement.  In particular, the
pymssql driver has no support, whereas the pyodbc driver can only return
this value under certain conditions.

In particular, updated rowcount is not available when OUTPUT INSERTED
is used.  This impacts the SQLAlchemy ORM's versioning feature when
server-side versioning schemes are used.  When
using pyodbc, the "implicit_returning" flag needs to be set to false
for any ORM mapped class that uses a version_id column in conjunction with
a server-side version generator::

    class MyTable(Base):
        __tablename__ = 'mytable'
        id = Column(Integer, primary_key=True)
        stuff = Column(String(10))
        timestamp = Column(TIMESTAMP(), default=text('DEFAULT'))
        __mapper_args__ = {
            'version_id_col': timestamp,
            'version_id_generator': False,
        }
        __table_args__ = {
            'implicit_returning': False
        }

Without the implicit_returning flag above, the UPDATE statement will
use ``OUTPUT inserted.timestamp`` and the rowcount will be returned as
-1, causing the versioning logic to fail.

Enabling Snapshot Isolation
---------------------------

Not necessarily specific to SQLAlchemy, SQL Server has a default transaction
isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support.  This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::

    ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON

    ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON

Background on SQL Server snapshot isolation is available at
http://msdn.microsoft.com/en-us/library/ms175095.aspx.

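Once enabled at the database level, individual connections may opt in via
the dialect's isolation level support described earlier; a minimal sketch,
assuming ``engine`` already exists::

    connection = engine.connect().execution_options(
        isolation_level="SNAPSHOT"
    )
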
Known Issues
------------

* No support for more than one ``IDENTITY`` column per table
* Reflection of indexes does not work with versions older than
  SQL Server 2005

"""

import datetime
import operator
import re

from ... import sql, schema as sa_schema, exc, util
from ...sql import compiler, expression, util as sql_util
from ... import engine
from ...engine import reflection, default
from ... import types as sqltypes
from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
    FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
    TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR

from ...util import update_wrapper
from . import information_schema as ischema

# http://sqlserverbuilds.blogspot.com/
MS_2016_VERSION = (13,)
MS_2014_VERSION = (12,)
MS_2012_VERSION = (11,)
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)

RESERVED_WORDS = set(
    ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization',
     'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade',
     'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce',
     'collate', 'column', 'commit', 'compute', 'constraint', 'contains',
     'containstable', 'continue', 'convert', 'create', 'cross', 'current',
     'current_date', 'current_time', 'current_timestamp', 'current_user',
     'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default',
     'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double',
     'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec',
     'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor',
     'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full',
     'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity',
     'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert',
     'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like',
     'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not',
     'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource',
     'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer',
     'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print',
     'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext',
     'reconfigure', 'references', 'replication', 'restore', 'restrict',
     'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount',
     'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select',
     'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics',
     'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top',
     'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union',
     'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
     'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
     'writetext',
     ])


class REAL(sqltypes.REAL):
    __visit_name__ = 'REAL'

    def __init__(self, **kw):
        # REAL is a synonym for FLOAT(24) on SQL server
        kw['precision'] = 24
        super(REAL, self).__init__(**kw)


class TINYINT(sqltypes.Integer):
    __visit_name__ = 'TINYINT'


# MSSQL DATE/TIME types have varied behavior, sometimes returning
# strings.  MSDate/TIME check for everything, and always
# filter bind parameters into datetime objects (required by pyodbc,
# not sure about other dialects).

class _MSDate(sqltypes.Date):

    def bind_processor(self, dialect):
        def process(value):
            if type(value) == datetime.date:
                return datetime.datetime(value.year, value.month, value.day)
            else:
                return value
        return process

    _reg = re.compile(r"(\d+)-(\d+)-(\d+)")

    def result_processor(self, dialect, coltype):
        def process(value):
            if isinstance(value, datetime.datetime):
                return value.date()
            elif isinstance(value, util.string_types):
                m = self._reg.match(value)
                if not m:
                    raise ValueError(
                        "could not parse %r as a date value" % (value, ))
                return datetime.date(*[
                    int(x or 0)
                    for x in m.groups()
                ])
            else:
                return value
        return process


class TIME(sqltypes.TIME):

    def __init__(self, precision=None, **kwargs):
        self.precision = precision
        super(TIME, self).__init__()

    __zero_date = datetime.date(1900, 1, 1)

    def bind_processor(self, dialect):
        def process(value):
            if isinstance(value, datetime.datetime):
                value = datetime.datetime.combine(
                    self.__zero_date, value.time())
            elif isinstance(value, datetime.time):
                value = datetime.datetime.combine(self.__zero_date, value)
            return value
        return process

    _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")

    def result_processor(self, dialect, coltype):
        def process(value):
            if isinstance(value, datetime.datetime):
                return value.time()
            elif isinstance(value, util.string_types):
                m = self._reg.match(value)
                if not m:
                    raise ValueError(
                        "could not parse %r as a time value" % (value, ))
                return datetime.time(*[
                    int(x or 0)
                    for x in m.groups()])
            else:
                return value
        return process


_MSTime = TIME


class _DateTimeBase(object):

    def bind_processor(self, dialect):
        def process(value):
            if type(value) == datetime.date:
                return datetime.datetime(value.year, value.month, value.day)
            else:
                return value
        return process


class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
    pass


class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
    __visit_name__ = 'SMALLDATETIME'


class DATETIME2(_DateTimeBase, sqltypes.DateTime):
    __visit_name__ = 'DATETIME2'

    def __init__(self, precision=None, **kw):
        super(DATETIME2, self).__init__(**kw)
        self.precision = precision


# TODO: is this not an Interval ?
class DATETIMEOFFSET(sqltypes.TypeEngine):
    __visit_name__ = 'DATETIMEOFFSET'

    def __init__(self, precision=None, **kwargs):
        self.precision = precision


class _StringType(object):

    """Base for MSSQL string types."""

    def __init__(self, collation=None):
        super(_StringType, self).__init__(collation=collation)


class NTEXT(sqltypes.UnicodeText):

    """MSSQL NTEXT type, for variable-length unicode text up to 2^30
    characters."""

    __visit_name__ = 'NTEXT'


class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary):
    """The MSSQL VARBINARY type.

    This type extends both :class:`.types.VARBINARY` and
    :class:`.types.LargeBinary`.  In "deprecate_large_types" mode,
    the :class:`.types.LargeBinary` type will produce ``VARBINARY(max)``
    on SQL Server.

    .. versionadded:: 1.0.0

    .. seealso::

        :ref:`mssql_large_type_deprecation`

    """
    __visit_name__ = 'VARBINARY'


class IMAGE(sqltypes.LargeBinary):
    __visit_name__ = 'IMAGE'


class BIT(sqltypes.TypeEngine):
    __visit_name__ = 'BIT'


class MONEY(sqltypes.TypeEngine):
    __visit_name__ = 'MONEY'


class SMALLMONEY(sqltypes.TypeEngine):
    __visit_name__ = 'SMALLMONEY'


class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
    __visit_name__ = "UNIQUEIDENTIFIER"


class SQL_VARIANT(sqltypes.TypeEngine):
    __visit_name__ = 'SQL_VARIANT'

# old names.
MSDateTime = _MSDateTime
MSDate = _MSDate
MSReal = REAL
MSTinyInteger = TINYINT
MSTime = TIME
MSSmallDateTime = SMALLDATETIME
MSDateTime2 = DATETIME2
MSDateTimeOffset = DATETIMEOFFSET
MSText = TEXT
MSNText = NTEXT
MSString = VARCHAR
MSNVarchar = NVARCHAR
MSChar = CHAR
MSNChar = NCHAR
MSBinary = BINARY
MSVarBinary = VARBINARY
MSImage = IMAGE
MSBit = BIT
MSMoney = MONEY
MSSmallMoney = SMALLMONEY
MSUniqueIdentifier = UNIQUEIDENTIFIER
MSVariant = SQL_VARIANT

ischema_names = {
    'int': INTEGER,
    'bigint': BIGINT,
    'smallint': SMALLINT,
    'tinyint': TINYINT,
    'varchar': VARCHAR,
    'nvarchar': NVARCHAR,
    'char': CHAR,
    'nchar': NCHAR,
    'text': TEXT,
    'ntext': NTEXT,
    'decimal': DECIMAL,
    'numeric': NUMERIC,
    'float': FLOAT,
    'datetime': DATETIME,
    'datetime2': DATETIME2,
    'datetimeoffset': DATETIMEOFFSET,
    'date': DATE,
    'time': TIME,
    'smalldatetime': SMALLDATETIME,
    'binary': BINARY,
    'varbinary': VARBINARY,
    'bit': BIT,
    'real': REAL,
    'image': IMAGE,
    'timestamp': TIMESTAMP,
    'money': MONEY,
    'smallmoney': SMALLMONEY,
    'uniqueidentifier': UNIQUEIDENTIFIER,
    'sql_variant': SQL_VARIANT,
}


class MSTypeCompiler(compiler.GenericTypeCompiler):
    def _extend(self, spec, type_, length=None):
        """Extend a string-type declaration with standard SQL
        COLLATE annotations.

        """

        if getattr(type_, 'collation', None):
            collation = 'COLLATE %s' % type_.collation
        else:
            collation = None

        if not length:
            length = type_.length

        if length:
            spec = spec + "(%s)" % length

        return ' '.join([c for c in (spec, collation)
                         if c is not None])

    def visit_FLOAT(self, type_, **kw):
        precision = getattr(type_, 'precision', None)
        if precision is None:
            return "FLOAT"
        else:
            return "FLOAT(%(precision)s)" % {'precision': precision}

    def visit_TINYINT(self, type_, **kw):
        return "TINYINT"

    def visit_DATETIMEOFFSET(self, type_, **kw):
        if type_.precision is not None:
            return "DATETIMEOFFSET(%s)" % type_.precision
        else:
            return "DATETIMEOFFSET"

    def visit_TIME(self, type_, **kw):
        precision = getattr(type_, 'precision', None)
        if precision is not None:
            return "TIME(%s)" % precision
        else:
            return "TIME"

    def visit_DATETIME2(self, type_, **kw):
        precision = getattr(type_, 'precision', None)
        if precision is not None:
            return "DATETIME2(%s)" % precision
        else:
            return "DATETIME2"

    def visit_SMALLDATETIME(self, type_, **kw):
        return "SMALLDATETIME"

    def visit_unicode(self, type_, **kw):
        return self.visit_NVARCHAR(type_, **kw)

    def visit_text(self, type_, **kw):
        if self.dialect.deprecate_large_types:
            return self.visit_VARCHAR(type_, **kw)
        else:
            return self.visit_TEXT(type_, **kw)

    def visit_unicode_text(self, type_, **kw):
        if self.dialect.deprecate_large_types:
            return self.visit_NVARCHAR(type_, **kw)
        else:
            return self.visit_NTEXT(type_, **kw)

    def visit_NTEXT(self, type_, **kw):
        return self._extend("NTEXT", type_)

    def visit_TEXT(self, type_, **kw):
        return self._extend("TEXT", type_)

    def visit_VARCHAR(self, type_, **kw):
        return self._extend("VARCHAR", type_, length=type_.length or 'max')

    def visit_CHAR(self, type_, **kw):
        return self._extend("CHAR", type_)

    def visit_NCHAR(self, type_, **kw):
        return self._extend("NCHAR", type_)

    def visit_NVARCHAR(self, type_, **kw):
        return self._extend("NVARCHAR", type_, length=type_.length or 'max')

    def visit_date(self, type_, **kw):
        if self.dialect.server_version_info < MS_2008_VERSION:
            return self.visit_DATETIME(type_, **kw)
        else:
            return self.visit_DATE(type_, **kw)

    def visit_time(self, type_, **kw):
        if self.dialect.server_version_info < MS_2008_VERSION:
            return self.visit_DATETIME(type_, **kw)
        else:
            return self.visit_TIME(type_, **kw)

    def visit_large_binary(self, type_, **kw):
        if self.dialect.deprecate_large_types:
            return self.visit_VARBINARY(type_, **kw)
        else:
            return self.visit_IMAGE(type_, **kw)

    def visit_IMAGE(self, type_, **kw):
        return "IMAGE"

    def visit_VARBINARY(self, type_, **kw):
        return self._extend(
            "VARBINARY",
            type_,
            length=type_.length or 'max')

    def visit_boolean(self, type_, **kw):
        return self.visit_BIT(type_)

    def visit_BIT(self, type_, **kw):
        return "BIT"

    def visit_MONEY(self, type_, **kw):
        return "MONEY"

    def visit_SMALLMONEY(self, type_, **kw):
        return 'SMALLMONEY'

    def visit_UNIQUEIDENTIFIER(self, type_, **kw):
        return "UNIQUEIDENTIFIER"

    def visit_SQL_VARIANT(self, type_, **kw):
        return 'SQL_VARIANT'


class MSExecutionContext(default.DefaultExecutionContext):
    _enable_identity_insert = False
    _select_lastrowid = False
    _result_proxy = None
    _lastrowid = None

    def _opt_encode(self, statement):
        if not self.dialect.supports_unicode_statements:
            return self.dialect._encoder(statement)[0]
        else:
            return statement

    def pre_exec(self):
        """Activate IDENTITY_INSERT if needed."""

        if self.isinsert:
            tbl = self.compiled.statement.table
            seq_column = tbl._autoincrement_column
            insert_has_sequence = seq_column is not None

            if insert_has_sequence:
                # enable IDENTITY_INSERT only when an explicit value for
                # the autoincrement column is present in the parameters
                self._enable_identity_insert = \
                    seq_column.key in self.compiled_parameters[0] or \
                    (
                        self.compiled.statement.parameters and (
                            (
                                self.compiled.statement._has_multi_parameters
                                and
                                seq_column.key in
                                self.compiled.statement.parameters[0]
                            ) or (
                                not
                                self.compiled.statement._has_multi_parameters
                                and
                                seq_column.key in
                                self.compiled.statement.parameters
                            )
                        )
                    )
            else:
                self._enable_identity_insert = False

            self._select_lastrowid = not self.compiled.inline and \
                insert_has_sequence and \
                not self.compiled.returning and \
                not self._enable_identity_insert and \
                not self.executemany

            if self._enable_identity_insert:
                self.root_connection._cursor_execute(
                    self.cursor,
                    self._opt_encode(
                        "SET IDENTITY_INSERT %s ON" %
                        self.dialect.identifier_preparer.format_table(tbl)),
                    (),
                    self)

    def post_exec(self):
        """Disable IDENTITY_INSERT if enabled."""

        conn = self.root_connection
        if self._select_lastrowid:
            if self.dialect.use_scope_identity:
                conn._cursor_execute(
                    self.cursor,
                    "SELECT scope_identity() AS lastrowid", (), self)
            else:
                conn._cursor_execute(self.cursor,
                                     "SELECT @@identity AS lastrowid",
                                     (),
                                     self)
            # fetchall() ensures the cursor is consumed without closing it
            row = self.cursor.fetchall()[0]
            self._lastrowid = int(row[0])

        if (self.isinsert or self.isupdate or self.isdelete) and \
                self.compiled.returning:
            self._result_proxy = engine.FullyBufferedResultProxy(self)

        if self._enable_identity_insert:
            conn._cursor_execute(
                self.cursor,
                self._opt_encode(
                    "SET IDENTITY_INSERT %s OFF" %
                    self.dialect.identifier_preparer.format_table(
                        self.compiled.statement.table)),
                (),
                self)

    def get_lastrowid(self):
        return self._lastrowid

    def handle_dbapi_exception(self, e):
        if self._enable_identity_insert:
            try:
                self.cursor.execute(
                    self._opt_encode(
                        "SET IDENTITY_INSERT %s OFF" %
                        self.dialect.identifier_preparer.format_table(
                            self.compiled.statement.table)))
            except Exception:
                pass

    def get_result_proxy(self):
        if self._result_proxy:
            return self._result_proxy
        else:
            return engine.ResultProxy(self)


class MSSQLCompiler(compiler.SQLCompiler):
    returning_precedes_values = True

    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'doy': 'dayofyear',
            'dow': 'weekday',
            'milliseconds': 'millisecond',
            'microseconds': 'microsecond'
        })

    def __init__(self, *args, **kwargs):
        self.tablealiases = {}
        super(MSSQLCompiler, self).__init__(*args, **kwargs)

    def _with_legacy_schema_aliasing(fn):
        def decorate(self, *arg, **kw):
            if self.dialect.legacy_schema_aliasing:
                return fn(self, *arg, **kw)
            else:
                super_ = getattr(super(MSSQLCompiler, self), fn.__name__)
                return super_(*arg, **kw)
        return decorate

    def visit_now_func(self, fn, **kw):
        return "CURRENT_TIMESTAMP"

    def visit_current_date_func(self, fn, **kw):
        return "GETDATE()"

    def visit_length_func(self, fn, **kw):
        return "LEN%s" % self.function_argspec(fn, **kw)

    def visit_char_length_func(self, fn, **kw):
        return "LEN%s" % self.function_argspec(fn, **kw)

    def visit_concat_op_binary(self, binary, operator, **kw):
        return "%s + %s" % \
            (self.process(binary.left, **kw),
             self.process(binary.right, **kw))

    def visit_true(self, expr, **kw):
        return '1'

    def visit_false(self, expr, **kw):
        return '0'

    def visit_match_op_binary(self, binary, operator, **kw):
        return "CONTAINS (%s, %s)" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw))

    def get_select_precolumns(self, select, **kw):
        """MS-SQL puts TOP, its version of LIMIT, here."""

        s = ""
        if select._distinct:
            s += "DISTINCT "

        if select._simple_int_limit and not select._offset:
            # ODBC drivers and possibly others
            # don't support bind params in the SELECT clause on SQL Server,
            # so have to use a literal here.
            s += "TOP %d " % select._limit

        if s:
            return s
        else:
            return compiler.SQLCompiler.get_select_precolumns(
                self, select, **kw)

    def get_from_hint_text(self, table, text):
        return text

    def get_crud_hint_text(self, table, text):
        return text

    def limit_clause(self, select, **kw):
        # Limit in mssql is after the select keyword
        return ""

    def visit_select(self, select, **kwargs):
        """Look for ``LIMIT`` and OFFSET in a select statement, and if
        found, wrap the select in a subquery with a ``row_number()``
        criterion.

        """
        if (
            (
                not select._simple_int_limit and
                select._limit_clause is not None
            ) or (
                select._offset_clause is not None and
                not select._simple_int_offset or select._offset
            )
        ) and not getattr(select, '_mssql_visit', None):

            # to use ROW_NUMBER(), an ORDER BY is required.
            if not select._order_by_clause.clauses:
                raise exc.CompileError('MSSQL requires an order_by when '
                                       'using an OFFSET or a non-simple '
                                       'LIMIT clause')

            _order_by_clauses = [
                sql_util.unwrap_label_reference(elem)
                for elem in select._order_by_clause.clauses
            ]

            limit_clause = select._limit_clause
            offset_clause = select._offset_clause
            kwargs['select_wraps_for'] = select
            select = select._generate()
            select._mssql_visit = True
            select = select.column(
                sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
                .label("mssql_rn")).order_by(None).alias()

            mssql_rn = sql.column('mssql_rn')
            limitselect = sql.select([c for c in select.c if
                                      c.key != 'mssql_rn'])
            if offset_clause is not None:
                limitselect.append_whereclause(mssql_rn > offset_clause)
                if limit_clause is not None:
                    limitselect.append_whereclause(
                        mssql_rn <= (limit_clause + offset_clause))
            else:
                limitselect.append_whereclause(
                    mssql_rn <= (limit_clause))
            return self.process(limitselect, **kwargs)
        else:
            return compiler.SQLCompiler.visit_select(self, select, **kwargs)

    @_with_legacy_schema_aliasing
    def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
        if mssql_aliased is table or iscrud:
            return super(MSSQLCompiler, self).visit_table(table, **kwargs)

        # alias schema-qualified tables
        alias = self._schema_aliased_table(table)
        if alias is not None:
            return self.process(alias, mssql_aliased=table, **kwargs)
        else:
            return super(MSSQLCompiler, self).visit_table(table, **kwargs)

    @_with_legacy_schema_aliasing
    def visit_alias(self, alias, **kw):
        # translate for schema-qualified table aliases
        kw['mssql_aliased'] = alias.original
        return super(MSSQLCompiler, self).visit_alias(alias, **kw)

    @_with_legacy_schema_aliasing
    def visit_column(self, column, add_to_result_map=None, **kw):
        if column.table is not None and \
                (not self.isupdate and not self.isdelete) or \
                self.is_subquery():
            # translate for schema-qualified table aliases
            t = self._schema_aliased_table(column.table)
            if t is not None:
                converted = expression._corresponding_column_or_error(
                    t, column)
                if add_to_result_map is not None:
                    add_to_result_map(
                        column.name,
                        column.name,
                        (column, column.name, column.key),
                        column.type
                    )

                return super(MSSQLCompiler, self).\
                    visit_column(converted, **kw)

        return super(MSSQLCompiler, self).visit_column(
            column, add_to_result_map=add_to_result_map, **kw)

    def _schema_aliased_table(self, table):
        if getattr(table, 'schema', None) is not None:
            if table not in self.tablealiases:
                self.tablealiases[table] = table.alias()
            return self.tablealiases[table]
        else:
            return None

    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART(%s, %s)' % \
            (field, self.process(extract.expr, **kw))

    def visit_savepoint(self, savepoint_stmt):
        return "SAVE TRANSACTION %s" % \
            self.preparer.format_savepoint(savepoint_stmt)

    def visit_rollback_to_savepoint(self, savepoint_stmt):
        return ("ROLLBACK TRANSACTION %s"
                % self.preparer.format_savepoint(savepoint_stmt))

    def visit_binary(self, binary, **kwargs):
        """Move bind parameters to the right-hand side of an operator, where
        possible.

        """
        if (
            isinstance(binary.left, expression.BindParameter)
            and binary.operator == operator.eq
            and not isinstance(binary.right, expression.BindParameter)
        ):
            return self.process(
                expression.BinaryExpression(binary.right,
                                            binary.left,
                                            binary.operator),
                **kwargs)
        return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)

    def returning_clause(self, stmt, returning_cols):

        if self.isinsert or self.isupdate:
            target = stmt.table.alias("inserted")
        else:
            target = stmt.table.alias("deleted")

        adapter = sql_util.ClauseAdapter(target)

        columns = [
            self._label_select_column(None, adapter.traverse(c),
                                      True, False, {})
            for c in expression._select_iterables(returning_cols)
        ]

        return 'OUTPUT ' + ', '.join(columns)

    def get_cte_preamble(self, recursive):
        # SQL Server finds it too inconvenient to accept
        # an entirely optional, SQL standard specified,
        # "RECURSIVE" word with their "WITH",
        # so here we go
        return "WITH"

    def label_select_column(self, select, column, asfrom):
        if isinstance(column, expression.Function):
            return column.label(None)
        else:
            return super(MSSQLCompiler, self).\
                label_select_column(select, column, asfrom)

    def for_update_clause(self, select):
        # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
        # SQLAlchemy doesn't use
        return ''

    def order_by_clause(self, select, **kw):
        order_by = self.process(select._order_by_clause, **kw)

        # MSSQL only allows ORDER BY in subqueries if there is a LIMIT
        if order_by and (not self.is_subquery() or select._limit):
            return " ORDER BY " + order_by
        else:
            return ""

    def update_from_clause(self, update_stmt,
                           from_table, extra_froms,
                           from_hints,
                           **kw):
        """Render the UPDATE..FROM clause specific to MSSQL.

        In MSSQL, if the UPDATE statement involves an alias of the table to
        be updated, then the table itself must be added to the FROM list as
        well.  Otherwise, it is optional.  Here, we add it regardless.

        """
        return "FROM " + ', '.join(
            t._compiler_dispatch(self, asfrom=True,
                                 fromhints=from_hints, **kw)
            for t in [from_table] + extra_froms)


class MSSQLStrictCompiler(MSSQLCompiler):

    """A subclass of MSSQLCompiler which disables the usage of bind
    parameters where not allowed natively by MS-SQL.

    A dialect may use this compiler on a platform where native
    binds are used.

    """
    ansi_bind_rules = True

    def visit_in_op_binary(self, binary, operator, **kw):
        kw['literal_binds'] = True
        return "%s IN %s" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw)
        )

    def visit_notin_op_binary(self, binary, operator, **kw):
        kw['literal_binds'] = True
        return "%s NOT IN %s" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw)
        )

def render_literal_value(self, value, type_):
|
|
|
|
"""
|
|
|
|
For date and datetime values, convert to a string
|
|
|
|
format acceptable to MSSQL. That seems to be the
|
|
|
|
so-called ODBC canonical date format which looks
|
|
|
|
like this:
|
2017-04-15 18:33:29 +02:00
|
|
|
|
2010-05-07 19:33:49 +02:00
|
|
|
yyyy-mm-dd hh:mi:ss.mmm(24h)
|
2017-04-15 18:33:29 +02:00
|
|
|
|
2010-05-07 19:33:49 +02:00
|
|
|
For other data types, call the base class implementation.
|
|
|
|
"""
|
|
|
|
# datetime and date are both subclasses of datetime.date
|
|
|
|
if issubclass(type(value), datetime.date):
|
|
|
|
# SQL Server wants single quotes around the date string.
|
|
|
|
return "'" + str(value) + "'"
|
|
|
|
else:
|
2017-04-15 18:33:29 +02:00
|
|
|
return super(MSSQLStrictCompiler, self).\
|
|
|
|
render_literal_value(value, type_)
|
|
|
|
|
2010-05-07 19:33:49 +02:00
|
|
|
|
|
|
|
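
    # Illustrative sketch: with literal rendering in effect, a bound
    # ``datetime.date(2017, 4, 15)`` is inlined into the SQL string as
    # '2017-04-15' (single-quoted), rather than passed as a bind parameter.
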
class MSDDLCompiler(compiler.DDLCompiler):
    def get_column_specification(self, column, **kwargs):
        colspec = (
            self.preparer.format_column(column) + " "
            + self.dialect.type_compiler.process(
                column.type, type_expression=column)
        )

        if column.nullable is not None:
            if not column.nullable or column.primary_key or \
                    isinstance(column.default, sa_schema.Sequence):
                colspec += " NOT NULL"
            else:
                colspec += " NULL"

        if column.table is None:
            raise exc.CompileError(
                "mssql requires Table-bound columns "
                "in order to generate DDL")

        # install an IDENTITY clause if we have either an explicit
        # Sequence or an implicit IDENTITY column
        if isinstance(column.default, sa_schema.Sequence):
            if column.default.start == 0:
                start = 0
            else:
                start = column.default.start or 1

            colspec += " IDENTITY(%s,%s)" % (start,
                                             column.default.increment or 1)
        elif column is column.table._autoincrement_column:
            colspec += " IDENTITY(1,1)"
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        return colspec
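
    # Illustrative sketch: a Column('id', Integer, primary_key=True) that is
    # the table's autoincrement column comes back from this method as
    # "id INTEGER NOT NULL IDENTITY(1,1)"; note the special case above that
    # preserves an explicit Sequence start of 0 instead of coercing it to 1.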
    def visit_create_index(self, create, include_schema=False):
        index = create.element
        self._verify_index_table(index)
        preparer = self.preparer
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "

        # handle clustering option
        clustered = index.dialect_options['mssql']['clustered']
        if clustered is not None:
            if clustered:
                text += "CLUSTERED "
            else:
                text += "NONCLUSTERED "

        text += "INDEX %s ON %s (%s)" \
            % (
                self._prepared_index_name(index,
                                          include_schema=include_schema),
                preparer.format_table(index.table),
                ', '.join(
                    self.sql_compiler.process(expr,
                                              include_table=False,
                                              literal_binds=True) for
                    expr in index.expressions)
            )

        # handle other included columns
        if index.dialect_options['mssql']['include']:
            inclusions = [index.table.c[col]
                          if isinstance(col, util.string_types) else col
                          for col in
                          index.dialect_options['mssql']['include']
                          ]

            text += " INCLUDE (%s)" \
                % ', '.join([preparer.quote(c.name)
                             for c in inclusions])

        return text
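
    # Illustrative sketch: a hypothetical Index('ix_x', t.c.x,
    # mssql_clustered=True, mssql_include=['y']) would emit
    # "CREATE CLUSTERED INDEX ix_x ON t (x) INCLUDE (y)".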
    def visit_drop_index(self, drop):
        return "\nDROP INDEX %s ON %s" % (
            self._prepared_index_name(drop.element, include_schema=False),
            self.preparer.format_table(drop.element.table)
        )

    def visit_primary_key_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                self.preparer.format_constraint(constraint)
        text += "PRIMARY KEY "

        clustered = constraint.dialect_options['mssql']['clustered']
        if clustered is not None:
            if clustered:
                text += "CLUSTERED "
            else:
                text += "NONCLUSTERED "

        text += "(%s)" % ', '.join(self.preparer.quote(c.name)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text

    def visit_unique_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                self.preparer.format_constraint(constraint)
        text += "UNIQUE "

        clustered = constraint.dialect_options['mssql']['clustered']
        if clustered is not None:
            if clustered:
                text += "CLUSTERED "
            else:
                text += "NONCLUSTERED "

        text += "(%s)" % ', '.join(self.preparer.quote(c.name)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text


class MSIdentifierPreparer(compiler.IdentifierPreparer):
    reserved_words = RESERVED_WORDS

    def __init__(self, dialect):
        super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
                                                   final_quote=']')

    def _escape_identifier(self, value):
        return value

    def quote_schema(self, schema, force=None):
        """Prepare a quoted table and schema name."""
        result = '.'.join([self.quote(x, force) for x in schema.split('.')])
        return result
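
# Illustrative sketch: given a dotted schema such as 'mydb.dbo',
# quote_schema() splits on the dot and quotes each component separately,
# so any part that requires quoting comes out in SQL Server's bracket
# style, e.g. '[my db].dbo', rather than the dot being treated as part
# of a single identifier.
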
def _db_plus_owner_listing(fn):
    def wrap(dialect, connection, schema=None, **kw):
        dbname, owner = _owner_plus_db(dialect, schema)
        return _switch_db(dbname, connection, fn, dialect, connection,
                          dbname, owner, schema, **kw)
    return update_wrapper(wrap, fn)


def _db_plus_owner(fn):
    def wrap(dialect, connection, tablename, schema=None, **kw):
        dbname, owner = _owner_plus_db(dialect, schema)
        return _switch_db(dbname, connection, fn, dialect, connection,
                          tablename, dbname, owner, schema, **kw)
    return update_wrapper(wrap, fn)


def _switch_db(dbname, connection, fn, *arg, **kw):
    if dbname:
        current_db = connection.scalar("select db_name()")
        connection.execute("use %s" % dbname)
    try:
        return fn(*arg, **kw)
    finally:
        if dbname:
            connection.execute("use %s" % current_db)


def _owner_plus_db(dialect, schema):
    if not schema:
        return None, dialect.default_schema_name
    elif "." in schema:
        return schema.split(".", 1)
    else:
        return None, schema
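
# Illustrative sketch: a reflection call with schema='otherdb.dbo' is split
# by _owner_plus_db() into dbname='otherdb', owner='dbo'; _switch_db() then
# issues "use otherdb", runs the wrapped reflection query, and restores the
# original database afterwards. A plain schema='dbo' leaves dbname as None,
# and no database switch occurs.
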
class MSDialect(default.DefaultDialect):
    name = 'mssql'
    supports_default_values = True
    supports_empty_insert = False
    execution_ctx_cls = MSExecutionContext
    use_scope_identity = True
    max_identifier_length = 128
    schema_name = "dbo"

    colspecs = {
        sqltypes.DateTime: _MSDateTime,
        sqltypes.Date: _MSDate,
        sqltypes.Time: TIME,
    }

    engine_config_types = default.DefaultDialect.engine_config_types.union([
        ('legacy_schema_aliasing', util.asbool),
    ])

    ischema_names = ischema_names

    supports_native_boolean = False
    supports_unicode_binds = True
    postfetch_lastrowid = True

    server_version_info = ()

    statement_compiler = MSSQLCompiler
    ddl_compiler = MSDDLCompiler
    type_compiler = MSTypeCompiler
    preparer = MSIdentifierPreparer

    construct_arguments = [
        (sa_schema.PrimaryKeyConstraint, {
            "clustered": None
        }),
        (sa_schema.UniqueConstraint, {
            "clustered": None
        }),
        (sa_schema.Index, {
            "clustered": None,
            "include": None
        })
    ]

    def __init__(self,
                 query_timeout=None,
                 use_scope_identity=True,
                 max_identifier_length=None,
                 schema_name="dbo",
                 isolation_level=None,
                 deprecate_large_types=None,
                 legacy_schema_aliasing=False, **opts):
        self.query_timeout = int(query_timeout or 0)
        self.schema_name = schema_name

        self.use_scope_identity = use_scope_identity
        self.max_identifier_length = int(max_identifier_length or 0) or \
            self.max_identifier_length
        self.deprecate_large_types = deprecate_large_types
        self.legacy_schema_aliasing = legacy_schema_aliasing

        super(MSDialect, self).__init__(**opts)

        self.isolation_level = isolation_level

    def do_savepoint(self, connection, name):
        # give the DBAPI a push
        connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
        super(MSDialect, self).do_savepoint(connection, name)

    def do_release_savepoint(self, connection, name):
        # SQL Server does not support RELEASE SAVEPOINT
        pass

    _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
                             'READ COMMITTED', 'REPEATABLE READ',
                             'SNAPSHOT'])

    def set_isolation_level(self, connection, level):
        level = level.replace('_', ' ')
        if level not in self._isolation_lookup:
            raise exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s" %
                (level, self.name, ", ".join(self._isolation_lookup))
            )
        cursor = connection.cursor()
        cursor.execute(
            "SET TRANSACTION ISOLATION LEVEL %s" % level)
        cursor.close()

    def get_isolation_level(self, connection):
        if self.server_version_info < MS_2005_VERSION:
            raise NotImplementedError(
                "Can't fetch isolation level prior to SQL Server 2005")

        cursor = connection.cursor()
        cursor.execute("""
            SELECT CASE transaction_isolation_level
                WHEN 0 THEN NULL
                WHEN 1 THEN 'READ UNCOMMITTED'
                WHEN 2 THEN 'READ COMMITTED'
                WHEN 3 THEN 'REPEATABLE READ'
                WHEN 4 THEN 'SERIALIZABLE'
                WHEN 5 THEN 'SNAPSHOT' END AS TRANSACTION_ISOLATION_LEVEL
            FROM sys.dm_exec_sessions
            WHERE session_id = @@SPID
        """)
        val = cursor.fetchone()[0]
        cursor.close()
        return val.upper()
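
    # Illustrative sketch: set_isolation_level(dbapi_conn, "READ_COMMITTED")
    # normalizes the underscore and executes
    # "SET TRANSACTION ISOLATION LEVEL READ COMMITTED"; typically this is
    # driven by create_engine(..., isolation_level="SNAPSHOT") or similar.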
    def initialize(self, connection):
        super(MSDialect, self).initialize(connection)
        self._setup_version_attributes()

    def on_connect(self):
        if self.isolation_level is not None:
            def connect(conn):
                self.set_isolation_level(conn, self.isolation_level)
            return connect
        else:
            return None

    def _setup_version_attributes(self):
        if self.server_version_info[0] not in list(range(8, 17)):
            util.warn(
                "Unrecognized server version info '%s'. Some SQL Server "
                "features may not function properly." %
                ".".join(str(x) for x in self.server_version_info))
        if self.server_version_info >= MS_2005_VERSION and \
                'implicit_returning' not in self.__dict__:
            self.implicit_returning = True
        if self.server_version_info >= MS_2008_VERSION:
            self.supports_multivalues_insert = True
        if self.deprecate_large_types is None:
            self.deprecate_large_types = \
                self.server_version_info >= MS_2012_VERSION

    def _get_default_schema_name(self, connection):
        if self.server_version_info < MS_2005_VERSION:
            return self.schema_name
        else:
            query = sql.text("SELECT schema_name()")
            default_schema_name = connection.scalar(query)
            if default_schema_name is not None:
                return util.text_type(default_schema_name)
            else:
                return self.schema_name

    @_db_plus_owner
    def has_table(self, connection, tablename, dbname, owner, schema):
        columns = ischema.columns

        whereclause = columns.c.table_name == tablename

        if owner:
            whereclause = sql.and_(whereclause,
                                   columns.c.table_schema == owner)
        s = sql.select([columns], whereclause)
        c = connection.execute(s)
        return c.first() is not None

    @reflection.cache
    def get_schema_names(self, connection, **kw):
        s = sql.select([ischema.schemata.c.schema_name],
                       order_by=[ischema.schemata.c.schema_name]
                       )
        schema_names = [r[0] for r in connection.execute(s)]
        return schema_names

    @reflection.cache
    @_db_plus_owner_listing
    def get_table_names(self, connection, dbname, owner, schema, **kw):
        tables = ischema.tables
        s = sql.select([tables.c.table_name],
                       sql.and_(
                           tables.c.table_schema == owner,
                           tables.c.table_type == 'BASE TABLE'
                       ),
                       order_by=[tables.c.table_name]
                       )
        table_names = [r[0] for r in connection.execute(s)]
        return table_names

    @reflection.cache
    @_db_plus_owner_listing
    def get_view_names(self, connection, dbname, owner, schema, **kw):
        tables = ischema.tables
        s = sql.select([tables.c.table_name],
                       sql.and_(
                           tables.c.table_schema == owner,
                           tables.c.table_type == 'VIEW'
                       ),
                       order_by=[tables.c.table_name]
                       )
        view_names = [r[0] for r in connection.execute(s)]
        return view_names

    @reflection.cache
    @_db_plus_owner
    def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
        # using system catalogs, don't support index reflection
        # below MS 2005
        if self.server_version_info < MS_2005_VERSION:
            return []

        rp = connection.execute(
            sql.text("select ind.index_id, ind.is_unique, ind.name "
                     "from sys.indexes as ind join sys.tables as tab on "
                     "ind.object_id=tab.object_id "
                     "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                     "where tab.name = :tabname "
                     "and sch.name=:schname "
                     "and ind.is_primary_key=0",
                     bindparams=[
                         sql.bindparam('tabname', tablename,
                                       sqltypes.String(convert_unicode=True)),
                         sql.bindparam('schname', owner,
                                       sqltypes.String(convert_unicode=True))
                     ],
                     typemap={
                         'name': sqltypes.Unicode()
                     }
                     )
        )
        indexes = {}
        for row in rp:
            indexes[row['index_id']] = {
                'name': row['name'],
                'unique': row['is_unique'] == 1,
                'column_names': []
            }
        rp = connection.execute(
            sql.text(
                "select ind_col.index_id, ind_col.object_id, col.name "
                "from sys.columns as col "
                "join sys.tables as tab on tab.object_id=col.object_id "
                "join sys.index_columns as ind_col on "
                "(ind_col.column_id=col.column_id and "
                "ind_col.object_id=tab.object_id) "
                "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                "where tab.name=:tabname "
                "and sch.name=:schname",
                bindparams=[
                    sql.bindparam('tabname', tablename,
                                  sqltypes.String(convert_unicode=True)),
                    sql.bindparam('schname', owner,
                                  sqltypes.String(convert_unicode=True))
                ],
                typemap={'name': sqltypes.Unicode()}
            )
        )
        for row in rp:
            if row['index_id'] in indexes:
                indexes[row['index_id']]['column_names'].append(row['name'])

        return list(indexes.values())

    @reflection.cache
    @_db_plus_owner
    def get_view_definition(self, connection, viewname,
                            dbname, owner, schema, **kw):
        rp = connection.execute(
            sql.text(
                "select definition from sys.sql_modules as mod, "
                "sys.views as views, "
                "sys.schemas as sch"
                " where "
                "mod.object_id=views.object_id and "
                "views.schema_id=sch.schema_id and "
                "views.name=:viewname and sch.name=:schname",
                bindparams=[
                    sql.bindparam('viewname', viewname,
                                  sqltypes.String(convert_unicode=True)),
                    sql.bindparam('schname', owner,
                                  sqltypes.String(convert_unicode=True))
                ]
            )
        )

        if rp:
            view_def = rp.scalar()
            return view_def

    @reflection.cache
    @_db_plus_owner
    def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
        # Get base columns
        columns = ischema.columns
        if owner:
            whereclause = sql.and_(columns.c.table_name == tablename,
                                   columns.c.table_schema == owner)
        else:
            whereclause = columns.c.table_name == tablename
        s = sql.select([columns], whereclause,
                       order_by=[columns.c.ordinal_position])

        c = connection.execute(s)
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            (name, type, nullable, charlen,
             numericprec, numericscale, default, collation) = (
                row[columns.c.column_name],
                row[columns.c.data_type],
                row[columns.c.is_nullable] == 'YES',
                row[columns.c.character_maximum_length],
                row[columns.c.numeric_precision],
                row[columns.c.numeric_scale],
                row[columns.c.column_default],
                row[columns.c.collation_name]
            )
            coltype = self.ischema_names.get(type, None)

            kwargs = {}
            if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
                           MSNText, MSBinary, MSVarBinary,
                           sqltypes.LargeBinary):
                if charlen == -1:
                    charlen = None
                kwargs['length'] = charlen
                if collation:
                    kwargs['collation'] = collation

            if coltype is None:
                util.warn(
                    "Did not recognize type '%s' of column '%s'" %
                    (type, name))
                coltype = sqltypes.NULLTYPE
            else:
                if issubclass(coltype, sqltypes.Numeric) and \
                        coltype is not MSReal:
                    kwargs['scale'] = numericscale
                    kwargs['precision'] = numericprec

                coltype = coltype(**kwargs)
            cdict = {
                'name': name,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement': False,
            }
            cols.append(cdict)
        # autoincrement and identity
        colmap = {}
        for col in cols:
            colmap[col['name']] = col
        # We also run an sp_columns to check for identity columns:
        cursor = connection.execute("sp_columns @table_name = '%s', "
                                    "@table_owner = '%s'"
                                    % (tablename, owner))
        ic = None
        while True:
            row = cursor.fetchone()
            if row is None:
                break
            (col_name, type_name) = row[3], row[5]
            if type_name.endswith("identity") and col_name in colmap:
                ic = col_name
                colmap[col_name]['autoincrement'] = True
                colmap[col_name]['sequence'] = dict(
                    name='%s_identity' % col_name)
                break
        cursor.close()

        if ic is not None and self.server_version_info >= MS_2005_VERSION:
            table_fullname = "%s.%s" % (owner, tablename)
            cursor = connection.execute(
                "select ident_seed('%s'), ident_incr('%s')"
                % (table_fullname, table_fullname)
            )

            row = cursor.first()
            if row is not None and row[0] is not None:
                colmap[ic]['sequence'].update({
                    'start': int(row[0]),
                    'increment': int(row[1])
                })
        return cols
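
    # Illustrative sketch: for an IDENTITY(1,1) column named "id", the
    # corresponding entry in the returned list looks roughly like
    # {'name': 'id', 'type': INTEGER(), 'nullable': False, 'default': None,
    #  'autoincrement': True,
    #  'sequence': {'name': 'id_identity', 'start': 1, 'increment': 1}}.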

    @reflection.cache
    @_db_plus_owner
    def get_pk_constraint(self, connection, tablename,
                          dbname, owner, schema, **kw):
        pkeys = []
        TC = ischema.constraints
        C = ischema.key_constraints.alias('C')

        # Primary key constraints
        s = sql.select([C.c.column_name,
                        TC.c.constraint_type,
                        C.c.constraint_name],
                       sql.and_(TC.c.constraint_name == C.c.constraint_name,
                                TC.c.table_schema == C.c.table_schema,
                                C.c.table_name == tablename,
                                C.c.table_schema == owner)
                       )
        c = connection.execute(s)
        constraint_name = None
        for row in c:
            if 'PRIMARY' in row[TC.c.constraint_type.name]:
                pkeys.append(row[0])
                if constraint_name is None:
                    constraint_name = row[C.c.constraint_name.name]
        return {'constrained_columns': pkeys, 'name': constraint_name}

    @reflection.cache
    @_db_plus_owner
    def get_foreign_keys(self, connection, tablename,
                         dbname, owner, schema, **kw):
        RR = ischema.ref_constraints
        C = ischema.key_constraints.alias('C')
        R = ischema.key_constraints.alias('R')

        # Foreign key constraints
        s = sql.select([C.c.column_name,
                        R.c.table_schema, R.c.table_name, R.c.column_name,
                        RR.c.constraint_name, RR.c.match_option,
                        RR.c.update_rule,
                        RR.c.delete_rule],
                       sql.and_(C.c.table_name == tablename,
                                C.c.table_schema == owner,
                                C.c.constraint_name == RR.c.constraint_name,
                                R.c.constraint_name ==
                                RR.c.unique_constraint_name,
                                C.c.ordinal_position == R.c.ordinal_position
                                ),
                       order_by=[RR.c.constraint_name, R.c.ordinal_position]
                       )

        # group rows by constraint ID, to handle multi-column FKs
        def fkey_rec():
            return {
                'name': None,
                'constrained_columns': [],
                'referred_schema': None,
                'referred_table': None,
                'referred_columns': []
            }

        fkeys = util.defaultdict(fkey_rec)

        for r in connection.execute(s).fetchall():
            scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r

            rec = fkeys[rfknm]
            rec['name'] = rfknm
            if not rec['referred_table']:
                rec['referred_table'] = rtbl
                if schema is not None or owner != rschema:
                    if dbname:
                        rschema = dbname + "." + rschema
                    rec['referred_schema'] = rschema

            local_cols, remote_cols = \
                rec['constrained_columns'],\
                rec['referred_columns']

            local_cols.append(scol)
            remote_cols.append(rcol)

        return list(fkeys.values())
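
    # Illustrative sketch: a two-column FK named "fk_a" yields a single
    # record, e.g. {'name': 'fk_a', 'constrained_columns': ['x1', 'x2'],
    # 'referred_schema': None, 'referred_table': 'parent',
    # 'referred_columns': ['y1', 'y2']}, because rows are grouped by
    # constraint name in the defaultdict above.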