# sybase/base.py
# Copyright (C) 2010-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
2010-05-07 19:33:49 +02:00
2017-04-15 18:33:29 +02:00
.. dialect:: sybase
:name: Sybase
2010-05-07 19:33:49 +02:00
2017-04-15 18:33:29 +02:00
.. note::
2010-05-07 19:33:49 +02:00
2017-04-15 18:33:29 +02:00
The Sybase dialect functions on current SQLAlchemy versions
but is not regularly tested, and may have many issues and
caveats not currently handled.
"""
2010-05-07 19:33:49 +02:00
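
# A minimal connection sketch (illustrative only: the DSN and credentials
# are placeholders, and the DBAPI-specific variants of this dialect, such
# as sybase+pyodbc, live in sibling modules):
#
#     from sqlalchemy import create_engine
#     engine = create_engine("sybase+pyodbc://user:password@mydsn")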

import operator
import re

from sqlalchemy.sql import compiler, expression, text, bindparam
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc
from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR, \
    TEXT, DATE, DATETIME, FLOAT, NUMERIC, \
    BIGINT, INT, INTEGER, SMALLINT, BINARY, \
    VARBINARY, DECIMAL, TIMESTAMP, Unicode, \
    UnicodeText, REAL

RESERVED_WORDS = set([
    "add", "all", "alter", "and",
    "any", "as", "asc", "backup",
    "begin", "between", "bigint", "binary",
    "bit", "bottom", "break", "by",
    "call", "capability", "cascade", "case",
    "cast", "char", "char_convert", "character",
    "check", "checkpoint", "close", "comment",
    "commit", "connect", "constraint", "contains",
    "continue", "convert", "create", "cross",
    "cube", "current", "current_timestamp", "current_user",
    "cursor", "date", "dbspace", "deallocate",
    "dec", "decimal", "declare", "default",
    "delete", "deleting", "desc", "distinct",
    "do", "double", "drop", "dynamic",
    "else", "elseif", "encrypted", "end",
    "endif", "escape", "except", "exception",
    "exec", "execute", "existing", "exists",
    "externlogin", "fetch", "first", "float",
    "for", "force", "foreign", "forward",
    "from", "full", "goto", "grant",
    "group", "having", "holdlock", "identified",
    "if", "in", "index", "index_lparen",
    "inner", "inout", "insensitive", "insert",
    "inserting", "install", "instead", "int",
    "integer", "integrated", "intersect", "into",
    "iq", "is", "isolation", "join",
    "key", "lateral", "left", "like",
    "lock", "login", "long", "match",
    "membership", "message", "mode", "modify",
    "natural", "new", "no", "noholdlock",
    "not", "notify", "null", "numeric",
    "of", "off", "on", "open",
    "option", "options", "or", "order",
    "others", "out", "outer", "over",
    "passthrough", "precision", "prepare", "primary",
    "print", "privileges", "proc", "procedure",
    "publication", "raiserror", "readtext", "real",
    "reference", "references", "release", "remote",
    "remove", "rename", "reorganize", "resource",
    "restore", "restrict", "return", "revoke",
    "right", "rollback", "rollup", "save",
    "savepoint", "scroll", "select", "sensitive",
    "session", "set", "setuser", "share",
    "smallint", "some", "sqlcode", "sqlstate",
    "start", "stop", "subtrans", "subtransaction",
    "synchronize", "syntax_error", "table", "temporary",
    "then", "time", "timestamp", "tinyint",
    "to", "top", "tran", "trigger",
    "truncate", "tsequal", "unbounded", "union",
    "unique", "unknown", "unsigned", "update",
    "updating", "user", "using", "validate",
    "values", "varbinary", "varchar", "variable",
    "varying", "view", "wait", "waitfor",
    "when", "where", "while", "window",
    "with", "with_cube", "with_lparen", "with_rollup",
    "within", "work", "writetext",
])


class _SybaseUnitypeMixin(object):
    """these types appear to return a buffer object."""

    def result_processor(self, dialect, coltype):
        def process(value):
            if value is not None:
                return str(value)  # decode("ucs-2")
            else:
                return None
        return process


class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
    __visit_name__ = 'UNICHAR'


class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
    __visit_name__ = 'UNIVARCHAR'


class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
    __visit_name__ = 'UNITEXT'


class TINYINT(sqltypes.Integer):
    __visit_name__ = 'TINYINT'


class BIT(sqltypes.TypeEngine):
    __visit_name__ = 'BIT'


class MONEY(sqltypes.TypeEngine):
    __visit_name__ = "MONEY"


class SMALLMONEY(sqltypes.TypeEngine):
    __visit_name__ = "SMALLMONEY"


class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
    __visit_name__ = "UNIQUEIDENTIFIER"


class IMAGE(sqltypes.LargeBinary):
    __visit_name__ = 'IMAGE'


class SybaseTypeCompiler(compiler.GenericTypeCompiler):
    def visit_large_binary(self, type_, **kw):
        return self.visit_IMAGE(type_)

    def visit_boolean(self, type_, **kw):
        return self.visit_BIT(type_)

    def visit_unicode(self, type_, **kw):
        return self.visit_NVARCHAR(type_)

    def visit_UNICHAR(self, type_, **kw):
        return "UNICHAR(%d)" % type_.length

    def visit_UNIVARCHAR(self, type_, **kw):
        return "UNIVARCHAR(%d)" % type_.length

    def visit_UNITEXT(self, type_, **kw):
        return "UNITEXT"

    def visit_TINYINT(self, type_, **kw):
        return "TINYINT"

    def visit_IMAGE(self, type_, **kw):
        return "IMAGE"

    def visit_BIT(self, type_, **kw):
        return "BIT"

    def visit_MONEY(self, type_, **kw):
        return "MONEY"

    def visit_SMALLMONEY(self, type_, **kw):
        return "SMALLMONEY"

    def visit_UNIQUEIDENTIFIER(self, type_, **kw):
        return "UNIQUEIDENTIFIER"


ischema_names = {
    'bigint': BIGINT,
    'int': INTEGER,
    'integer': INTEGER,
    'smallint': SMALLINT,
    'tinyint': TINYINT,
    'unsigned bigint': BIGINT,  # TODO: unsigned flags
    'unsigned int': INTEGER,  # TODO: unsigned flags
    'unsigned smallint': SMALLINT,  # TODO: unsigned flags
    'numeric': NUMERIC,
    'decimal': DECIMAL,
    'dec': DECIMAL,
    'float': FLOAT,
    'double': NUMERIC,  # TODO
    'double precision': NUMERIC,  # TODO
    'real': REAL,
    'smallmoney': SMALLMONEY,
    'money': MONEY,
    'smalldatetime': DATETIME,
    'datetime': DATETIME,
    'date': DATE,
    'time': TIME,
    'char': CHAR,
    'character': CHAR,
    'varchar': VARCHAR,
    'character varying': VARCHAR,
    'char varying': VARCHAR,
    'unichar': UNICHAR,
    'unicode character': UNIVARCHAR,
    'nchar': NCHAR,
    'national char': NCHAR,
    'national character': NCHAR,
    'nvarchar': NVARCHAR,
    'nchar varying': NVARCHAR,
    'national char varying': NVARCHAR,
    'national character varying': NVARCHAR,
    'text': TEXT,
    'unitext': UNITEXT,
    'binary': BINARY,
    'varbinary': VARBINARY,
    'image': IMAGE,
    'bit': BIT,

    # not in documentation for ASE 15.7
    'long varchar': TEXT,  # TODO
    'timestamp': TIMESTAMP,
    'uniqueidentifier': UNIQUEIDENTIFIER,
}


class SybaseInspector(reflection.Inspector):

    def __init__(self, conn):
        reflection.Inspector.__init__(self, conn)

    def get_table_id(self, table_name, schema=None):
        """Return the table id from `table_name` and `schema`."""

        return self.dialect.get_table_id(self.bind, table_name, schema,
                                         info_cache=self.info_cache)
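

# Usage sketch for SybaseInspector (assumes an engine already bound to
# this dialect; the table and schema names below are placeholders):
#
#     from sqlalchemy import inspect
#     insp = inspect(engine)  # returns a SybaseInspector for this dialect
#     table_id = insp.get_table_id("my_table", schema="dbo")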


class SybaseExecutionContext(default.DefaultExecutionContext):
    _enable_identity_insert = False

    def set_ddl_autocommit(self, connection, value):
        """Must be implemented by subclasses to accommodate DDL executions.

        "connection" is the raw unwrapped DBAPI connection.  "value" is
        True or False.  When True, the connection should be configured
        such that a DDL can take place subsequently.  When False, a DDL
        has taken place and the connection should be resumed into
        non-autocommit mode.

        """
        raise NotImplementedError()
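
    # A sketch of one possible subclass implementation, assuming a DBAPI
    # whose connections expose an ``autocommit`` attribute (as pyodbc
    # does); the DBAPI-specific execution contexts supply the real
    # implementations:
    #
    #     def set_ddl_autocommit(self, connection, value):
    #         connection.autocommit = value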

    def pre_exec(self):
        if self.isinsert:
            tbl = self.compiled.statement.table
            seq_column = tbl._autoincrement_column
            insert_has_sequence = seq_column is not None

            if insert_has_sequence:
                self._enable_identity_insert = \
                    seq_column.key in self.compiled_parameters[0]
            else:
                self._enable_identity_insert = False

            if self._enable_identity_insert:
                self.cursor.execute(
                    "SET IDENTITY_INSERT %s ON" %
                    self.dialect.identifier_preparer.format_table(tbl))

        if self.isddl:
            # TODO: to enhance this, we can detect "ddl in tran" on the
            # database settings.  this error message should be improved to
            # include a note about that.
            if not self.should_autocommit:
                raise exc.InvalidRequestError(
                    "The Sybase dialect only supports "
                    "DDL in 'autocommit' mode at this time.")

            self.root_connection.engine.logger.info(
                "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")

            self.set_ddl_autocommit(
                self.root_connection.connection.connection,
                True)

    def post_exec(self):
        if self.isddl:
            # pass the raw DBAPI connection, matching pre_exec() and the
            # set_ddl_autocommit() contract above
            self.set_ddl_autocommit(
                self.root_connection.connection.connection, False)

        if self._enable_identity_insert:
            self.cursor.execute(
                "SET IDENTITY_INSERT %s OFF" %
                self.dialect.identifier_preparer.
                format_table(self.compiled.statement.table)
            )

    def get_lastrowid(self):
        cursor = self.create_cursor()
        cursor.execute("SELECT @@identity AS lastrowid")
        lastrowid = cursor.fetchone()[0]
        cursor.close()
        return lastrowid


class SybaseSQLCompiler(compiler.SQLCompiler):
    ansi_bind_rules = True

    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'doy': 'dayofyear',
            'dow': 'weekday',
            'milliseconds': 'millisecond'
        })

    def get_select_precolumns(self, select, **kw):
        s = select._distinct and "DISTINCT " or ""

        # TODO: don't think Sybase supports
        # bind params for FIRST / TOP
        limit = select._limit
        if limit:
            # if select._limit == 1:
            #     s += "FIRST "
            # else:
            #     s += "TOP %s " % (select._limit,)
            s += "TOP %s " % (limit,)
        offset = select._offset
        if offset:
            raise NotImplementedError("Sybase ASE does not support OFFSET")
        return s
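
    # Note: a query such as select([t]).limit(5) thus renders as
    # "SELECT TOP 5 ..."; ASE has no OFFSET equivalent, hence the
    # NotImplementedError above.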

    def get_from_hint_text(self, table, text):
        return text

    def limit_clause(self, select, **kw):
        # Limit in sybase is after the select keyword
        return ""

    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % (
            field, self.process(extract.expr, **kw))

    def visit_now_func(self, fn, **kw):
        return "GETDATE()"

    def for_update_clause(self, select):
        # "FOR UPDATE" is only allowed on "DECLARE CURSOR"
        # which SQLAlchemy doesn't use
        return ''

    def order_by_clause(self, select, **kw):
        kw['literal_binds'] = True
        order_by = self.process(select._order_by_clause, **kw)

        # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
        if order_by and (not self.is_subquery() or select._limit):
            return " ORDER BY " + order_by
        else:
            return ""


class SybaseDDLCompiler(compiler.DDLCompiler):
    def get_column_specification(self, column, **kwargs):
        colspec = self.preparer.format_column(column) + " " + \
            self.dialect.type_compiler.process(
                column.type, type_expression=column)

        if column.table is None:
            raise exc.CompileError(
                "The Sybase dialect requires Table-bound "
                "columns in order to generate DDL")
        seq_col = column.table._autoincrement_column

        # install an IDENTITY Sequence if we have an implicit IDENTITY column
        if seq_col is column:
            sequence = isinstance(column.default, sa_schema.Sequence) \
                and column.default
            if sequence:
                start, increment = sequence.start or 1, \
                    sequence.increment or 1
            else:
                start, increment = 1, 1
            if (start, increment) == (1, 1):
                colspec += " IDENTITY"
            else:
                # TODO: need correct syntax for this
                colspec += " IDENTITY(%s,%s)" % (start, increment)
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        if column.nullable is not None:
            if not column.nullable or column.primary_key:
                colspec += " NOT NULL"
            else:
                colspec += " NULL"

        return colspec
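
    # For illustration: an Integer primary-key column that is also the
    # table's implicit autoincrement column comes out roughly as
    # "id INTEGER IDENTITY NOT NULL" (a sketch; exact rendering depends
    # on the type compiler and column settings).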

    def visit_drop_index(self, drop):
        index = drop.element
        return "\nDROP INDEX %s.%s" % (
            self.preparer.quote_identifier(index.table.name),
            self._prepared_index_name(drop.element,
                                      include_schema=False)
        )


class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
    reserved_words = RESERVED_WORDS


class SybaseDialect(default.DefaultDialect):
    name = 'sybase'
    supports_unicode_statements = False
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False
    supports_native_boolean = False
    supports_unicode_binds = False
    postfetch_lastrowid = True

    colspecs = {}
    ischema_names = ischema_names

    type_compiler = SybaseTypeCompiler
    statement_compiler = SybaseSQLCompiler
    ddl_compiler = SybaseDDLCompiler
    preparer = SybaseIdentifierPreparer
    inspector = SybaseInspector

    construct_arguments = []

    def _get_default_schema_name(self, connection):
        return connection.scalar(
            text("SELECT user_name() as user_name",
                 typemap={'user_name': Unicode})
        )

    def initialize(self, connection):
        super(SybaseDialect, self).initialize(connection)
        if self.server_version_info is not None and \
                self.server_version_info < (15, ):
            self.max_identifier_length = 30
        else:
            self.max_identifier_length = 255

    def get_table_id(self, connection, table_name, schema=None, **kw):
        """Fetch the id for schema.table_name.

        Several reflection methods require the table id.  The idea for
        using this method is that it can be fetched one time and cached
        for subsequent calls.

        """
        table_id = None
        if schema is None:
            schema = self.default_schema_name

        TABLEID_SQL = text("""
          SELECT o.id AS id
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE u.name = :schema_name
              AND o.name = :table_name
              AND o.type in ('U', 'V')
        """)

        if util.py2k:
            if isinstance(schema, unicode):
                schema = schema.encode("ascii")
            if isinstance(table_name, unicode):
                table_name = table_name.encode("ascii")
        result = connection.execute(TABLEID_SQL,
                                    schema_name=schema,
                                    table_name=table_name)
        table_id = result.scalar()
        if table_id is None:
            raise exc.NoSuchTableError(table_name)
        return table_id

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        COLUMN_SQL = text("""
          SELECT col.name AS name,
                 t.name AS type,
                 (col.status & 8) AS nullable,
                 (col.status & 128) AS autoincrement,
                 com.text AS 'default',
                 col.prec AS precision,
                 col.scale AS scale,
                 col.length AS length
          FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
              col.cdefault = com.id
          WHERE col.usertype = t.usertype
              AND col.id = :table_id
          ORDER BY col.colid
        """)

        results = connection.execute(COLUMN_SQL, table_id=table_id)

        columns = []
        for (name, type_, nullable, autoincrement, default, precision, scale,
             length) in results:
            col_info = self._get_column_info(name, type_, bool(nullable),
                                             bool(autoincrement),
                                             default, precision, scale,
                                             length)
            columns.append(col_info)

        return columns
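
    # The bitmask tests in COLUMN_SQL reflect what this dialect assumes
    # about ASE's syscolumns.status encoding: bit 0x08 flags a nullable
    # column and bit 0x80 an IDENTITY (autoincrement) column.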

    def _get_column_info(self, name, type_, nullable, autoincrement, default,
                         precision, scale, length):

        coltype = self.ischema_names.get(type_, None)

        kwargs = {}

        if coltype in (NUMERIC, DECIMAL):
            args = (precision, scale)
        elif coltype == FLOAT:
            args = (precision,)
        elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
            args = (length,)
        else:
            args = ()

        if coltype:
            coltype = coltype(*args, **kwargs)
            # is this necessary
            # if is_array:
            #     coltype = ARRAY(coltype)
        else:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (type_, name))
            coltype = sqltypes.NULLTYPE

        if default:
            default = default.replace("DEFAULT", "").strip()
            default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
        else:
            default = None

        column_info = dict(name=name, type=coltype, nullable=nullable,
                           default=default, autoincrement=autoincrement)
        return column_info
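
    # For example, a syscomments default of "DEFAULT 'active'" comes back
    # from _get_column_info() above simply as the string "active".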

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        table_cache = {}
        column_cache = {}
        foreign_keys = []

        table_cache[table_id] = {"name": table_name, "schema": schema}

        COLUMN_SQL = text("""
          SELECT c.colid AS id, c.name AS name
          FROM syscolumns c
          WHERE c.id = :table_id
        """)

        results = connection.execute(COLUMN_SQL, table_id=table_id)
        columns = {}
        for col in results:
            columns[col["id"]] = col["name"]
        column_cache[table_id] = columns

        REFCONSTRAINT_SQL = text("""
          SELECT o.name AS name, r.reftabid AS reftable_id,
            r.keycnt AS 'count',
            r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
            r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
            r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
            r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
            r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
            r.fokey16 AS fokey16,
            r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
            r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
            r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
            r.refkey10 AS refkey10, r.refkey11 AS refkey11,
            r.refkey12 AS refkey12, r.refkey13 AS refkey13,
            r.refkey14 AS refkey14, r.refkey15 AS refkey15,
            r.refkey16 AS refkey16
          FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
          WHERE r.tableid = :table_id
        """)

        referential_constraints = connection.execute(
            REFCONSTRAINT_SQL, table_id=table_id).fetchall()

        REFTABLE_SQL = text("""
          SELECT o.name AS name, u.name AS 'schema'
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE o.id = :table_id
        """)

        for r in referential_constraints:
            reftable_id = r["reftable_id"]

            if reftable_id not in table_cache:
                c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
                reftable = c.fetchone()
                c.close()
                table_info = {"name": reftable["name"], "schema": None}
                if (schema is not None or
                        reftable["schema"] != self.default_schema_name):
                    table_info["schema"] = reftable["schema"]

                table_cache[reftable_id] = table_info
                results = connection.execute(COLUMN_SQL,
                                             table_id=reftable_id)
                reftable_columns = {}
                for col in results:
                    reftable_columns[col["id"]] = col["name"]
                column_cache[reftable_id] = reftable_columns

            reftable = table_cache[reftable_id]
            reftable_columns = column_cache[reftable_id]

            constrained_columns = []
            referred_columns = []
            for i in range(1, r["count"] + 1):
                constrained_columns.append(columns[r["fokey%i" % i]])
                referred_columns.append(reftable_columns[r["refkey%i" % i]])

            fk_info = {
                "constrained_columns": constrained_columns,
                "referred_schema": reftable["schema"],
                "referred_table": reftable["name"],
                "referred_columns": referred_columns,
                "name": r["name"]
            }

            foreign_keys.append(fk_info)

        return foreign_keys

    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        INDEX_SQL = text("""
          SELECT object_name(i.id) AS table_name,
                 i.keycnt AS 'count',
                 i.name AS name,
                 (i.status & 0x2) AS 'unique',
                 index_col(object_name(i.id), i.indid, 1) AS col_1,
                 index_col(object_name(i.id), i.indid, 2) AS col_2,
                 index_col(object_name(i.id), i.indid, 3) AS col_3,
                 index_col(object_name(i.id), i.indid, 4) AS col_4,
                 index_col(object_name(i.id), i.indid, 5) AS col_5,
                 index_col(object_name(i.id), i.indid, 6) AS col_6,
                 index_col(object_name(i.id), i.indid, 7) AS col_7,
                 index_col(object_name(i.id), i.indid, 8) AS col_8,
                 index_col(object_name(i.id), i.indid, 9) AS col_9,
                 index_col(object_name(i.id), i.indid, 10) AS col_10,
                 index_col(object_name(i.id), i.indid, 11) AS col_11,
                 index_col(object_name(i.id), i.indid, 12) AS col_12,
                 index_col(object_name(i.id), i.indid, 13) AS col_13,
                 index_col(object_name(i.id), i.indid, 14) AS col_14,
                 index_col(object_name(i.id), i.indid, 15) AS col_15,
                 index_col(object_name(i.id), i.indid, 16) AS col_16
          FROM sysindexes i, sysobjects o
          WHERE o.id = i.id
            AND o.id = :table_id
            AND (i.status & 2048) = 0
            AND i.indid BETWEEN 1 AND 254
        """)

        results = connection.execute(INDEX_SQL, table_id=table_id)
        indexes = []
        for r in results:
            column_names = []
            for i in range(1, r["count"]):
                column_names.append(r["col_%i" % (i,)])
            index_info = {"name": r["name"],
                          "unique": bool(r["unique"]),
                          "column_names": column_names}
            indexes.append(index_info)

        return indexes

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        PK_SQL = text("""
          SELECT object_name(i.id) AS table_name,
                 i.keycnt AS 'count',
                 i.name AS name,
                 index_col(object_name(i.id), i.indid, 1) AS pk_1,
                 index_col(object_name(i.id), i.indid, 2) AS pk_2,
                 index_col(object_name(i.id), i.indid, 3) AS pk_3,
                 index_col(object_name(i.id), i.indid, 4) AS pk_4,
                 index_col(object_name(i.id), i.indid, 5) AS pk_5,
                 index_col(object_name(i.id), i.indid, 6) AS pk_6,
                 index_col(object_name(i.id), i.indid, 7) AS pk_7,
                 index_col(object_name(i.id), i.indid, 8) AS pk_8,
                 index_col(object_name(i.id), i.indid, 9) AS pk_9,
                 index_col(object_name(i.id), i.indid, 10) AS pk_10,
                 index_col(object_name(i.id), i.indid, 11) AS pk_11,
                 index_col(object_name(i.id), i.indid, 12) AS pk_12,
                 index_col(object_name(i.id), i.indid, 13) AS pk_13,
                 index_col(object_name(i.id), i.indid, 14) AS pk_14,
                 index_col(object_name(i.id), i.indid, 15) AS pk_15,
                 index_col(object_name(i.id), i.indid, 16) AS pk_16
          FROM sysindexes i, sysobjects o
          WHERE o.id = i.id
            AND o.id = :table_id
            AND (i.status & 2048) = 2048
            AND i.indid BETWEEN 1 AND 254
        """)

        results = connection.execute(PK_SQL, table_id=table_id)
        pks = results.fetchone()
        results.close()

        constrained_columns = []
        if pks:
            for i in range(1, pks["count"] + 1):
                constrained_columns.append(pks["pk_%i" % (i,)])
            return {"constrained_columns": constrained_columns,
                    "name": pks["name"]}
        else:
            return {"constrained_columns": [], "name": None}

    @reflection.cache
    def get_schema_names(self, connection, **kw):

        SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")

        schemas = connection.execute(SCHEMA_SQL)

        return [s["name"] for s in schemas]

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        if schema is None:
            schema = self.default_schema_name

        TABLE_SQL = text("""
          SELECT o.name AS name
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE u.name = :schema_name
              AND o.type = 'U'
        """)

        if util.py2k:
            if isinstance(schema, unicode):
                schema = schema.encode("ascii")

        tables = connection.execute(TABLE_SQL, schema_name=schema)

        return [t["name"] for t in tables]

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        if schema is None:
            schema = self.default_schema_name

        VIEW_DEF_SQL = text("""
          SELECT c.text
          FROM syscomments c JOIN sysobjects o ON c.id = o.id
          WHERE o.name = :view_name
              AND o.type = 'V'
        """)

        if util.py2k:
            if isinstance(view_name, unicode):
                view_name = view_name.encode("ascii")

        view = connection.execute(VIEW_DEF_SQL, view_name=view_name)

        return view.scalar()

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        if schema is None:
            schema = self.default_schema_name

        VIEW_SQL = text("""
          SELECT o.name AS name
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE u.name = :schema_name
              AND o.type = 'V'
        """)

        if util.py2k:
            if isinstance(schema, unicode):
                schema = schema.encode("ascii")

        views = connection.execute(VIEW_SQL, schema_name=schema)

        return [v["name"] for v in views]

    def has_table(self, connection, table_name, schema=None):
        try:
            self.get_table_id(connection, table_name, schema)
        except exc.NoSuchTableError:
            return False
        else:
            return True