morro
12
sqlalchemy/dialects/__init__.py
Normal file
@@ -0,0 +1,12 @@
__all__ = (
#    'access',
#    'firebird',
#    'informix',
#    'maxdb',
#    'mssql',
    'mysql',
    'oracle',
    'postgresql',
    'sqlite',
#    'sybase',
    )
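Each name in this tuple is an importable package under ``sqlalchemy.dialects``; an engine URL picks one by its scheme. A minimal sketch (illustrative, not part of the commit)::

    from sqlalchemy import create_engine

    # "sqlite" in the URL maps to the sqlalchemy.dialects.sqlite package listed above
    engine = create_engine("sqlite:///example.db")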
0
sqlalchemy/dialects/access/__init__.py
Normal file
418
sqlalchemy/dialects/access/base.py
Normal file
@@ -0,0 +1,418 @@
# access.py
# Copyright (C) 2007 Paul Johnston, paj@pajhome.org.uk
# Portions derived from jet2sql.py by Matt Keranen, mksql@yahoo.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""
Support for the Microsoft Access database.

This dialect is *not* ported to SQLAlchemy 0.6.

This dialect is *not* tested on SQLAlchemy 0.6.

"""
from sqlalchemy import sql, schema, types, exc, pool
from sqlalchemy.sql import compiler, expression
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import processors


class AcNumeric(types.Numeric):
    def get_col_spec(self):
        return "NUMERIC"

    def bind_processor(self, dialect):
        return processors.to_str

    def result_processor(self, dialect, coltype):
        return None


class AcFloat(types.Float):
    def get_col_spec(self):
        return "FLOAT"

    def bind_processor(self, dialect):
        """By converting to string, we can use Decimal types round-trip."""
        return processors.to_str


class AcInteger(types.Integer):
    def get_col_spec(self):
        return "INTEGER"


class AcTinyInteger(types.Integer):
    def get_col_spec(self):
        return "TINYINT"


class AcSmallInteger(types.SmallInteger):
    def get_col_spec(self):
        return "SMALLINT"


class AcDateTime(types.DateTime):
    def __init__(self, *a, **kw):
        super(AcDateTime, self).__init__(False)

    def get_col_spec(self):
        return "DATETIME"


class AcDate(types.Date):
    def __init__(self, *a, **kw):
        super(AcDate, self).__init__(False)

    def get_col_spec(self):
        return "DATETIME"


class AcText(types.Text):
    def get_col_spec(self):
        return "MEMO"


class AcString(types.String):
    def get_col_spec(self):
        return "TEXT" + (self.length and ("(%d)" % self.length) or "")


class AcUnicode(types.Unicode):
    def get_col_spec(self):
        return "TEXT" + (self.length and ("(%d)" % self.length) or "")

    def bind_processor(self, dialect):
        return None

    def result_processor(self, dialect, coltype):
        return None


class AcChar(types.CHAR):
    def get_col_spec(self):
        return "TEXT" + (self.length and ("(%d)" % self.length) or "")


class AcBinary(types.LargeBinary):
    def get_col_spec(self):
        return "BINARY"


class AcBoolean(types.Boolean):
    def get_col_spec(self):
        return "YESNO"


class AcTimeStamp(types.TIMESTAMP):
    def get_col_spec(self):
        return "TIMESTAMP"


class AccessExecutionContext(default.DefaultExecutionContext):
    def _has_implicit_sequence(self, column):
        if column.primary_key and column.autoincrement:
            if isinstance(column.type, types.Integer) and not column.foreign_keys:
                if column.default is None or (isinstance(column.default, schema.Sequence) and
                                              column.default.optional):
                    return True
        return False

    def post_exec(self):
        """If we inserted into a table with a COUNTER column, fetch the ID"""

        if self.compiled.isinsert:
            tbl = self.compiled.statement.table
            if not hasattr(tbl, 'has_sequence'):
                tbl.has_sequence = None
                for column in tbl.c:
                    if getattr(column, 'sequence', False) or self._has_implicit_sequence(column):
                        tbl.has_sequence = column
                        break

            if bool(tbl.has_sequence):
                # TBD: for some reason _last_inserted_ids doesn't exist here
                # (but it does at corresponding point in mssql???)
                #if not len(self._last_inserted_ids) or self._last_inserted_ids[0] is None:
                self.cursor.execute("SELECT @@identity AS lastrowid")
                row = self.cursor.fetchone()
                self._last_inserted_ids = [int(row[0])]  #+ self._last_inserted_ids[1:]
                # print "LAST ROW ID", self._last_inserted_ids

        super(AccessExecutionContext, self).post_exec()


const, daoEngine = None, None

class AccessDialect(default.DefaultDialect):
    colspecs = {
        types.Unicode: AcUnicode,
        types.Integer: AcInteger,
        types.SmallInteger: AcSmallInteger,
        types.Numeric: AcNumeric,
        types.Float: AcFloat,
        types.DateTime: AcDateTime,
        types.Date: AcDate,
        types.String: AcString,
        types.LargeBinary: AcBinary,
        types.Boolean: AcBoolean,
        types.Text: AcText,
        types.CHAR: AcChar,
        types.TIMESTAMP: AcTimeStamp,
    }
    name = 'access'
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False

    ported_sqla_06 = False

    def type_descriptor(self, typeobj):
        newobj = types.adapt_type(typeobj, self.colspecs)
        return newobj

    def __init__(self, **params):
        super(AccessDialect, self).__init__(**params)
        self.text_as_varchar = False
        self._dtbs = None

    def dbapi(cls):
        import win32com.client, pythoncom

        global const, daoEngine
        if const is None:
            const = win32com.client.constants
            for suffix in (".36", ".35", ".30"):
                try:
                    daoEngine = win32com.client.gencache.EnsureDispatch("DAO.DBEngine" + suffix)
                    break
                except pythoncom.com_error:
                    pass
            else:
                raise exc.InvalidRequestError("Can't find a DB engine. Check http://support.microsoft.com/kb/239114 for details.")

        import pyodbc as module
        return module
    dbapi = classmethod(dbapi)

    def create_connect_args(self, url):
        opts = url.translate_connect_args()
        connectors = ["Driver={Microsoft Access Driver (*.mdb)}"]
        connectors.append("Dbq=%s" % opts["database"])
        user = opts.get("username", None)
        if user:
            connectors.append("UID=%s" % user)
            connectors.append("PWD=%s" % opts.get("password", ""))
        return [[";".join(connectors)], {}]

    def last_inserted_ids(self):
        return self.context.last_inserted_ids

    def do_execute(self, cursor, statement, params, **kwargs):
        if params == {}:
            params = ()
        super(AccessDialect, self).do_execute(cursor, statement, params, **kwargs)

    def _execute(self, c, statement, parameters):
        try:
            if parameters == {}:
                parameters = ()
            c.execute(statement, parameters)
            self.context.rowcount = c.rowcount
        except Exception, e:
            raise exc.DBAPIError.instance(statement, parameters, e)

    def has_table(self, connection, tablename, schema=None):
        # This approach seems to be more reliable than using DAO
        try:
            connection.execute('select top 1 * from [%s]' % tablename)
            return True
        except Exception, e:
            return False

    def reflecttable(self, connection, table, include_columns):
        # This is defined in the function, as it relies on win32com constants
        # which aren't imported until the dbapi method is called
        if not hasattr(self, 'ischema_names'):
            self.ischema_names = {
                const.dbByte: AcBinary,
                const.dbInteger: AcInteger,
                const.dbLong: AcInteger,
                const.dbSingle: AcFloat,
                const.dbDouble: AcFloat,
                const.dbDate: AcDateTime,
                const.dbLongBinary: AcBinary,
                const.dbMemo: AcText,
                const.dbBoolean: AcBoolean,
                const.dbText: AcUnicode,  # All Access strings are unicode
                const.dbCurrency: AcNumeric,
            }

        # A fresh DAO connection is opened for each reflection
        # This is necessary, so we get the latest updates
        dtbs = daoEngine.OpenDatabase(connection.engine.url.database)

        try:
            for tbl in dtbs.TableDefs:
                if tbl.Name.lower() == table.name.lower():
                    break
            else:
                raise exc.NoSuchTableError(table.name)

            for col in tbl.Fields:
                coltype = self.ischema_names[col.Type]
                if col.Type == const.dbText:
                    coltype = coltype(col.Size)

                colargs = {
                    'nullable': not (col.Required or col.Attributes & const.dbAutoIncrField),
                }
                default = col.DefaultValue

                if col.Attributes & const.dbAutoIncrField:
                    colargs['default'] = schema.Sequence(col.Name + '_seq')
                elif default:
                    if col.Type == const.dbBoolean:
                        default = default == 'Yes' and '1' or '0'
                    colargs['server_default'] = schema.DefaultClause(sql.text(default))

                table.append_column(schema.Column(col.Name, coltype, **colargs))

            # TBD: check constraints

            # Find primary key columns first
            for idx in tbl.Indexes:
                if idx.Primary:
                    for col in idx.Fields:
                        thecol = table.c[col.Name]
                        table.primary_key.add(thecol)
                        if isinstance(thecol.type, AcInteger) and \
                                not (thecol.default and isinstance(thecol.default.arg, schema.Sequence)):
                            thecol.autoincrement = False

            # Then add other indexes
            for idx in tbl.Indexes:
                if not idx.Primary:
                    if len(idx.Fields) == 1:
                        col = table.c[idx.Fields[0].Name]
                        if not col.primary_key:
                            col.index = True
                            col.unique = idx.Unique
                    else:
                        pass  # TBD: multi-column indexes

            for fk in dtbs.Relations:
                if fk.ForeignTable != table.name:
                    continue
                scols = [c.ForeignName for c in fk.Fields]
                rcols = ['%s.%s' % (fk.Table, c.Name) for c in fk.Fields]
                table.append_constraint(schema.ForeignKeyConstraint(scols, rcols, link_to_name=True))

        finally:
            dtbs.Close()

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        # A fresh DAO connection is opened for each reflection
        # This is necessary, so we get the latest updates
        dtbs = daoEngine.OpenDatabase(connection.engine.url.database)

        names = [t.Name for t in dtbs.TableDefs if t.Name[:4] != "MSys" and t.Name[:4] != "~TMP"]
        dtbs.Close()
        return names


class AccessCompiler(compiler.SQLCompiler):
    extract_map = compiler.SQLCompiler.extract_map.copy()
    extract_map.update({
        'month': 'm',
        'day': 'd',
        'year': 'yyyy',
        'second': 's',
        'hour': 'h',
        'doy': 'y',
        'minute': 'n',
        'quarter': 'q',
        'dow': 'w',
        'week': 'ww'
    })

    def visit_select_precolumns(self, select):
        """Access puts TOP, its version of LIMIT, here"""
        s = select.distinct and "DISTINCT " or ""
        if select.limit:
            s += "TOP %s " % (select.limit)
        if select.offset:
            raise exc.InvalidRequestError('Access does not support LIMIT with an offset')
        return s

    def limit_clause(self, select):
        """Limit in access is after the select keyword"""
        return ""

    def binary_operator_string(self, binary):
        """Access uses "mod" instead of "%" """
        return binary.operator == '%' and 'mod' or binary.operator

    def label_select_column(self, select, column, asfrom):
        if isinstance(column, expression.Function):
            return column.label()
        else:
            return super(AccessCompiler, self).label_select_column(select, column, asfrom)

    function_rewrites = {'current_date': 'now',
                         'current_timestamp': 'now',
                         'length': 'len',
                         }

    def visit_function(self, func):
        """Access function names differ from the ANSI SQL names; rewrite common ones"""
        func.name = self.function_rewrites.get(func.name, func.name)
        return super(AccessCompiler, self).visit_function(func)

    def for_update_clause(self, select):
        """FOR UPDATE is not supported by Access; silently ignore"""
        return ''

    # Strip schema
    def visit_table(self, table, asfrom=False, **kwargs):
        if asfrom:
            return self.preparer.quote(table.name, table.quote)
        else:
            return ""

    def visit_join(self, join, asfrom=False, **kwargs):
        return (self.process(join.left, asfrom=True) +
                (join.isouter and " LEFT OUTER JOIN " or " INNER JOIN ") +
                self.process(join.right, asfrom=True) + " ON " + self.process(join.onclause))

    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % (field, self.process(extract.expr, **kw))


class AccessDDLCompiler(compiler.DDLCompiler):
    def get_column_specification(self, column, **kwargs):
        colspec = self.preparer.format_column(column) + " " + column.type.dialect_impl(self.dialect).get_col_spec()

        # install a sequence if we have an implicit IDENTITY column
        if (not getattr(column.table, 'has_sequence', False)) and column.primary_key and \
                column.autoincrement and isinstance(column.type, types.Integer) and not column.foreign_keys:
            if column.default is None or (isinstance(column.default, schema.Sequence) and column.default.optional):
                column.sequence = schema.Sequence(column.name + '_seq')

        if not column.nullable:
            colspec += " NOT NULL"

        if hasattr(column, 'sequence'):
            column.table.has_sequence = column
            colspec = self.preparer.format_column(column) + " counter"
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        return colspec

    def visit_drop_index(self, drop):
        index = drop.element
        self.append("\nDROP INDEX [%s].[%s]" % (index.table.name, self._validate_identifier(index.name, False)))


class AccessIdentifierPreparer(compiler.IdentifierPreparer):
    reserved_words = compiler.RESERVED_WORDS.copy()
    reserved_words.update(['value', 'text'])

    def __init__(self, dialect):
        super(AccessIdentifierPreparer, self).__init__(dialect, initial_quote='[', final_quote=']')


dialect = AccessDialect
dialect.poolclass = pool.SingletonThreadPool
dialect.statement_compiler = AccessCompiler
dialect.ddlcompiler = AccessDDLCompiler
dialect.preparer = AccessIdentifierPreparer
dialect.execution_ctx_cls = AccessExecutionContext
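For reference, a sketch of what ``create_connect_args`` above assembles for a hypothetical Access URL (the URL and path are invented for illustration)::

    from sqlalchemy.engine.url import make_url

    url = make_url("access://scott:tiger@/C:/data/sales.mdb")
    # AccessDialect().create_connect_args(url) returns roughly:
    # [["Driver={Microsoft Access Driver (*.mdb)};Dbq=C:/data/sales.mdb;UID=scott;PWD=tiger"], {}]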
16
sqlalchemy/dialects/firebird/__init__.py
Normal file
@@ -0,0 +1,16 @@
from sqlalchemy.dialects.firebird import base, kinterbasdb

base.dialect = kinterbasdb.dialect

from sqlalchemy.dialects.firebird.base import \
    SMALLINT, BIGINT, FLOAT, FLOAT, DATE, TIME, \
    TEXT, NUMERIC, FLOAT, TIMESTAMP, VARCHAR, CHAR, BLOB,\
    dialect

__all__ = (
    'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME',
    'TEXT', 'NUMERIC', 'FLOAT', 'TIMESTAMP', 'VARCHAR', 'CHAR', 'BLOB',
    'dialect'
)
619
sqlalchemy/dialects/firebird/base.py
Normal file
@@ -0,0 +1,619 @@
# firebird.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""
Support for the Firebird database.

Connectivity is usually supplied via the kinterbasdb_ DBAPI module.

Dialects
~~~~~~~~

Firebird offers two distinct dialects_ (not to be confused with a
SQLAlchemy ``Dialect``):

dialect 1
  This is the old syntax and behaviour, inherited from Interbase pre-6.0.

dialect 3
  This is the newer and supported syntax, introduced in Interbase 6.0.

The SQLAlchemy Firebird dialect detects these versions and
adjusts its representation of SQL accordingly. However,
support for dialect 1 is not well tested and probably has
incompatibilities.

Locking Behavior
~~~~~~~~~~~~~~~~

Firebird locks tables aggressively. For this reason, a DROP TABLE may
hang until other transactions are released. SQLAlchemy does its best
to release transactions as quickly as possible. The most common cause
of hanging transactions is a non-fully consumed result set, i.e.::

    result = engine.execute("select * from table")
    row = result.fetchone()
    return

Where above, the ``ResultProxy`` has not been fully consumed. The
connection will be returned to the pool and the transactional state
rolled back once the Python garbage collector reclaims the objects
which hold onto the connection, which often occurs asynchronously.
The above use case can be alleviated by calling ``first()`` on the
``ResultProxy`` which will fetch the first row and immediately close
all remaining cursor/connection resources.

RETURNING support
~~~~~~~~~~~~~~~~~

Firebird 2.0 supports returning a result set from inserts, and 2.1
extends that to deletes and updates. This is generically exposed by
the SQLAlchemy ``returning()`` method, such as::

    # INSERT..RETURNING
    result = table.insert().returning(table.c.col1, table.c.col2).\\
                   values(name='foo')
    print result.fetchall()

    # UPDATE..RETURNING
    raises = empl.update().returning(empl.c.id, empl.c.salary).\\
                  where(empl.c.sales>100).\\
                  values(dict(salary=empl.c.salary * 1.1))
    print raises.fetchall()


.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html

"""

import datetime, re

from sqlalchemy import schema as sa_schema
from sqlalchemy import exc, types as sqltypes, sql, util
from sqlalchemy.sql import expression
from sqlalchemy.engine import base, default, reflection
from sqlalchemy.sql import compiler


from sqlalchemy.types import (BIGINT, BLOB, BOOLEAN, CHAR, DATE,
                              FLOAT, INTEGER, NUMERIC, SMALLINT,
                              TEXT, TIME, TIMESTAMP, VARCHAR)


RESERVED_WORDS = set([
    "active", "add", "admin", "after", "all", "alter", "and", "any", "as",
    "asc", "ascending", "at", "auto", "avg", "before", "begin", "between",
    "bigint", "bit_length", "blob", "both", "by", "case", "cast", "char",
    "character", "character_length", "char_length", "check", "close",
    "collate", "column", "commit", "committed", "computed", "conditional",
    "connect", "constraint", "containing", "count", "create", "cross",
    "cstring", "current", "current_connection", "current_date",
    "current_role", "current_time", "current_timestamp",
    "current_transaction", "current_user", "cursor", "database", "date",
    "day", "dec", "decimal", "declare", "default", "delete", "desc",
    "descending", "disconnect", "distinct", "do", "domain", "double",
    "drop", "else", "end", "entry_point", "escape", "exception",
    "execute", "exists", "exit", "external", "extract", "fetch", "file",
    "filter", "float", "for", "foreign", "from", "full", "function",
    "gdscode", "generator", "gen_id", "global", "grant", "group",
    "having", "hour", "if", "in", "inactive", "index", "inner",
    "input_type", "insensitive", "insert", "int", "integer", "into", "is",
    "isolation", "join", "key", "leading", "left", "length", "level",
    "like", "long", "lower", "manual", "max", "maximum_segment", "merge",
    "min", "minute", "module_name", "month", "names", "national",
    "natural", "nchar", "no", "not", "null", "numeric", "octet_length",
    "of", "on", "only", "open", "option", "or", "order", "outer",
    "output_type", "overflow", "page", "pages", "page_size", "parameter",
    "password", "plan", "position", "post_event", "precision", "primary",
    "privileges", "procedure", "protected", "rdb$db_key", "read", "real",
    "record_version", "recreate", "recursive", "references", "release",
    "reserv", "reserving", "retain", "returning_values", "returns",
    "revoke", "right", "rollback", "rows", "row_count", "savepoint",
    "schema", "second", "segment", "select", "sensitive", "set", "shadow",
    "shared", "singular", "size", "smallint", "snapshot", "some", "sort",
    "sqlcode", "stability", "start", "starting", "starts", "statistics",
    "sub_type", "sum", "suspend", "table", "then", "time", "timestamp",
    "to", "trailing", "transaction", "trigger", "trim", "uncommitted",
    "union", "unique", "update", "upper", "user", "using", "value",
    "values", "varchar", "variable", "varying", "view", "wait", "when",
    "where", "while", "with", "work", "write", "year",
])


colspecs = {
}

ischema_names = {
    'SHORT': SMALLINT,
    'LONG': BIGINT,
    'QUAD': FLOAT,
    'FLOAT': FLOAT,
    'DATE': DATE,
    'TIME': TIME,
    'TEXT': TEXT,
    'INT64': NUMERIC,
    'DOUBLE': FLOAT,
    'TIMESTAMP': TIMESTAMP,
    'VARYING': VARCHAR,
    'CSTRING': CHAR,
    'BLOB': BLOB,
}


# TODO: date conversion types (should be implemented as _FBDateTime, _FBDate, etc.
# as bind/result functionality is required)

class FBTypeCompiler(compiler.GenericTypeCompiler):
    def visit_boolean(self, type_):
        return self.visit_SMALLINT(type_)

    def visit_datetime(self, type_):
        return self.visit_TIMESTAMP(type_)

    def visit_TEXT(self, type_):
        return "BLOB SUB_TYPE 1"

    def visit_BLOB(self, type_):
        return "BLOB SUB_TYPE 0"


class FBCompiler(sql.compiler.SQLCompiler):
    """Firebird specific idiosyncrasies"""

    def visit_mod(self, binary, **kw):
        # Firebird lacks a builtin modulo operator, but there is
        # an equivalent function in the ib_udf library.
        return "mod(%s, %s)" % (self.process(binary.left), self.process(binary.right))

    def visit_alias(self, alias, asfrom=False, **kwargs):
        if self.dialect._version_two:
            return super(FBCompiler, self).visit_alias(alias, asfrom=asfrom, **kwargs)
        else:
            # Override to not use the AS keyword which FB 1.5 does not like
            if asfrom:
                alias_name = isinstance(alias.name, expression._generated_label) and \
                             self._truncated_identifier("alias", alias.name) or alias.name

                return self.process(alias.original, asfrom=asfrom, **kwargs) + " " + \
                       self.preparer.format_alias(alias, alias_name)
            else:
                return self.process(alias.original, **kwargs)

    def visit_substring_func(self, func, **kw):
        s = self.process(func.clauses.clauses[0])
        start = self.process(func.clauses.clauses[1])
        if len(func.clauses.clauses) > 2:
            length = self.process(func.clauses.clauses[2])
            return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
        else:
            return "SUBSTRING(%s FROM %s)" % (s, start)

    def visit_length_func(self, function, **kw):
        if self.dialect._version_two:
            return "char_length" + self.function_argspec(function)
        else:
            return "strlen" + self.function_argspec(function)

    visit_char_length_func = visit_length_func

    def function_argspec(self, func, **kw):
        if func.clauses is not None and len(func.clauses):
            return self.process(func.clause_expr)
        else:
            return ""

    def default_from(self):
        return " FROM rdb$database"

    def visit_sequence(self, seq):
        return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)

    def get_select_precolumns(self, select):
        """Called when building a ``SELECT`` statement; position is just
        before the column list. Firebird puts the limit and offset right
        after the ``SELECT`` keyword.
        """

        result = ""
        if select._limit:
            result += "FIRST %d " % select._limit
        if select._offset:
            result += "SKIP %d " % select._offset
        if select._distinct:
            result += "DISTINCT "
        return result

    def limit_clause(self, select):
        """Already taken care of in the `get_select_precolumns` method."""

        return ""

    def returning_clause(self, stmt, returning_cols):

        columns = [
            self.process(
                self.label_select_column(None, c, asfrom=False),
                within_columns_clause=True,
                result_map=self.result_map
            )
            for c in expression._select_iterables(returning_cols)
        ]
        return 'RETURNING ' + ', '.join(columns)


class FBDDLCompiler(sql.compiler.DDLCompiler):
    """Firebird syntactic idiosyncrasies"""

    def visit_create_sequence(self, create):
        """Generate a ``CREATE GENERATOR`` statement for the sequence."""

        # no syntax for these
        # http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
        if create.element.start is not None:
            raise NotImplementedError("Firebird SEQUENCE doesn't support START WITH")
        if create.element.increment is not None:
            raise NotImplementedError("Firebird SEQUENCE doesn't support INCREMENT BY")

        if self.dialect._version_two:
            return "CREATE SEQUENCE %s" % self.preparer.format_sequence(create.element)
        else:
            return "CREATE GENERATOR %s" % self.preparer.format_sequence(create.element)

    def visit_drop_sequence(self, drop):
        """Generate a ``DROP GENERATOR`` statement for the sequence."""

        if self.dialect._version_two:
            return "DROP SEQUENCE %s" % self.preparer.format_sequence(drop.element)
        else:
            return "DROP GENERATOR %s" % self.preparer.format_sequence(drop.element)


class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
    """Install Firebird specific reserved words."""

    reserved_words = RESERVED_WORDS

    def __init__(self, dialect):
        super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)


class FBExecutionContext(default.DefaultExecutionContext):
    def fire_sequence(self, seq):
        """Get the next value from the sequence using ``gen_id()``."""

        return self._execute_scalar("SELECT gen_id(%s, 1) FROM rdb$database" %
                                    self.dialect.identifier_preparer.format_sequence(seq))


class FBDialect(default.DefaultDialect):
    """Firebird dialect"""

    name = 'firebird'

    max_identifier_length = 31

    supports_sequences = True
    sequences_optional = False
    supports_default_values = True
    postfetch_lastrowid = False

    supports_native_boolean = False

    requires_name_normalize = True
    supports_empty_insert = False


    statement_compiler = FBCompiler
    ddl_compiler = FBDDLCompiler
    preparer = FBIdentifierPreparer
    type_compiler = FBTypeCompiler
    execution_ctx_cls = FBExecutionContext

    colspecs = colspecs
    ischema_names = ischema_names

    # defaults to dialect ver. 3;
    # will be autodetected upon
    # first connect
    _version_two = True

    def initialize(self, connection):
        super(FBDialect, self).initialize(connection)
        self._version_two = self.server_version_info > (2, )
        if not self._version_two:
            # TODO: whatever other pre < 2.0 stuff goes here
            self.ischema_names = ischema_names.copy()
            self.ischema_names['TIMESTAMP'] = sqltypes.DATE
            self.colspecs = {
                sqltypes.DateTime: sqltypes.DATE
            }
        else:
            self.implicit_returning = True

    def normalize_name(self, name):
        # Remove trailing spaces: FB uses a CHAR() type,
        # that is padded with spaces
        name = name and name.rstrip()
        if name is None:
            return None
        elif name.upper() == name and \
                not self.identifier_preparer._requires_quotes(name.lower()):
            return name.lower()
        else:
            return name

    def denormalize_name(self, name):
        if name is None:
            return None
        elif name.lower() == name and \
                not self.identifier_preparer._requires_quotes(name.lower()):
            return name.upper()
        else:
            return name

    def has_table(self, connection, table_name, schema=None):
        """Return ``True`` if the given table exists, ignoring the `schema`."""

        tblqry = """
        SELECT 1 FROM rdb$database
        WHERE EXISTS (SELECT rdb$relation_name
                      FROM rdb$relations
                      WHERE rdb$relation_name=?)
        """
        c = connection.execute(tblqry, [self.denormalize_name(table_name)])
        return c.first() is not None

    def has_sequence(self, connection, sequence_name, schema=None):
        """Return ``True`` if the given sequence (generator) exists."""

        genqry = """
        SELECT 1 FROM rdb$database
        WHERE EXISTS (SELECT rdb$generator_name
                      FROM rdb$generators
                      WHERE rdb$generator_name=?)
        """
        c = connection.execute(genqry, [self.denormalize_name(sequence_name)])
        return c.first() is not None

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        s = """
        SELECT DISTINCT rdb$relation_name
        FROM rdb$relation_fields
        WHERE rdb$system_flag=0 AND rdb$view_context IS NULL
        """
        return [self.normalize_name(row[0]) for row in connection.execute(s)]

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        s = """
        SELECT distinct rdb$view_name
        FROM rdb$view_relations
        """
        return [self.normalize_name(row[0]) for row in connection.execute(s)]

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        qry = """
        SELECT rdb$view_source AS view_source
        FROM rdb$relations
        WHERE rdb$relation_name=?
        """
        rp = connection.execute(qry, [self.denormalize_name(view_name)])
        row = rp.first()
        if row:
            return row['view_source']
        else:
            return None

    @reflection.cache
    def get_primary_keys(self, connection, table_name, schema=None, **kw):
        # Query to extract the PK/FK constrained fields of the given table
        keyqry = """
        SELECT se.rdb$field_name AS fname
        FROM rdb$relation_constraints rc
             JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
        WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
        """
        tablename = self.denormalize_name(table_name)
        # get primary key fields
        c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
        pkfields = [self.normalize_name(r['fname']) for r in c.fetchall()]
        return pkfields

    @reflection.cache
    def get_column_sequence(self, connection, table_name, column_name, schema=None, **kw):
        tablename = self.denormalize_name(table_name)
        colname = self.denormalize_name(column_name)
        # Heuristic-query to determine the generator associated to a PK field
        genqry = """
        SELECT trigdep.rdb$depended_on_name AS fgenerator
        FROM rdb$dependencies tabdep
             JOIN rdb$dependencies trigdep
                  ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
                     AND trigdep.rdb$depended_on_type=14
                     AND trigdep.rdb$dependent_type=2
             JOIN rdb$triggers trig ON trig.rdb$trigger_name=tabdep.rdb$dependent_name
        WHERE tabdep.rdb$depended_on_name=?
              AND tabdep.rdb$depended_on_type=0
              AND trig.rdb$trigger_type=1
              AND tabdep.rdb$field_name=?
              AND (SELECT count(*)
                   FROM rdb$dependencies trigdep2
                   WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
        """
        genr = connection.execute(genqry, [tablename, colname]).first()
        if genr is not None:
            return dict(name=self.normalize_name(genr['fgenerator']))

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of all the fields of the given table
        tblqry = """
        SELECT DISTINCT r.rdb$field_name AS fname,
                        r.rdb$null_flag AS null_flag,
                        t.rdb$type_name AS ftype,
                        f.rdb$field_sub_type AS stype,
                        f.rdb$field_length/COALESCE(cs.rdb$bytes_per_character,1) AS flen,
                        f.rdb$field_precision AS fprec,
                        f.rdb$field_scale AS fscale,
                        COALESCE(r.rdb$default_source, f.rdb$default_source) AS fdefault
        FROM rdb$relation_fields r
             JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
             JOIN rdb$types t
                  ON t.rdb$type=f.rdb$field_type AND t.rdb$field_name='RDB$FIELD_TYPE'
             LEFT JOIN rdb$character_sets cs ON f.rdb$character_set_id=cs.rdb$character_set_id
        WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
        ORDER BY r.rdb$field_position
        """
        # get the PK, used to determine the eventual associated sequence
        pkey_cols = self.get_primary_keys(connection, table_name)

        tablename = self.denormalize_name(table_name)
        # get all of the fields for this table
        c = connection.execute(tblqry, [tablename])
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            name = self.normalize_name(row['fname'])
            orig_colname = row['fname']

            # get the data type
            colspec = row['ftype'].rstrip()
            coltype = self.ischema_names.get(colspec)
            if coltype is None:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (colspec, name))
                coltype = sqltypes.NULLTYPE
            elif colspec == 'INT64':
                coltype = coltype(precision=row['fprec'], scale=row['fscale'] * -1)
            elif colspec in ('VARYING', 'CSTRING'):
                coltype = coltype(row['flen'])
            elif colspec == 'TEXT':
                coltype = TEXT(row['flen'])
            elif colspec == 'BLOB':
                if row['stype'] == 1:
                    coltype = TEXT()
                else:
                    coltype = BLOB()
            else:
                coltype = coltype(row)

            # does it have a default value?
            defvalue = None
            if row['fdefault'] is not None:
                # the value comes down as "DEFAULT 'value'": there may be
                # more than one whitespace around the "DEFAULT" keyword
                # (see also http://tracker.firebirdsql.org/browse/CORE-356)
                defexpr = row['fdefault'].lstrip()
                assert defexpr[:8].rstrip() == 'DEFAULT', "Unrecognized default value: %s" % defexpr
                defvalue = defexpr[8:].strip()
                if defvalue == 'NULL':
                    # Redundant
                    defvalue = None
            col_d = {
                'name': name,
                'type': coltype,
                'nullable': not bool(row['null_flag']),
                'default': defvalue
            }

            if orig_colname.lower() == orig_colname:
                col_d['quote'] = True

            # if the PK is a single field, try to see if its linked to
            # a sequence thru a trigger
            if len(pkey_cols) == 1 and name == pkey_cols[0]:
                seq_d = self.get_column_sequence(connection, tablename, name)
                if seq_d is not None:
                    col_d['sequence'] = seq_d

            cols.append(col_d)
        return cols

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of each UK/FK of the given table
        fkqry = """
        SELECT rc.rdb$constraint_name AS cname,
               cse.rdb$field_name AS fname,
               ix2.rdb$relation_name AS targetrname,
               se.rdb$field_name AS targetfname
        FROM rdb$relation_constraints rc
             JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
             JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
             JOIN rdb$index_segments cse ON cse.rdb$index_name=ix1.rdb$index_name
             JOIN rdb$index_segments se
                  ON se.rdb$index_name=ix2.rdb$index_name
                     AND se.rdb$field_position=cse.rdb$field_position
        WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
        ORDER BY se.rdb$index_name, se.rdb$field_position
        """
        tablename = self.denormalize_name(table_name)

        c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
        fks = util.defaultdict(lambda: {
            'name': None,
            'constrained_columns': [],
            'referred_schema': None,
            'referred_table': None,
            'referred_columns': []
        })

        for row in c:
            cname = self.normalize_name(row['cname'])
            fk = fks[cname]
            if not fk['name']:
                fk['name'] = cname
                fk['referred_table'] = self.normalize_name(row['targetrname'])
            fk['constrained_columns'].append(self.normalize_name(row['fname']))
            fk['referred_columns'].append(
                self.normalize_name(row['targetfname']))
        return fks.values()

    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        qry = """
        SELECT ix.rdb$index_name AS index_name,
               ix.rdb$unique_flag AS unique_flag,
               ic.rdb$field_name AS field_name
        FROM rdb$indices ix
             JOIN rdb$index_segments ic
                  ON ix.rdb$index_name=ic.rdb$index_name
             LEFT OUTER JOIN rdb$relation_constraints
                  ON rdb$relation_constraints.rdb$index_name = ic.rdb$index_name
        WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
              AND rdb$relation_constraints.rdb$constraint_type IS NULL
        ORDER BY index_name, field_name
        """
        c = connection.execute(qry, [self.denormalize_name(table_name)])

        indexes = util.defaultdict(dict)
        for row in c:
            indexrec = indexes[row['index_name']]
            if 'name' not in indexrec:
                indexrec['name'] = self.normalize_name(row['index_name'])
                indexrec['column_names'] = []
                indexrec['unique'] = bool(row['unique_flag'])

            indexrec['column_names'].append(self.normalize_name(row['field_name']))

        return indexes.values()

    def do_execute(self, cursor, statement, parameters, **kwargs):
        # kinterbasdb does not accept a None, but wants an empty list
        # when there are no arguments.
        cursor.execute(statement, parameters or [])

    def do_rollback(self, connection):
        # Use the retaining feature, that keeps the transaction going
        connection.rollback(True)

    def do_commit(self, connection):
        # Use the retaining feature, that keeps the transaction going
        connection.commit(True)
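A quick illustration (not part of the commit; table and column names invented) of the FIRST/SKIP handling in ``get_select_precolumns`` above::

    from sqlalchemy.sql import table, column, select
    from sqlalchemy.dialects.firebird.base import FBDialect

    emp = table("employees", column("id"))
    stmt = select([emp.c.id]).limit(10).offset(20)
    print stmt.compile(dialect=FBDialect())
    # SELECT FIRST 10 SKIP 20 employees.id FROM employees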
120
sqlalchemy/dialects/firebird/kinterbasdb.py
Normal file
@@ -0,0 +1,120 @@
# kinterbasdb.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""
The most common way to connect to a Firebird engine is implemented by
kinterbasdb__, currently maintained__ directly by the Firebird people.

The connection URL is of the form
``firebird[+kinterbasdb]://user:password@host:port/path/to/db[?key=value&key=value...]``.

Kinterbasdb backend-specific keyword arguments are:

type_conv
  select the kind of mapping done on the types: by default SQLAlchemy
  uses 200 with Unicode, datetime and decimal support (see details__).

concurrency_level
  set the backend policy with regards to threading issues: by default
  SQLAlchemy uses policy 1 (see details__).

__ http://sourceforge.net/projects/kinterbasdb
__ http://firebirdsql.org/index.php?op=devel&sub=python
__ http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation
__ http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency
"""

from sqlalchemy.dialects.firebird.base import FBDialect, FBCompiler
from sqlalchemy import util, types as sqltypes

class _FBNumeric_kinterbasdb(sqltypes.Numeric):
    def bind_processor(self, dialect):
        def process(value):
            if value is not None:
                return str(value)
            else:
                return value
        return process

class FBDialect_kinterbasdb(FBDialect):
    driver = 'kinterbasdb'
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False

    supports_native_decimal = True

    colspecs = util.update_copy(
        FBDialect.colspecs,
        {
            sqltypes.Numeric: _FBNumeric_kinterbasdb
        }
    )

    def __init__(self, type_conv=200, concurrency_level=1, **kwargs):
        super(FBDialect_kinterbasdb, self).__init__(**kwargs)

        self.type_conv = type_conv
        self.concurrency_level = concurrency_level

    @classmethod
    def dbapi(cls):
        k = __import__('kinterbasdb')
        return k

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')
        if opts.get('port'):
            opts['host'] = "%s/%s" % (opts['host'], opts['port'])
            del opts['port']
        opts.update(url.query)

        type_conv = opts.pop('type_conv', self.type_conv)
        concurrency_level = opts.pop('concurrency_level', self.concurrency_level)

        if self.dbapi is not None:
            initialized = getattr(self.dbapi, 'initialized', None)
            if initialized is None:
                # CVS rev 1.96 changed the name of the attribute:
                # http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96
                initialized = getattr(self.dbapi, '_initialized', False)
            if not initialized:
                self.dbapi.init(type_conv=type_conv, concurrency_level=concurrency_level)
        return ([], opts)

    def _get_server_version_info(self, connection):
        """Get the version of the Firebird server used by a connection.

        Returns a tuple of (`major`, `minor`, `build`), three integers
        representing the version of the attached server.
        """

        # This is the simpler approach (the other uses the services api),
        # that for backward compatibility reasons returns a string like
        #   LI-V6.3.3.12981 Firebird 2.0
        # where the first version is a fake one resembling the old
        # Interbase signature. This is more than enough for our purposes,
        # as this is mainly (only?) used by the testsuite.

        from re import match

        fbconn = connection.connection
        version = fbconn.server_version
        m = match(r'\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+) \w+ (\d+)\.(\d+)', version)
        if not m:
            raise AssertionError("Could not determine version from string '%s'" % version)
        return tuple([int(x) for x in m.group(5, 6, 4)])

    def is_disconnect(self, e):
        if isinstance(e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)):
            msg = str(e)
            return ('Unable to complete network request to host' in msg or
                    'Invalid connection state' in msg or
                    'Invalid cursor state' in msg)
        else:
            return False

dialect = FBDialect_kinterbasdb
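A hedged usage sketch of the URL form and options documented above (host, credentials and database path are placeholders)::

    from sqlalchemy import create_engine

    engine = create_engine(
        "firebird+kinterbasdb://sysdba:masterkey@localhost/var/db/test.fdb"
        "?type_conv=200&concurrency_level=1")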
3
sqlalchemy/dialects/informix/__init__.py
Normal file
@@ -0,0 +1,3 @@
from sqlalchemy.dialects.informix import base, informixdb

base.dialect = informixdb.dialect
306
sqlalchemy/dialects/informix/base.py
Normal file
@@ -0,0 +1,306 @@
# informix.py
# Copyright (C) 2005,2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# coding: gbk
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Informix database.

This dialect is *not* tested on SQLAlchemy 0.6.

"""


import datetime

from sqlalchemy import sql, schema, exc, pool, util
from sqlalchemy.sql import compiler
from sqlalchemy.engine import default, reflection
from sqlalchemy import types as sqltypes


class InfoDateTime(sqltypes.DateTime):
    def bind_processor(self, dialect):
        def process(value):
            if value is not None:
                if value.microsecond:
                    value = value.replace(microsecond=0)
            return value
        return process

class InfoTime(sqltypes.Time):
    def bind_processor(self, dialect):
        def process(value):
            if value is not None:
                if value.microsecond:
                    value = value.replace(microsecond=0)
            return value
        return process

    def result_processor(self, dialect, coltype):
        def process(value):
            if isinstance(value, datetime.datetime):
                return value.time()
            else:
                return value
        return process


colspecs = {
    sqltypes.DateTime: InfoDateTime,
    sqltypes.Time: InfoTime,
}


ischema_names = {
    0: sqltypes.CHAR,          # CHAR
    1: sqltypes.SMALLINT,      # SMALLINT
    2: sqltypes.INTEGER,       # INT
    3: sqltypes.FLOAT,         # Float
    4: sqltypes.Float,         # SmallFloat
    5: sqltypes.DECIMAL,       # DECIMAL
    6: sqltypes.Integer,       # Serial
    7: sqltypes.DATE,          # DATE
    8: sqltypes.Numeric,       # MONEY
    10: sqltypes.DATETIME,     # DATETIME
    11: sqltypes.LargeBinary,  # BYTE
    12: sqltypes.TEXT,         # TEXT
    13: sqltypes.VARCHAR,      # VARCHAR
    15: sqltypes.NCHAR,        # NCHAR
    16: sqltypes.NVARCHAR,     # NVARCHAR
    17: sqltypes.Integer,      # INT8
    18: sqltypes.Integer,      # Serial8
    43: sqltypes.String,       # LVARCHAR
    -1: sqltypes.BLOB,         # BLOB
    -1: sqltypes.CLOB,         # CLOB
}


class InfoTypeCompiler(compiler.GenericTypeCompiler):
    def visit_DATETIME(self, type_):
        return "DATETIME YEAR TO SECOND"

    def visit_TIME(self, type_):
        return "DATETIME HOUR TO SECOND"

    def visit_large_binary(self, type_):
        return "BYTE"

    def visit_boolean(self, type_):
        return "SMALLINT"

class InfoSQLCompiler(compiler.SQLCompiler):

    def default_from(self):
        return " from systables where tabname = 'systables' "

    def get_select_precolumns(self, select):
        s = select._distinct and "DISTINCT " or ""
        # Informix only supports a limit here, not an offset
        if select._limit:
            s += " FIRST %s " % select._limit
        return s

    def visit_select(self, select):
        # columns in the ORDER BY clause must appear in the SELECT list too

        def __label(c):
            try:
                return c._label.lower()
            except:
                return ''

        # TODO: dont modify the original select, generate a new one
        a = [__label(c) for c in select._raw_columns]
        for c in select._order_by_clause.clauses:
            if __label(c) not in a:
                select.append_column(c)

        return compiler.SQLCompiler.visit_select(self, select)

    def limit_clause(self, select):
        if select._offset is not None and select._offset > 0:
            raise NotImplementedError("Informix does not support OFFSET")
        return ""

    def visit_function(self, func):
        if func.name.lower() == 'current_date':
            return "today"
        elif func.name.lower() == 'current_time':
            return "CURRENT HOUR TO SECOND"
        elif func.name.lower() in ('current_timestamp', 'now'):
            return "CURRENT YEAR TO SECOND"
        else:
            return compiler.SQLCompiler.visit_function(self, func)


class InfoDDLCompiler(compiler.DDLCompiler):
    def get_column_specification(self, column, first_pk=False):
        colspec = self.preparer.format_column(column)
        if column.primary_key and len(column.foreign_keys) == 0 and column.autoincrement and \
                isinstance(column.type, sqltypes.Integer) and first_pk:
            colspec += " SERIAL"
        else:
            colspec += " " + self.dialect.type_compiler.process(column.type)
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        if not column.nullable:
            colspec += " NOT NULL"

        return colspec


class InfoIdentifierPreparer(compiler.IdentifierPreparer):
    def __init__(self, dialect):
        super(InfoIdentifierPreparer, self).__init__(dialect, initial_quote="'")

    def format_constraint(self, constraint):
        # Informix doesn't support named constraints
        return ''

    def _requires_quotes(self, value):
        return False

class InformixDialect(default.DefaultDialect):
    name = 'informix'

    max_identifier_length = 128  # adjusts at runtime based on server version

    type_compiler = InfoTypeCompiler
    statement_compiler = InfoSQLCompiler
    ddl_compiler = InfoDDLCompiler
    preparer = InfoIdentifierPreparer
    colspecs = colspecs
    ischema_names = ischema_names

    def initialize(self, connection):
        super(InformixDialect, self).initialize(connection)

        # http://www.querix.com/support/knowledge-base/error_number_message/error_200
        if self.server_version_info < (9, 2):
            self.max_identifier_length = 18
        else:
            self.max_identifier_length = 128

    def do_begin(self, connect):
        cu = connect.cursor()
        cu.execute('SET LOCK MODE TO WAIT')
        #cu.execute('SET ISOLATION TO REPEATABLE READ')

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        s = "select tabname from systables"
        return [row[0] for row in connection.execute(s)]

    def has_table(self, connection, table_name, schema=None):
        cursor = connection.execute("""select tabname from systables where tabname=?""", table_name.lower())
        return cursor.first() is not None

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        c = connection.execute("""select colname , coltype , collength , t3.default , t1.colno from
            syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3
            where t1.tabid = t2.tabid and t2.tabname=?
              and t3.tabid = t2.tabid and t3.colno = t1.colno
            order by t1.colno""", table_name.lower())
        include_columns = kw.get('include_columns')
        columns = []
        for name, colattr, collength, default, colno in c.fetchall():
            name = name.lower()
            if include_columns and name not in include_columns:
                continue

            # in 7.31, coltype = 0x000
            #                       ^^-- column type
            #                      ^-- 1 not null, 0 null
            nullable, coltype = divmod(colattr, 256)
            if coltype not in (0, 13) and default:
                default = default.split()[-1]

            if coltype == 0 or coltype == 13:  # char, varchar
                coltype = ischema_names[coltype](collength)
                if default:
                    default = "'%s'" % default
            elif coltype == 5:  # decimal
                precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
                if scale == 255:
                    scale = 0
                coltype = sqltypes.Numeric(precision, scale)
            else:
                try:
                    coltype = ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, name))
                    coltype = sqltypes.NULLTYPE

            # TODO: nullability ??
            nullable = True

            column_info = dict(name=name, type=coltype, nullable=nullable,
                               default=default)
            columns.append(column_info)
        return columns

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        # FK
        c = connection.execute("""select t1.constrname as cons_name , t1.constrtype as cons_type ,
                 t4.colname as local_column , t7.tabname as remote_table ,
                 t6.colname as remote_column
            from sysconstraints as t1 , systables as t2 ,
                 sysindexes as t3 , syscolumns as t4 ,
                 sysreferences as t5 , syscolumns as t6 , systables as t7 ,
                 sysconstraints as t8 , sysindexes as t9
            where t1.tabid = t2.tabid and t2.tabname=? and t1.constrtype = 'R'
              and t3.tabid = t2.tabid and t3.idxname = t1.idxname
              and t4.tabid = t2.tabid and t4.colno = t3.part1
              and t5.constrid = t1.constrid and t8.constrid = t5.primary
              and t6.tabid = t5.ptabid and t6.colno = t9.part1 and t9.idxname = t8.idxname
              and t7.tabid = t5.ptabid""", table_name.lower())


        def fkey_rec():
            return {
                'name': None,
                'constrained_columns': [],
                'referred_schema': None,
                'referred_table': None,
                'referred_columns': []
            }

        fkeys = util.defaultdict(fkey_rec)

        for cons_name, cons_type, local_column, remote_table, remote_column in c.fetchall():

            rec = fkeys[cons_name]
            rec['name'] = cons_name
            local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']

            if not rec['referred_table']:
                rec['referred_table'] = remote_table

            local_cols.append(local_column)
            remote_cols.append(remote_column)

        return fkeys.values()

    @reflection.cache
    def get_primary_keys(self, connection, table_name, schema=None, **kw):
        c = connection.execute("""select t4.colname as local_column
            from sysconstraints as t1 , systables as t2 ,
                 sysindexes as t3 , syscolumns as t4
            where t1.tabid = t2.tabid and t2.tabname=? and t1.constrtype = 'P'
              and t3.tabid = t2.tabid and t3.idxname = t1.idxname
              and t4.tabid = t2.tabid and t4.colno = t3.part1""", table_name.lower())
        return [r[0] for r in c.fetchall()]

    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        # TODO
        return []
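A worked example (values invented) of the ``syscolumns`` decoding used in ``get_columns`` above. The not-null flag lives in the high byte of ``colattr``, the type code in the low byte; DECIMAL packs precision/scale into ``collength``::

    colattr = 256 + 5                        # a NOT NULL DECIMAL column
    notnull, coltype = divmod(colattr, 256)  # -> (1, 5); the dialect names this flag "nullable"

    collength = (10 << 8) | 2                # DECIMAL(10, 2)
    precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF  # -> (10, 2)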
46
sqlalchemy/dialects/informix/informixdb.py
Normal file
@@ -0,0 +1,46 @@
from sqlalchemy.dialects.informix.base import InformixDialect
from sqlalchemy.engine import default

class InformixExecutionContext_informixdb(default.DefaultExecutionContext):
    def post_exec(self):
        if self.isinsert:
            self._lastrowid = [self.cursor.sqlerrd[1]]


class InformixDialect_informixdb(InformixDialect):
    driver = 'informixdb'
    default_paramstyle = 'qmark'
    execution_ctx_cls = InformixExecutionContext_informixdb

    @classmethod
    def dbapi(cls):
        return __import__('informixdb')

    def create_connect_args(self, url):
        if url.host:
            dsn = '%s@%s' % (url.database, url.host)
        else:
            dsn = url.database

        if url.username:
            opt = {'user': url.username, 'password': url.password}
        else:
            opt = {}

        return ([dsn], opt)

    def _get_server_version_info(self, connection):
        # http://informixdb.sourceforge.net/manual.html#inspecting-version-numbers
        vers = connection.dbms_version

        # TODO: not tested
        return tuple([int(x) for x in vers.split('.')])

    def is_disconnect(self, e):
        if isinstance(e, self.dbapi.OperationalError):
            return 'closed the connection' in str(e) or 'connection not open' in str(e)
        else:
            return False


dialect = InformixDialect_informixdb
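For illustration, the URL-to-DSN translation in create_connect_args() behaves like this (hypothetical host and database names)::

    from sqlalchemy.engine import url

    u = url.make_url('informix+informixdb://scott:tiger@myhost/mydb')
    # with a host present, the DSN becomes 'mydb@myhost', so the method
    # above returns (['mydb@myhost'], {'user': 'scott', 'password': 'tiger'})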
3
sqlalchemy/dialects/maxdb/__init__.py
Normal file
@@ -0,0 +1,3 @@
from sqlalchemy.dialects.maxdb import base, sapdb

base.dialect = sapdb.dialect
1058
sqlalchemy/dialects/maxdb/base.py
Normal file
File diff suppressed because it is too large
17
sqlalchemy/dialects/maxdb/sapdb.py
Normal file
@@ -0,0 +1,17 @@
from sqlalchemy.dialects.maxdb.base import MaxDBDialect

class MaxDBDialect_sapdb(MaxDBDialect):
    driver = 'sapdb'

    @classmethod
    def dbapi(cls):
        from sapdb import dbapi as _dbapi
        return _dbapi

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')
        opts.update(url.query)
        return [], opts


dialect = MaxDBDialect_sapdb
19
sqlalchemy/dialects/mssql/__init__.py
Normal file
@@ -0,0 +1,19 @@
from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, pymssql, zxjdbc, mxodbc

base.dialect = pyodbc.dialect

from sqlalchemy.dialects.mssql.base import \
    INTEGER, BIGINT, SMALLINT, TINYINT, VARCHAR, NVARCHAR, CHAR, \
    NCHAR, TEXT, NTEXT, DECIMAL, NUMERIC, FLOAT, DATETIME, \
    DATETIME2, DATETIMEOFFSET, DATE, TIME, SMALLDATETIME, \
    BINARY, VARBINARY, BIT, REAL, IMAGE, TIMESTAMP, \
    MONEY, SMALLMONEY, UNIQUEIDENTIFIER, SQL_VARIANT, dialect


__all__ = (
    'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
    'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME',
    'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
    'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP',
    'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect'
)
59
sqlalchemy/dialects/mssql/adodbapi.py
Normal file
@@ -0,0 +1,59 @@
"""
The adodbapi dialect is not implemented for 0.6 at this time.

"""
import datetime  # needed by MSDateTime_adodbapi.result_processor below
import sys

from sqlalchemy import types as sqltypes, util
from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect

class MSDateTime_adodbapi(MSDateTime):
    def result_processor(self, dialect, coltype):
        def process(value):
            # adodbapi will return datetimes with empty time values as datetime.date() objects.
            # Promote them back to full datetime.datetime()
            if type(value) is datetime.date:
                return datetime.datetime(value.year, value.month, value.day)
            return value
        return process


class MSDialect_adodbapi(MSDialect):
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True
    supports_unicode = sys.maxunicode == 65535
    supports_unicode_statements = True
    driver = 'adodbapi'

    @classmethod
    def import_dbapi(cls):
        import adodbapi as module
        return module

    colspecs = util.update_copy(
        MSDialect.colspecs,
        {
            sqltypes.DateTime: MSDateTime_adodbapi
        }
    )

    def create_connect_args(self, url):
        keys = url.query

        connectors = ["Provider=SQLOLEDB"]
        if 'port' in keys:
            connectors.append("Data Source=%s, %s" % (keys.get("host"), keys.get("port")))
        else:
            connectors.append("Data Source=%s" % keys.get("host"))
        connectors.append("Initial Catalog=%s" % keys.get("database"))
        user = keys.get("user")
        if user:
            connectors.append("User Id=%s" % user)
            connectors.append("Password=%s" % keys.get("password", ""))
        else:
            connectors.append("Integrated Security=SSPI")
        return [[";".join(connectors)], {}]

    def is_disconnect(self, e):
        return isinstance(e, self.dbapi.adodbapi.DatabaseError) and "'connection failure'" in str(e)

dialect = MSDialect_adodbapi
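As a sketch of what create_connect_args() assembles, with hypothetical host, database, and credentials passed through the query string::

    from sqlalchemy.engine import url

    u = url.make_url('mssql+adodbapi:///?host=dbserver&port=1433'
                     '&database=mydb&user=scott&password=tiger')
    # create_connect_args() above would produce:
    # [['Provider=SQLOLEDB;Data Source=dbserver, 1433;'
    #   'Initial Catalog=mydb;User Id=scott;Password=tiger'], {}]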
1297
sqlalchemy/dialects/mssql/base.py
Normal file
File diff suppressed because it is too large
83
sqlalchemy/dialects/mssql/information_schema.py
Normal file
@@ -0,0 +1,83 @@
from sqlalchemy import Table, MetaData, Column, ForeignKey
from sqlalchemy.types import String, Unicode, Integer, TypeDecorator

ischema = MetaData()

class CoerceUnicode(TypeDecorator):
    impl = Unicode

    def process_bind_param(self, value, dialect):
        if isinstance(value, str):
            value = value.decode(dialect.encoding)
        return value

schemata = Table("SCHEMATA", ischema,
    Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"),
    Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"),
    Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"),
    schema="INFORMATION_SCHEMA")

tables = Table("TABLES", ischema,
    Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("TABLE_TYPE", String(convert_unicode=True), key="table_type"),
    schema="INFORMATION_SCHEMA")

columns = Table("COLUMNS", ischema,
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
    Column("IS_NULLABLE", Integer, key="is_nullable"),
    Column("DATA_TYPE", String, key="data_type"),
    Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
    Column("CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"),
    Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
    Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
    Column("COLUMN_DEFAULT", Integer, key="column_default"),
    Column("COLLATION_NAME", String, key="collation_name"),
    schema="INFORMATION_SCHEMA")

constraints = Table("TABLE_CONSTRAINTS", ischema,
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
    Column("CONSTRAINT_TYPE", String(convert_unicode=True), key="constraint_type"),
    schema="INFORMATION_SCHEMA")

column_constraints = Table("CONSTRAINT_COLUMN_USAGE", ischema,
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
    Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
    schema="INFORMATION_SCHEMA")

key_constraints = Table("KEY_COLUMN_USAGE", ischema,
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
    Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
    Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
    schema="INFORMATION_SCHEMA")

ref_constraints = Table("REFERENTIAL_CONSTRAINTS", ischema,
    Column("CONSTRAINT_CATALOG", CoerceUnicode, key="constraint_catalog"),
    Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"),
    Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
    Column("UNIQUE_CONSTRAINT_CATLOG", CoerceUnicode, key="unique_constraint_catalog"),  # TODO: is CATLOG misspelled ?
    Column("UNIQUE_CONSTRAINT_SCHEMA", CoerceUnicode, key="unique_constraint_schema"),
    Column("UNIQUE_CONSTRAINT_NAME", CoerceUnicode, key="unique_constraint_name"),
    Column("MATCH_OPTION", String, key="match_option"),
    Column("UPDATE_RULE", String, key="update_rule"),
    Column("DELETE_RULE", String, key="delete_rule"),
    schema="INFORMATION_SCHEMA")

views = Table("VIEWS", ischema,
    Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"),
    Column("CHECK_OPTION", String, key="check_option"),
    Column("IS_UPDATABLE", String, key="is_updatable"),
    schema="INFORMATION_SCHEMA")
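These Table objects are the building blocks the MSSQL dialect uses for its reflection queries. A minimal sketch of how one of them can be queried (the table name is hypothetical)::

    from sqlalchemy import select

    # roughly the shape of a column-reflection query against COLUMNS
    s = select([columns.c.column_name, columns.c.data_type],
               columns.c.table_name == 'mytable',
               order_by=[columns.c.ordinal_position])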
83
sqlalchemy/dialects/mssql/mxodbc.py
Normal file
@@ -0,0 +1,83 @@
"""
Support for MS-SQL via mxODBC.

mxODBC is available at:

    http://www.egenix.com/

This was tested with mxODBC 3.1.2 and the SQL Server Native
Client connected to MSSQL 2005 and 2008 Express Editions.

Connecting
~~~~~~~~~~

Connection is via DSN::

    mssql+mxodbc://<username>:<password>@<dsnname>

Execution Modes
~~~~~~~~~~~~~~~

mxODBC features two styles of statement execution, using the ``cursor.execute()``
and ``cursor.executedirect()`` methods (the second being an extension to the
DBAPI specification). The former makes use of the native
parameter binding services of the ODBC driver, while the latter uses string escaping.
The primary advantage to native parameter binding is that the same statement, when
executed many times, is only prepared once. The primary advantage to the
latter is that the rules for bind parameter placement are relaxed. MS-SQL has very
strict rules for native binds, including that they cannot be placed within the argument
lists of function calls, anywhere outside the FROM, or even within subqueries within the
FROM clause - making the usage of bind parameters within SELECT statements impossible for
all but the most simplistic statements. For this reason, the mxODBC dialect uses the
"native" mode by default only for INSERT, UPDATE, and DELETE statements, and uses the
escaped string mode for all other statements. This behavior can be controlled completely
via :meth:`~sqlalchemy.sql.expression.Executable.execution_options`
using the ``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a value of
``True`` will unconditionally use native bind parameters and a value of ``False`` will
unconditionally use string-escaped parameters.

"""
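For example, to force native binds for one particular SELECT (a sketch; the DSN, table, and parameter names are hypothetical)::

    from sqlalchemy import create_engine, text

    engine = create_engine('mssql+mxodbc://scott:tiger@mydsn')
    stmt = text("select * from mytable where id = :id").\
        execution_options(native_odbc_execute=True)
    result = engine.execute(stmt, id=5)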
import re
import sys

from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.connectors.mxodbc import MxODBCConnector
from sqlalchemy.dialects.mssql.pyodbc import MSExecutionContext_pyodbc
from sqlalchemy.dialects.mssql.base import (MSExecutionContext, MSDialect,
                                            MSSQLCompiler, MSSQLStrictCompiler,
                                            _MSDateTime, _MSDate, TIME)


class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc):
    """
    The pyodbc execution context is useful for enabling
    SELECT SCOPE_IDENTITY in cases where OUTPUT clause
    does not work (tables with insert triggers).
    """
    # TODO: investigate whether the pyodbc execution context
    # is really only being used in cases where OUTPUT
    # won't work.

class MSDialect_mxodbc(MxODBCConnector, MSDialect):

    # TODO: may want to use this only if FreeTDS is not in use,
    # since FreeTDS doesn't seem to use native binds.
    statement_compiler = MSSQLStrictCompiler
    execution_ctx_cls = MSExecutionContext_mxodbc
    colspecs = {
        #sqltypes.Numeric : _MSNumeric,
        sqltypes.DateTime: _MSDateTime,
        sqltypes.Date: _MSDate,
        sqltypes.Time: TIME,
    }

    def __init__(self, description_encoding='latin-1', **params):
        super(MSDialect_mxodbc, self).__init__(**params)
        self.description_encoding = description_encoding

dialect = MSDialect_mxodbc
101
sqlalchemy/dialects/mssql/pymssql.py
Normal file
@@ -0,0 +1,101 @@
"""
Support for the pymssql dialect.

This dialect supports pymssql 1.0 and greater.

pymssql is available at:

    http://pymssql.sourceforge.net/

Connecting
^^^^^^^^^^

Sample connect string::

    mssql+pymssql://<username>:<password>@<freetds_name>

Adding "?charset=utf8" or similar will cause pymssql to return
strings as Python unicode objects. This can potentially improve
performance in some scenarios as decoding of strings is
handled natively.

Limitations
^^^^^^^^^^^

pymssql inherits a lot of limitations from FreeTDS, including:

* no support for multibyte schema identifiers
* poor support for large decimals
* poor support for binary fields
* poor support for VARCHAR/CHAR fields over 255 characters

Please consult the pymssql documentation for further information.

"""
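For example, enabling the charset option on the URL (the FreeTDS DSN name is hypothetical)::

    from sqlalchemy import create_engine

    # strings come back as unicode, decoded natively by pymssql
    engine = create_engine('mssql+pymssql://scott:tiger@mydsn/?charset=utf8')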
from sqlalchemy.dialects.mssql.base import MSDialect
from sqlalchemy import types as sqltypes, util, processors
import re
import decimal

class _MSNumeric_pymssql(sqltypes.Numeric):
    def result_processor(self, dialect, type_):
        if not self.asdecimal:
            return processors.to_float
        else:
            return sqltypes.Numeric.result_processor(self, dialect, type_)

class MSDialect_pymssql(MSDialect):
    supports_sane_rowcount = False
    max_identifier_length = 30
    driver = 'pymssql'

    colspecs = util.update_copy(
        MSDialect.colspecs,
        {
            sqltypes.Numeric: _MSNumeric_pymssql,
            sqltypes.Float: sqltypes.Float,
        }
    )

    @classmethod
    def dbapi(cls):
        module = __import__('pymssql')
        # pymssql doesn't have a Binary method.  we use string
        # TODO: monkeypatching here is less than ideal
        module.Binary = str

        client_ver = tuple(int(x) for x in module.__version__.split("."))
        if client_ver < (1, ):
            util.warn("The pymssql dialect expects at least "
                      "the 1.0 series of the pymssql DBAPI.")
        return module

    def __init__(self, **params):
        super(MSDialect_pymssql, self).__init__(**params)
        self.use_scope_identity = True

    def _get_server_version_info(self, connection):
        vers = connection.scalar("select @@version")
        m = re.match(r"Microsoft SQL Server.*? - (\d+).(\d+).(\d+).(\d+)", vers)
        if m:
            return tuple(int(x) for x in m.group(1, 2, 3, 4))
        else:
            return None

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')
        opts.update(url.query)
        opts.pop('port', None)
        return [[], opts]

    def is_disconnect(self, e):
        for msg in (
            "Error 10054",
            "Not connected to any MS SQL server",
            "Connection is closed"
        ):
            if msg in str(e):
                return True
        else:
            return False

dialect = MSDialect_pymssql
197
sqlalchemy/dialects/mssql/pyodbc.py
Normal file
@@ -0,0 +1,197 @@
"""
Support for MS-SQL via pyodbc.

pyodbc is available at:

    http://pypi.python.org/pypi/pyodbc/

Connecting
^^^^^^^^^^

Examples of pyodbc connection string URLs:

* ``mssql+pyodbc://mydsn`` - connects using the specified DSN named ``mydsn``.
  The connection string that is created will appear like::

    dsn=mydsn;Trusted_Connection=Yes

* ``mssql+pyodbc://user:pass@mydsn`` - connects using the DSN named
  ``mydsn`` passing in the ``UID`` and ``PWD`` information. The
  connection string that is created will appear like::

    dsn=mydsn;UID=user;PWD=pass

* ``mssql+pyodbc://user:pass@mydsn/?LANGUAGE=us_english`` - connects
  using the DSN named ``mydsn`` passing in the ``UID`` and ``PWD``
  information, plus the additional connection configuration option
  ``LANGUAGE``. The connection string that is created will appear
  like::

    dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english

* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection string
  dynamically created that would appear like::

    DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass

* ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection
  string that is dynamically created, which also includes the port
  information using the comma syntax. If your connection string
  requires the port information to be passed as a ``port`` keyword
  see the next example. This will create the following connection
  string::

    DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass

* ``mssql+pyodbc://user:pass@host/db?port=123`` - connects using a connection
  string that is dynamically created that includes the port
  information as a separate ``port`` keyword. This will create the
  following connection string::

    DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123

If you require a connection string that is outside the options
presented above, use the ``odbc_connect`` keyword to pass in a
urlencoded connection string. What gets passed in will be urldecoded
and passed directly.

For example::

    mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase%3Ddb

would create the following connection string::

    dsn=mydsn;Database=db

Encoding your connection string can be easily accomplished through
the python shell. For example::

    >>> import urllib
    >>> urllib.quote_plus('dsn=mydsn;Database=db')
    'dsn%3Dmydsn%3BDatabase%3Ddb'

"""
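Putting the two pieces together, a full raw connection string can be urlencoded inline (the server, database, and credentials below are hypothetical)::

    import urllib
    from sqlalchemy import create_engine

    raw = 'DRIVER={SQL Server};Server=dbserver;Database=mydb;UID=scott;PWD=tiger'
    engine = create_engine('mssql+pyodbc:///?odbc_connect=%s'
                           % urllib.quote_plus(raw))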
from sqlalchemy.dialects.mssql.base import MSExecutionContext, MSDialect
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy import types as sqltypes, util
import decimal

class _MSNumeric_pyodbc(sqltypes.Numeric):
    """Turns Decimals with adjusted() < 0 or > 7 into strings.

    This is the only method that is proven to work with Pyodbc+MSSQL
    without crashing (floats can be used but seem to cause sporadic
    crashes).

    """

    def bind_processor(self, dialect):
        super_process = super(_MSNumeric_pyodbc, self).bind_processor(dialect)

        def process(value):
            if self.asdecimal and \
                    isinstance(value, decimal.Decimal):

                adjusted = value.adjusted()
                if adjusted < 0:
                    return self._small_dec_to_string(value)
                elif adjusted > 7:
                    return self._large_dec_to_string(value)

            if super_process:
                return super_process(value)
            else:
                return value
        return process

    def _small_dec_to_string(self, value):
        return "%s0.%s%s" % (
                (value < 0 and '-' or ''),
                '0' * (abs(value.adjusted()) - 1),
                "".join([str(nint) for nint in value._int]))

    def _large_dec_to_string(self, value):
        if 'E' in str(value):
            result = "%s%s%s" % (
                    (value < 0 and '-' or ''),
                    "".join([str(s) for s in value._int]),
                    "0" * (value.adjusted() - (len(value._int)-1)))
        else:
            if (len(value._int) - 1) > value.adjusted():
                result = "%s%s.%s" % (
                        (value < 0 and '-' or ''),
                        "".join([str(s) for s in value._int][0:value.adjusted() + 1]),
                        "".join([str(s) for s in value._int][value.adjusted() + 1:]))
            else:
                result = "%s%s" % (
                        (value < 0 and '-' or ''),
                        "".join([str(s) for s in value._int][0:value.adjusted() + 1]))
        return result
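The adjusted() cutoffs used above can be checked interactively; for example::

    >>> import decimal
    >>> decimal.Decimal('0.00001234').adjusted()   # < 0: _small_dec_to_string applies
    -5
    >>> decimal.Decimal('123456789.5').adjusted()  # > 7: _large_dec_to_string applies
    8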
class MSExecutionContext_pyodbc(MSExecutionContext):
    _embedded_scope_identity = False

    def pre_exec(self):
        """where appropriate, issue "select scope_identity()" in the same statement.

        Background on why "scope_identity()" is preferable to "@@identity":
        http://msdn.microsoft.com/en-us/library/ms190315.aspx

        Background on why we attempt to embed "scope_identity()" into the same
        statement as the INSERT:
        http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?

        """

        super(MSExecutionContext_pyodbc, self).pre_exec()

        # don't embed the scope_identity select into an "INSERT .. DEFAULT VALUES"
        if self._select_lastrowid and \
                self.dialect.use_scope_identity and \
                len(self.parameters[0]):
            self._embedded_scope_identity = True

            self.statement += "; select scope_identity()"

    def post_exec(self):
        if self._embedded_scope_identity:
            # Fetch the last inserted id from the manipulated statement.
            # We may have to skip over a number of result sets with no data (due to triggers, etc.)
            while True:
                try:
                    # fetchall() ensures the cursor is consumed
                    # without closing it (FreeTDS particularly)
                    row = self.cursor.fetchall()[0]
                    break
                except self.dialect.dbapi.Error, e:
                    # no way around this - nextset() consumes the previous set
                    # so we need to just keep flipping
                    self.cursor.nextset()

            self._lastrowid = int(row[0])
        else:
            super(MSExecutionContext_pyodbc, self).post_exec()


class MSDialect_pyodbc(PyODBCConnector, MSDialect):

    execution_ctx_cls = MSExecutionContext_pyodbc

    pyodbc_driver_name = 'SQL Server'

    colspecs = util.update_copy(
        MSDialect.colspecs,
        {
            sqltypes.Numeric: _MSNumeric_pyodbc
        }
    )

    def __init__(self, description_encoding='latin-1', **params):
        super(MSDialect_pyodbc, self).__init__(**params)
        self.description_encoding = description_encoding
        self.use_scope_identity = self.dbapi and hasattr(self.dbapi.Cursor, 'nextset')

dialect = MSDialect_pyodbc
64
sqlalchemy/dialects/mssql/zxjdbc.py
Normal file
@@ -0,0 +1,64 @@
"""Support for the Microsoft SQL Server database via the zxjdbc JDBC
connector.

JDBC Driver
-----------

Requires the jTDS driver, available from: http://jtds.sourceforge.net/

Connecting
----------

URLs are of the standard form of
``mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]``.

Additional arguments which may be specified either as query string
arguments on the URL, or as keyword arguments to
:func:`~sqlalchemy.create_engine()` will be passed as Connection
properties to the underlying JDBC driver.

"""
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.mssql.base import MSDialect, MSExecutionContext
from sqlalchemy.engine import base

class MSExecutionContext_zxjdbc(MSExecutionContext):

    _embedded_scope_identity = False

    def pre_exec(self):
        super(MSExecutionContext_zxjdbc, self).pre_exec()
        # scope_identity after the fact returns null in jTDS so we must
        # embed it
        if self._select_lastrowid and self.dialect.use_scope_identity:
            self._embedded_scope_identity = True
            self.statement += "; SELECT scope_identity()"

    def post_exec(self):
        if self._embedded_scope_identity:
            while True:
                try:
                    row = self.cursor.fetchall()[0]
                    break
                except self.dialect.dbapi.Error, e:
                    self.cursor.nextset()
            self._lastrowid = int(row[0])

        if (self.isinsert or self.isupdate or self.isdelete) and self.compiled.returning:
            self._result_proxy = base.FullyBufferedResultProxy(self)

        if self._enable_identity_insert:
            table = self.dialect.identifier_preparer.format_table(self.compiled.statement.table)
            self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)


class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
    jdbc_db_name = 'jtds:sqlserver'
    jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver'

    execution_ctx_cls = MSExecutionContext_zxjdbc

    def _get_server_version_info(self, connection):
        return tuple(int(x) for x in connection.connection.dbversion.split('.'))

dialect = MSDialect_zxjdbc
17
sqlalchemy/dialects/mysql/__init__.py
Normal file
@@ -0,0 +1,17 @@
from sqlalchemy.dialects.mysql import base, mysqldb, oursql, pyodbc, zxjdbc, mysqlconnector

# default dialect
base.dialect = mysqldb.dialect

from sqlalchemy.dialects.mysql.base import \
    BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, DOUBLE, ENUM, \
    FLOAT, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \
    NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, TINYBLOB, TINYINT, TINYTEXT, \
    VARBINARY, VARCHAR, YEAR, dialect

__all__ = (
    'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'DOUBLE',
    'ENUM', 'FLOAT', 'INTEGER', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT',
    'MEDIUMTEXT', 'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME', 'TIMESTAMP',
    'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR', 'YEAR', 'dialect'
)
2528
sqlalchemy/dialects/mysql/base.py
Normal file
File diff suppressed because it is too large
132
sqlalchemy/dialects/mysql/mysqlconnector.py
Normal file
@@ -0,0 +1,132 @@
"""Support for the MySQL database via the MySQL Connector/Python adapter.

MySQL Connector/Python is available at:

    https://launchpad.net/myconnpy

Connecting
----------

Connect string format::

    mysql+mysqlconnector://<user>:<password>@<host>[:<port>]/<dbname>

"""

import re

from sqlalchemy.dialects.mysql.base import (MySQLDialect,
    MySQLExecutionContext, MySQLCompiler, MySQLIdentifierPreparer,
    BIT)

from sqlalchemy.engine import base as engine_base, default
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import exc, log, schema, sql, types as sqltypes, util
from sqlalchemy import processors

class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext):

    def get_lastrowid(self):
        return self.cursor.lastrowid


class MySQLCompiler_mysqlconnector(MySQLCompiler):
    def visit_mod(self, binary, **kw):
        return self.process(binary.left) + " %% " + self.process(binary.right)

    def post_process_text(self, text):
        return text.replace('%', '%%')

class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer):

    def _escape_identifier(self, value):
        value = value.replace(self.escape_quote, self.escape_to_quote)
        return value.replace("%", "%%")

class _myconnpyBIT(BIT):
    def result_processor(self, dialect, coltype):
        """MySQL Connector/Python already converts mysql bits, so no conversion is needed."""

        return None

class MySQLDialect_mysqlconnector(MySQLDialect):
    driver = 'mysqlconnector'
    supports_unicode_statements = True
    supports_unicode_binds = True
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True

    supports_native_decimal = True

    default_paramstyle = 'format'
    execution_ctx_cls = MySQLExecutionContext_mysqlconnector
    statement_compiler = MySQLCompiler_mysqlconnector

    preparer = MySQLIdentifierPreparer_mysqlconnector

    colspecs = util.update_copy(
        MySQLDialect.colspecs,
        {
            BIT: _myconnpyBIT,
        }
    )

    @classmethod
    def dbapi(cls):
        from mysql import connector
        return connector

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')
        opts.update(url.query)

        util.coerce_kw_type(opts, 'buffered', bool)
        util.coerce_kw_type(opts, 'raise_on_warnings', bool)
        opts['buffered'] = True
        opts['raise_on_warnings'] = True

        # FOUND_ROWS must be set in ClientFlag to enable
        # supports_sane_rowcount.
        if self.dbapi is not None:
            try:
                from mysql.connector.constants import ClientFlag
                client_flags = opts.get('client_flags', ClientFlag.get_default())
                client_flags |= ClientFlag.FOUND_ROWS
                opts['client_flags'] = client_flags
            except:
                pass
        return [[], opts]

    def _get_server_version_info(self, connection):
        dbapi_con = connection.connection

        from mysql.connector.constants import ClientFlag
        dbapi_con.set_client_flag(ClientFlag.FOUND_ROWS)

        version = dbapi_con.get_server_version()
        return tuple(version)

    def _detect_charset(self, connection):
        return connection.connection.get_characterset_info()

    def _extract_error_code(self, exception):
        try:
            return exception.orig.errno
        except AttributeError:
            return None

    def is_disconnect(self, e):
        errnos = (2006, 2013, 2014, 2045, 2055, 2048)
        exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError)
        if isinstance(e, exceptions):
            return e.errno in errnos
        else:
            return False

    def _compat_fetchall(self, rp, charset=None):
        return rp.fetchall()

    def _compat_fetchone(self, rp, charset=None):
        return rp.fetchone()

dialect = MySQLDialect_mysqlconnector
202
sqlalchemy/dialects/mysql/mysqldb.py
Normal file
@@ -0,0 +1,202 @@
"""Support for the MySQL database via the MySQL-python adapter.

MySQL-Python is available at:

    http://sourceforge.net/projects/mysql-python

At least version 1.2.1 or 1.2.2 should be used.

Connecting
----------

Connect string format::

    mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>

Character Sets
--------------

Many MySQL server installations default to a ``latin1`` encoding for client
connections. All data sent through the connection will be converted into
``latin1``, even if you have ``utf8`` or another character set on your tables
and columns. With versions 4.1 and higher, you can change the connection
character set either through server configuration or by including the
``charset`` parameter in the URL used for ``create_engine``. The ``charset``
option is passed through to MySQL-Python and has the side-effect of also
enabling ``use_unicode`` in the driver by default. For regular encoded
strings, also pass ``use_unicode=0`` in the connection arguments::

    # set client encoding to utf8; all strings come back as unicode
    create_engine('mysql+mysqldb:///mydb?charset=utf8')

    # set client encoding to utf8; all strings come back as utf8 str
    create_engine('mysql+mysqldb:///mydb?charset=utf8&use_unicode=0')

Known Issues
------------

MySQL-python at least as of version 1.2.2 has a serious memory leak related
to unicode conversion, a feature which is disabled via ``use_unicode=0``.
The recommended connection form with SQLAlchemy is::

    engine = create_engine('mysql://scott:tiger@localhost/test?charset=utf8&use_unicode=0', pool_recycle=3600)

"""

import re

from sqlalchemy.dialects.mysql.base import (MySQLDialect, MySQLExecutionContext,
                                            MySQLCompiler, MySQLIdentifierPreparer)
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import exc, log, schema, sql, types as sqltypes, util
from sqlalchemy import processors

class MySQLExecutionContext_mysqldb(MySQLExecutionContext):

    @property
    def rowcount(self):
        if hasattr(self, '_rowcount'):
            return self._rowcount
        else:
            return self.cursor.rowcount


class MySQLCompiler_mysqldb(MySQLCompiler):
    def visit_mod(self, binary, **kw):
        return self.process(binary.left) + " %% " + self.process(binary.right)

    def post_process_text(self, text):
        return text.replace('%', '%%')


class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer):

    def _escape_identifier(self, value):
        value = value.replace(self.escape_quote, self.escape_to_quote)
        return value.replace("%", "%%")

class MySQLDialect_mysqldb(MySQLDialect):
    driver = 'mysqldb'
    supports_unicode_statements = False
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True

    supports_native_decimal = True

    default_paramstyle = 'format'
    execution_ctx_cls = MySQLExecutionContext_mysqldb
    statement_compiler = MySQLCompiler_mysqldb
    preparer = MySQLIdentifierPreparer_mysqldb

    colspecs = util.update_copy(
        MySQLDialect.colspecs,
        {
        }
    )

    @classmethod
    def dbapi(cls):
        return __import__('MySQLdb')

    def do_executemany(self, cursor, statement, parameters, context=None):
        rowcount = cursor.executemany(statement, parameters)
        if context is not None:
            context._rowcount = rowcount

    def create_connect_args(self, url):
        opts = url.translate_connect_args(database='db', username='user',
                                          password='passwd')
        opts.update(url.query)

        util.coerce_kw_type(opts, 'compress', bool)
        util.coerce_kw_type(opts, 'connect_timeout', int)
        util.coerce_kw_type(opts, 'client_flag', int)
        util.coerce_kw_type(opts, 'local_infile', int)
        # Note: using either of the below will cause all strings to be returned
        # as Unicode, both in raw SQL operations and with column types like
        # String and MSString.
        util.coerce_kw_type(opts, 'use_unicode', bool)
        util.coerce_kw_type(opts, 'charset', str)

        # Rich values 'cursorclass' and 'conv' are not supported via
        # query string.

        ssl = {}
        for key in ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']:
            if key in opts:
                ssl[key[4:]] = opts[key]
                util.coerce_kw_type(ssl, key[4:], str)
                del opts[key]
        if ssl:
            opts['ssl'] = ssl

        # FOUND_ROWS must be set in CLIENT_FLAGS to enable
        # supports_sane_rowcount.
        client_flag = opts.get('client_flag', 0)
        if self.dbapi is not None:
            try:
                from MySQLdb.constants import CLIENT as CLIENT_FLAGS
                client_flag |= CLIENT_FLAGS.FOUND_ROWS
            except:
                pass
            opts['client_flag'] = client_flag
        return [[], opts]

    def _get_server_version_info(self, connection):
        dbapi_con = connection.connection
        version = []
        r = re.compile('[.\-]')
        for n in r.split(dbapi_con.get_server_info()):
            try:
                version.append(int(n))
            except ValueError:
                version.append(n)
        return tuple(version)

    def _extract_error_code(self, exception):
        try:
            return exception.orig.args[0]
        except AttributeError:
            return None

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""

        # Note: MySQL-python 1.2.1c7 seems to ignore changes made
        # on a connection via set_character_set()
        if self.server_version_info < (4, 1, 0):
            try:
                return connection.connection.character_set_name()
            except AttributeError:
                # < 1.2.1 final MySQL-python drivers have no charset support.
                # a query is needed.
                pass

        # Prefer 'character_set_results' for the current connection over the
        # value in the driver.  SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])

        if 'character_set_results' in opts:
            return opts['character_set_results']
        try:
            return connection.connection.character_set_name()
        except AttributeError:
            # Still no charset on < 1.2.1 final...
            if 'character_set' in opts:
                return opts['character_set']
            else:
                util.warn(
                    "Could not detect the connection character set with this "
                    "combination of MySQL server and MySQL-python. "
                    "MySQL-python >= 1.2.2 is recommended.  Assuming latin1.")
                return 'latin1'


dialect = MySQLDialect_mysqldb
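The ssl_* handling in create_connect_args() above can be exercised from the URL; a sketch, with hypothetical certificate paths::

    from sqlalchemy import create_engine

    # ssl_ca / ssl_cert / ssl_key are collected into an 'ssl' dict
    # and passed through to MySQLdb.connect()
    engine = create_engine(
        'mysql+mysqldb://scott:tiger@localhost/test'
        '?ssl_ca=/path/ca.pem&ssl_cert=/path/client-cert.pem&ssl_key=/path/client-key.pem')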
255
sqlalchemy/dialects/mysql/oursql.py
Normal file
@@ -0,0 +1,255 @@
"""Support for the MySQL database via the oursql adapter.

OurSQL is available at:

    http://packages.python.org/oursql/

Connecting
----------

Connect string format::

    mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>

Character Sets
--------------

oursql defaults to using ``utf8`` as the connection charset, but other
encodings may be used instead. Like the MySQL-Python driver, unicode support
can be completely disabled::

    # oursql sets the connection charset to utf8 automatically; all strings come
    # back as utf8 str
    create_engine('mysql+oursql:///mydb?use_unicode=0')

To not automatically use ``utf8`` and instead use whatever the connection
defaults to, there is a separate parameter::

    # use the default connection charset; all strings come back as unicode
    create_engine('mysql+oursql:///mydb?default_charset=1')

    # use latin1 as the connection charset; all strings come back as unicode
    create_engine('mysql+oursql:///mydb?charset=latin1')

"""

import re

from sqlalchemy.dialects.mysql.base import (BIT, MySQLDialect, MySQLExecutionContext,
                                            MySQLCompiler, MySQLIdentifierPreparer)
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import exc, log, schema, sql, types as sqltypes, util
from sqlalchemy import processors


class _oursqlBIT(BIT):
    def result_processor(self, dialect, coltype):
        """oursql already converts mysql bits, so no conversion is needed."""

        return None


class MySQLExecutionContext_oursql(MySQLExecutionContext):

    @property
    def plain_query(self):
        return self.execution_options.get('_oursql_plain_query', False)

class MySQLDialect_oursql(MySQLDialect):
    driver = 'oursql'
    # Py3K
    # description_encoding = None
    # Py2K
    supports_unicode_binds = True
    supports_unicode_statements = True
    # end Py2K

    supports_native_decimal = True

    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True
    execution_ctx_cls = MySQLExecutionContext_oursql

    colspecs = util.update_copy(
        MySQLDialect.colspecs,
        {
            sqltypes.Time: sqltypes.Time,
            BIT: _oursqlBIT,
        }
    )

    @classmethod
    def dbapi(cls):
        return __import__('oursql')

    def do_execute(self, cursor, statement, parameters, context=None):
        """Provide an implementation of *cursor.execute(statement, parameters)*."""

        if context and context.plain_query:
            cursor.execute(statement, plain_query=True)
        else:
            cursor.execute(statement, parameters)

    def do_begin(self, connection):
        connection.cursor().execute('BEGIN', plain_query=True)

    def _xa_query(self, connection, query, xid):
        # Py2K
        arg = connection.connection._escape_string(xid)
        # end Py2K
        # Py3K
        # charset = self._connection_charset
        # arg = connection.connection._escape_string(xid.encode(charset)).decode(charset)
        connection.execution_options(_oursql_plain_query=True).execute(query % arg)

    # Because mysql is bad, these methods have to be
    # reimplemented to use _PlainQuery. Basically, some queries
    # refuse to return any data if they're run through
    # the parameterized query API, or refuse to be parameterized
    # in the first place.
    def do_begin_twophase(self, connection, xid):
        self._xa_query(connection, 'XA BEGIN "%s"', xid)

    def do_prepare_twophase(self, connection, xid):
        self._xa_query(connection, 'XA END "%s"', xid)
        self._xa_query(connection, 'XA PREPARE "%s"', xid)

    def do_rollback_twophase(self, connection, xid, is_prepared=True,
                             recover=False):
        if not is_prepared:
            self._xa_query(connection, 'XA END "%s"', xid)
        self._xa_query(connection, 'XA ROLLBACK "%s"', xid)

    def do_commit_twophase(self, connection, xid, is_prepared=True,
                           recover=False):
        if not is_prepared:
            self.do_prepare_twophase(connection, xid)
        self._xa_query(connection, 'XA COMMIT "%s"', xid)

    # Q: why didn't we need all these "plain_query" overrides earlier ?
    # am i on a newer/older version of OurSQL ?
    def has_table(self, connection, table_name, schema=None):
        return MySQLDialect.has_table(self,
                                      connection.connect().\
                                          execution_options(_oursql_plain_query=True),
                                      table_name, schema)

    def get_table_options(self, connection, table_name, schema=None, **kw):
        return MySQLDialect.get_table_options(self,
                                              connection.connect().\
                                                  execution_options(_oursql_plain_query=True),
                                              table_name,
                                              schema=schema,
                                              **kw)

    def get_columns(self, connection, table_name, schema=None, **kw):
        return MySQLDialect.get_columns(self,
                                        connection.connect().\
                                            execution_options(_oursql_plain_query=True),
                                        table_name,
                                        schema=schema,
                                        **kw)

    def get_view_names(self, connection, schema=None, **kw):
        return MySQLDialect.get_view_names(self,
                                           connection.connect().\
                                               execution_options(_oursql_plain_query=True),
                                           schema=schema,
                                           **kw)

    def get_table_names(self, connection, schema=None, **kw):
        return MySQLDialect.get_table_names(self,
                                            connection.connect().\
                                                execution_options(_oursql_plain_query=True),
                                            schema)

    def get_schema_names(self, connection, **kw):
        return MySQLDialect.get_schema_names(self,
                                             connection.connect().\
                                                 execution_options(_oursql_plain_query=True),
                                             **kw)

    def initialize(self, connection):
        return MySQLDialect.initialize(
            self,
            connection.execution_options(_oursql_plain_query=True)
        )

    def _show_create_table(self, connection, table, charset=None,
                           full_name=None):
        return MySQLDialect._show_create_table(self,
            connection.contextual_connect(close_with_result=True).
                execution_options(_oursql_plain_query=True),
            table, charset, full_name)

    def is_disconnect(self, e):
        if isinstance(e, self.dbapi.ProgrammingError):
            return e.errno is None and 'cursor' not in e.args[1] and e.args[1].endswith('closed')
        else:
            return e.errno in (2006, 2013, 2014, 2045, 2055)

    def create_connect_args(self, url):
        opts = url.translate_connect_args(database='db', username='user',
                                          password='passwd')
        opts.update(url.query)

        util.coerce_kw_type(opts, 'port', int)
        util.coerce_kw_type(opts, 'compress', bool)
        util.coerce_kw_type(opts, 'autoping', bool)

        util.coerce_kw_type(opts, 'default_charset', bool)
        if opts.pop('default_charset', False):
            opts['charset'] = None
        else:
            util.coerce_kw_type(opts, 'charset', str)
        opts['use_unicode'] = opts.get('use_unicode', True)
        util.coerce_kw_type(opts, 'use_unicode', bool)

        # FOUND_ROWS must be set in CLIENT_FLAGS to enable
        # supports_sane_rowcount.
        opts.setdefault('found_rows', True)

        return [[], opts]

    def _get_server_version_info(self, connection):
        dbapi_con = connection.connection
        version = []
        r = re.compile('[.\-]')
        for n in r.split(dbapi_con.server_info):
            try:
                version.append(int(n))
            except ValueError:
                version.append(n)
        return tuple(version)

    def _extract_error_code(self, exception):
        try:
            return exception.orig.errno
        except AttributeError:
            return None

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""

        return connection.connection.charset

    def _compat_fetchall(self, rp, charset=None):
        """oursql isn't super-broken like MySQLdb, yaaay."""
        return rp.fetchall()

    def _compat_fetchone(self, rp, charset=None):
        """oursql isn't super-broken like MySQLdb, yaaay."""
        return rp.fetchone()

    def _compat_first(self, rp, charset=None):
        return rp.first()


dialect = MySQLDialect_oursql
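The XA plumbing above is driven through the engine's two-phase API; a sketch, assuming an oursql engine and a ``users`` table defined elsewhere::

    conn = engine.connect()
    tx = conn.begin_twophase()           # issues XA BEGIN via do_begin_twophase()
    conn.execute(users.insert(), name='jack')
    tx.prepare()                         # XA END + XA PREPARE
    tx.commit()                          # XA COMMIT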
76
sqlalchemy/dialects/mysql/pyodbc.py
Normal file
@@ -0,0 +1,76 @@
"""Support for the MySQL database via the pyodbc adapter.

pyodbc is available at:

    http://pypi.python.org/pypi/pyodbc/

Connecting
----------

Connect string::

    mysql+pyodbc://<username>:<password>@<dsnname>

Limitations
-----------

The mysql-pyodbc dialect is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
of OurSQL, MySQLdb, or MySQL-connector/Python.

"""

from sqlalchemy.dialects.mysql.base import MySQLDialect, MySQLExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy.engine import base as engine_base
from sqlalchemy import util
import re

class MySQLExecutionContext_pyodbc(MySQLExecutionContext):

    def get_lastrowid(self):
        cursor = self.create_cursor()
        cursor.execute("SELECT LAST_INSERT_ID()")
        lastrowid = cursor.fetchone()[0]
        cursor.close()
        return lastrowid

class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
    supports_unicode_statements = False
    execution_ctx_cls = MySQLExecutionContext_pyodbc

    pyodbc_driver_name = "MySQL"

    def __init__(self, **kw):
        # deal with http://code.google.com/p/pyodbc/issues/detail?id=25
        kw.setdefault('convert_unicode', True)
        super(MySQLDialect_pyodbc, self).__init__(**kw)

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""

        # Prefer 'character_set_results' for the current connection over the
        # value in the driver.  SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
        for key in ('character_set_connection', 'character_set'):
            if opts.get(key, None):
                return opts[key]

        util.warn("Could not detect the connection character set. Assuming latin1.")
        return 'latin1'

    def _extract_error_code(self, exception):
        m = re.compile(r"\((\d+)\)").search(str(exception.orig.args))
        c = m.group(1)
        if c:
            return int(c)
        else:
            return None

dialect = MySQLDialect_pyodbc
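The error-code extraction above relies on the parenthesized code that MySQL ODBC drivers embed in their messages; for example (the message text is hypothetical)::

    >>> import re
    >>> msg = "('08S01', '[MySQL][ODBC 3.51 Driver]Lost connection" \
    ...       " to MySQL server during query (2013)')"
    >>> re.compile(r"\((\d+)\)").search(msg).group(1)
    '2013'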
111
sqlalchemy/dialects/mysql/zxjdbc.py
Normal file
@@ -0,0 +1,111 @@
"""Support for the MySQL database via Jython's zxjdbc JDBC connector.

JDBC Driver
-----------

The official MySQL JDBC driver is at
http://dev.mysql.com/downloads/connector/j/.

Connecting
----------

Connect string format::

    mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/<database>

Character Sets
--------------

SQLAlchemy zxjdbc dialects pass unicode straight through to the
zxjdbc/JDBC layer. To allow multiple character sets to be sent from the
MySQL Connector/J JDBC driver, by default SQLAlchemy sets its
``characterEncoding`` connection property to ``UTF-8``. It may be
overridden via a ``create_engine`` URL parameter.

"""
import re

from sqlalchemy import types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.mysql.base import BIT, MySQLDialect, MySQLExecutionContext

class _ZxJDBCBit(BIT):
    def result_processor(self, dialect, coltype):
        """Converts boolean or byte arrays from MySQL Connector/J to longs."""
        def process(value):
            if value is None:
                return value
            if isinstance(value, bool):
                return int(value)
            v = 0L
            for i in value:
                v = v << 8 | (i & 0xff)
            value = v
            return value
        return process


class MySQLExecutionContext_zxjdbc(MySQLExecutionContext):
    def get_lastrowid(self):
        cursor = self.create_cursor()
        cursor.execute("SELECT LAST_INSERT_ID()")
        lastrowid = cursor.fetchone()[0]
        cursor.close()
        return lastrowid


class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
    jdbc_db_name = 'mysql'
    jdbc_driver_name = 'com.mysql.jdbc.Driver'

    execution_ctx_cls = MySQLExecutionContext_zxjdbc

    colspecs = util.update_copy(
        MySQLDialect.colspecs,
        {
            sqltypes.Time: sqltypes.Time,
            BIT: _ZxJDBCBit
        }
    )

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""
        # Prefer 'character_set_results' for the current connection over the
        # value in the driver.  SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs))
        for key in ('character_set_connection', 'character_set'):
            if opts.get(key, None):
                return opts[key]

        util.warn("Could not detect the connection character set. Assuming latin1.")
        return 'latin1'

    def _driver_kwargs(self):
        """return kw arg dict to be sent to connect()."""
        return dict(characterEncoding='UTF-8', yearIsDateType='false')

    def _extract_error_code(self, exception):
        # e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
        # [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
        m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.orig.args))
        c = m.group(1)
        if c:
            return int(c)

    def _get_server_version_info(self, connection):
        dbapi_con = connection.connection
        version = []
        r = re.compile('[.\-]')
        for n in r.split(dbapi_con.dbversion):
            try:
                version.append(int(n))
            except ValueError:
                version.append(n)
        return tuple(version)

dialect = MySQLDialect_zxjdbc
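The byte-array folding in _ZxJDBCBit above can be checked interactively; a two-byte array from Connector/J folds like this::

    >>> v = 0
    >>> for i in [1, 0]:          # big-endian bytes [1, 0]
    ...     v = v << 8 | (i & 0xff)
    ...
    >>> v
    256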
17
sqlalchemy/dialects/oracle/__init__.py
Normal file
@@ -0,0 +1,17 @@
from sqlalchemy.dialects.oracle import base, cx_oracle, zxjdbc

base.dialect = cx_oracle.dialect

from sqlalchemy.dialects.oracle.base import \
    VARCHAR, NVARCHAR, CHAR, DATE, DATETIME, NUMBER, \
    BLOB, BFILE, CLOB, NCLOB, TIMESTAMP, RAW, \
    FLOAT, DOUBLE_PRECISION, LONG, dialect, INTERVAL, \
    VARCHAR2, NVARCHAR2


__all__ = (
    'VARCHAR', 'NVARCHAR', 'CHAR', 'DATE', 'DATETIME', 'NUMBER',
    'BLOB', 'BFILE', 'CLOB', 'NCLOB', 'TIMESTAMP', 'RAW',
    'FLOAT', 'DOUBLE_PRECISION', 'LONG', 'dialect', 'INTERVAL',
    'VARCHAR2', 'NVARCHAR2'
)
1030
sqlalchemy/dialects/oracle/base.py
Normal file
File diff suppressed because it is too large
529
sqlalchemy/dialects/oracle/cx_oracle.py
Normal file
@@ -0,0 +1,529 @@
"""Support for the Oracle database via the cx_oracle driver.

Driver
------

The Oracle dialect uses the cx_oracle driver, available at
http://cx-oracle.sourceforge.net/ . The dialect has several behaviors
which are specifically tailored towards compatibility with this module.

Connecting
----------

Connecting with create_engine() uses the standard URL approach of
``oracle://user:pass@host:port/dbname[?key=value&key=value...]``. If dbname is present, the
host, port, and dbname tokens are converted to a TNS name using the cx_oracle
:func:`makedsn()` function. Otherwise, the host token is taken directly as a TNS name.

Additional arguments which may be specified either as query string arguments on the
URL, or as keyword arguments to :func:`~sqlalchemy.create_engine()` are:

* *allow_twophase* - enable two-phase transactions. Defaults to ``True``.

* *arraysize* - set the cx_oracle.arraysize value on cursors; in SQLAlchemy
  it defaults to 50. See the section on "LOB Objects" below.

* *auto_convert_lobs* - defaults to ``True``; see the section on LOB objects.

* *auto_setinputsizes* - the cx_oracle.setinputsizes() call is issued for all bind parameters.
  This is required for LOB datatypes but can be disabled to reduce overhead. Defaults
  to ``True``.

* *mode* - This is given the string value of SYSDBA or SYSOPER, or alternatively an
  integer value. This value is only available as a URL query string argument.

* *threaded* - enable multithreaded access to cx_oracle connections. Defaults
  to ``True``. Note that this is the opposite default of cx_oracle itself.
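
As an illustration only (host, credentials and TNS names below are
placeholders, not part of this module)::

    from sqlalchemy import create_engine

    # dbname present: host, port and dbname are converted via makedsn()
    engine = create_engine("oracle://scott:tiger@localhost:1521/orcl?arraysize=100")

    # no dbname: the host token is treated as a TNS name
    engine = create_engine("oracle://scott:tiger@tnsname", threaded=False)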

Unicode
-------

As of cx_oracle 5, Python unicode objects can be bound directly to statements,
and it appears that cx_oracle can handle these even without NLS_LANG being set.
SQLAlchemy tests for version 5 and will pass unicode objects straight to cx_oracle
if this is the case. For older versions of cx_oracle, SQLAlchemy will encode bind
parameters normally using dialect.encoding as the encoding.

LOB Objects
-----------

cx_oracle presents some challenges when fetching LOB objects. A LOB object in a result set
is presented by cx_oracle as a cx_oracle.LOB object which has a read() method. By default,
SQLAlchemy converts these LOB objects into Python strings. This is for two reasons. First,
the LOB object requires an active cursor association, meaning that if you were to fetch many rows
at once such that cx_oracle had to go back to the database and fetch a new batch of rows,
the LOB objects in the already-fetched rows would become unreadable and raise an error.
SQLAlchemy "pre-reads" all LOBs so that their data is fetched before further rows are read.
The size of a "batch of rows" is controlled by the cursor.arraysize value, which SQLAlchemy
defaults to 50 (cx_oracle normally defaults this to one).

Secondly, the LOB object is not a standard DBAPI return value, so SQLAlchemy seeks to
"normalize" the results to look more like those of other DBAPIs.

The conversion of LOB objects by this dialect is unique in SQLAlchemy in that it takes place
for all statement executions, even plain string-based statements for which SQLAlchemy has no
awareness of result typing. This is so that calls like fetchmany() and fetchall() can work in
all cases without raising cursor errors. The conversion of LOBs in all cases, as well as the
"prefetch" of LOB objects, can be disabled using auto_convert_lobs=False.
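
For example, to receive the raw ``cx_oracle.LOB`` objects instead, a sketch
(the URL, table and column names are placeholders)::

    engine = create_engine("oracle://scott:tiger@tnsname",
                           auto_convert_lobs=False)
    result = engine.execute("SELECT blob_col FROM some_table")
    for row in result:
        # read() must be called while the originating cursor batch is still valid
        data = row['blob_col'].read()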

Two Phase Transaction Support
-----------------------------

Two Phase transactions are implemented using XA transactions. Success has been reported
with this feature, but it should be regarded as experimental.
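
A usage sketch, assuming ``engine`` refers to an XA-capable Oracle database
and ``some_table`` is a placeholder::

    conn = engine.connect()
    xa = conn.begin_twophase()
    conn.execute(some_table.insert(), data='value')
    xa.prepare()
    xa.commit()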

"""

from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, \
    RESERVED_WORDS, OracleExecutionContext
from sqlalchemy.dialects.oracle import base as oracle
from sqlalchemy.engine import base
from sqlalchemy import types as sqltypes, util, exc
from datetime import datetime
import random

class _OracleNumeric(sqltypes.Numeric):
    # cx_oracle accepts Decimal objects, but returns
    # floats
    def bind_processor(self, dialect):
        return None

class _OracleDate(sqltypes.Date):
    def bind_processor(self, dialect):
        return None

    def result_processor(self, dialect, coltype):
        def process(value):
            if value is not None:
                return value.date()
            else:
                return value
        return process

class _LOBMixin(object):
    def result_processor(self, dialect, coltype):
        if not dialect.auto_convert_lobs:
            # return the cx_oracle.LOB directly.
            return None

        def process(value):
            if value is not None:
                return value.read()
            else:
                return value
        return process

class _NativeUnicodeMixin(object):
    # Py2K
    def bind_processor(self, dialect):
        if dialect._cx_oracle_with_unicode:
            def process(value):
                if value is None:
                    return value
                else:
                    return unicode(value)
            return process
        else:
            return super(_NativeUnicodeMixin, self).bind_processor(dialect)
    # end Py2K

    def result_processor(self, dialect, coltype):
        # if we know cx_Oracle will return unicode,
        # don't process results
        if dialect._cx_oracle_with_unicode:
            return None
        elif self.convert_unicode != 'force' and \
                dialect._cx_oracle_native_nvarchar and \
                coltype in dialect._cx_oracle_unicode_types:
            return None
        else:
            return super(_NativeUnicodeMixin, self).result_processor(dialect, coltype)

class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR):
    def get_dbapi_type(self, dbapi):
        return dbapi.FIXED_CHAR

class _OracleNVarChar(_NativeUnicodeMixin, sqltypes.NVARCHAR):
    def get_dbapi_type(self, dbapi):
        return getattr(dbapi, 'UNICODE', dbapi.STRING)

class _OracleText(_LOBMixin, sqltypes.Text):
    def get_dbapi_type(self, dbapi):
        return dbapi.CLOB

class _OracleString(_NativeUnicodeMixin, sqltypes.String):
    pass

class _OracleUnicodeText(_LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText):
    def get_dbapi_type(self, dbapi):
        return dbapi.NCLOB

    def result_processor(self, dialect, coltype):
        lob_processor = _LOBMixin.result_processor(self, dialect, coltype)
        if lob_processor is None:
            return None

        string_processor = _NativeUnicodeMixin.result_processor(self, dialect, coltype)

        if string_processor is None:
            return lob_processor
        else:
            def process(value):
                return string_processor(lob_processor(value))
            return process

class _OracleInteger(sqltypes.Integer):
    def result_processor(self, dialect, coltype):
        def to_int(val):
            if val is not None:
                val = int(val)
            return val
        return to_int

class _OracleBinary(_LOBMixin, sqltypes.LargeBinary):
    def get_dbapi_type(self, dbapi):
        return dbapi.BLOB

    def bind_processor(self, dialect):
        return None

class _OracleInterval(oracle.INTERVAL):
    def get_dbapi_type(self, dbapi):
        return dbapi.INTERVAL

class _OracleRaw(oracle.RAW):
    pass

class OracleCompiler_cx_oracle(OracleCompiler):
    def bindparam_string(self, name):
        if self.preparer._bindparam_requires_quotes(name):
            quoted_name = '"%s"' % name
            self._quoted_bind_names[name] = quoted_name
            return OracleCompiler.bindparam_string(self, quoted_name)
        else:
            return OracleCompiler.bindparam_string(self, name)


class OracleExecutionContext_cx_oracle(OracleExecutionContext):

    def pre_exec(self):
        quoted_bind_names = \
            getattr(self.compiled, '_quoted_bind_names', None)
        if quoted_bind_names:
            if not self.dialect.supports_unicode_binds:
                quoted_bind_names = \
                    dict(
                        (fromname, toname.encode(self.dialect.encoding))
                        for fromname, toname in
                        quoted_bind_names.items()
                    )
            for param in self.parameters:
                for fromname, toname in quoted_bind_names.items():
                    param[toname] = param[fromname]
                    del param[fromname]

        if self.dialect.auto_setinputsizes:
            # cx_oracle really has issues when you setinputsizes
            # on String, including that outparams/RETURNING
            # breaks for varchars
            self.set_input_sizes(quoted_bind_names,
                                 exclude_types=self.dialect._cx_oracle_string_types
                                 )

        # if a single execute, check for outparams
        if len(self.compiled_parameters) == 1:
            for bindparam in self.compiled.binds.values():
                if bindparam.isoutparam:
                    dbtype = bindparam.type.dialect_impl(self.dialect).\
                        get_dbapi_type(self.dialect.dbapi)
                    if not hasattr(self, 'out_parameters'):
                        self.out_parameters = {}
                    # resolve the name before the error path so the message
                    # below can reference it
                    name = self.compiled.bind_names[bindparam]
                    if dbtype is None:
                        raise exc.InvalidRequestError(
                            "Cannot create out parameter for parameter "
                            "%r - its type %r is not supported by"
                            " cx_oracle" %
                            (name, bindparam.type)
                        )
                    self.out_parameters[name] = self.cursor.var(dbtype)
                    self.parameters[0][quoted_bind_names.get(name, name)] = \
                        self.out_parameters[name]

    def create_cursor(self):
        c = self._connection.connection.cursor()
        if self.dialect.arraysize:
            c.arraysize = self.dialect.arraysize
        return c

    def get_result_proxy(self):
        if hasattr(self, 'out_parameters') and self.compiled.returning:
            returning_params = dict(
                (k, v.getvalue())
                for k, v in self.out_parameters.items()
            )
            return ReturningResultProxy(self, returning_params)

        result = None
        if self.cursor.description is not None:
            for column in self.cursor.description:
                type_code = column[1]
                if type_code in self.dialect._cx_oracle_binary_types:
                    result = base.BufferedColumnResultProxy(self)

        if result is None:
            result = base.ResultProxy(self)

        if hasattr(self, 'out_parameters'):
            if self.compiled_parameters is not None and \
                    len(self.compiled_parameters) == 1:
                result.out_parameters = out_parameters = {}

                for bind, name in self.compiled.bind_names.items():
                    if name in self.out_parameters:
                        type = bind.type
                        impl_type = type.dialect_impl(self.dialect)
                        dbapi_type = impl_type.get_dbapi_type(self.dialect.dbapi)
                        result_processor = impl_type.\
                            result_processor(self.dialect,
                                             dbapi_type)
                        if result_processor is not None:
                            out_parameters[name] = \
                                result_processor(self.out_parameters[name].getvalue())
                        else:
                            out_parameters[name] = self.out_parameters[name].getvalue()
            else:
                result.out_parameters = dict(
                    (k, v.getvalue())
                    for k, v in self.out_parameters.items()
                )

        return result


class OracleExecutionContext_cx_oracle_with_unicode(OracleExecutionContext_cx_oracle):
    """Support WITH_UNICODE in Python 2.xx.

    WITH_UNICODE allows cx_Oracle's Python 3 unicode handling
    behavior under Python 2.x. This mode in some cases disallows,
    and in other cases silently passes, corrupted data when
    non-Python-unicode strings (a.k.a. plain old Python strings)
    are passed as arguments to connect(), the statement sent to execute(),
    or any of the bind parameter keys or values sent to execute().
    This optional context therefore ensures that all statements are
    passed as Python unicode objects.

    """
    def __init__(self, *arg, **kw):
        OracleExecutionContext_cx_oracle.__init__(self, *arg, **kw)
        self.statement = unicode(self.statement)

    def _execute_scalar(self, stmt):
        return super(OracleExecutionContext_cx_oracle_with_unicode, self).\
            _execute_scalar(unicode(stmt))


class ReturningResultProxy(base.FullyBufferedResultProxy):
    """Result proxy which stuffs the _returning clause + outparams into the fetch."""

    def __init__(self, context, returning_params):
        self._returning_params = returning_params
        super(ReturningResultProxy, self).__init__(context)

    def _cursor_description(self):
        returning = self.context.compiled.returning

        ret = []
        for c in returning:
            if hasattr(c, 'name'):
                ret.append((c.name, c.type))
            else:
                ret.append((c.anon_label, c.type))
        return ret

    def _buffer_rows(self):
        return [tuple(self._returning_params["ret_%d" % i]
                      for i, c in enumerate(self._returning_params))]


class OracleDialect_cx_oracle(OracleDialect):
    execution_ctx_cls = OracleExecutionContext_cx_oracle
    statement_compiler = OracleCompiler_cx_oracle
    driver = "cx_oracle"

    colspecs = {
        sqltypes.Numeric: _OracleNumeric,
        sqltypes.Date: _OracleDate,       # generic type, assume datetime.date is desired
        oracle.DATE: oracle.DATE,         # non-generic type - pass through
        sqltypes.LargeBinary: _OracleBinary,
        sqltypes.Boolean: oracle._OracleBoolean,
        sqltypes.Interval: _OracleInterval,
        oracle.INTERVAL: _OracleInterval,
        sqltypes.Text: _OracleText,
        sqltypes.String: _OracleString,
        sqltypes.UnicodeText: _OracleUnicodeText,
        sqltypes.CHAR: _OracleChar,
        sqltypes.Integer: _OracleInteger, # this is only needed for OUT parameters.
                                          # it would be nice if we could not use it otherwise.
        oracle.NUMBER: oracle.NUMBER,     # don't let this get converted
        oracle.RAW: _OracleRaw,
        sqltypes.Unicode: _OracleNVarChar,
        sqltypes.NVARCHAR: _OracleNVarChar,
    }


    execute_sequence_format = list

    def __init__(self,
                 auto_setinputsizes=True,
                 auto_convert_lobs=True,
                 threaded=True,
                 allow_twophase=True,
                 arraysize=50, **kwargs):
        OracleDialect.__init__(self, **kwargs)
        self.threaded = threaded
        self.arraysize = arraysize
        self.allow_twophase = allow_twophase
        self.supports_timestamp = self.dbapi is None or hasattr(self.dbapi, 'TIMESTAMP')
        self.auto_setinputsizes = auto_setinputsizes
        self.auto_convert_lobs = auto_convert_lobs

        if hasattr(self.dbapi, 'version'):
            cx_oracle_ver = tuple([int(x) for x in self.dbapi.version.split('.')])
        else:
            cx_oracle_ver = (0, 0, 0)

        def types(*names):
            return set([
                getattr(self.dbapi, name, None) for name in names
            ]).difference([None])

        self._cx_oracle_string_types = types("STRING", "UNICODE", "NCLOB", "CLOB")
        self._cx_oracle_unicode_types = types("UNICODE", "NCLOB")
        self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB")
        self.supports_unicode_binds = cx_oracle_ver >= (5, 0)
        self._cx_oracle_native_nvarchar = cx_oracle_ver >= (5, 0)

        if self.dbapi is None:
            # this occurs in tests with mock DBAPIs
            self._cx_oracle_string_types = set()
            self._cx_oracle_with_unicode = False
        elif cx_oracle_ver >= (5,) and not hasattr(self.dbapi, 'UNICODE'):
            # cx_Oracle WITH_UNICODE mode. *only* python
            # unicode objects accepted for anything
            self.supports_unicode_statements = True
            self.supports_unicode_binds = True
            self._cx_oracle_with_unicode = True
            # Py2K
            # There's really no reason to run with WITH_UNICODE under Python 2.x.
            # Give the user a hint.
            util.warn("cx_Oracle is compiled under Python 2.xx using the "
                      "WITH_UNICODE flag. Consider recompiling cx_Oracle without "
                      "this flag, which is in no way necessary for full support of Unicode. "
                      "Otherwise, all string-holding bind parameters must "
                      "be explicitly typed using SQLAlchemy's String type or one of its "
                      "subtypes, or otherwise be passed as Python unicode. Plain Python "
                      "strings passed as bind parameters will be silently corrupted "
                      "by cx_Oracle."
                      )
            self.execution_ctx_cls = OracleExecutionContext_cx_oracle_with_unicode
            # end Py2K
        else:
            self._cx_oracle_with_unicode = False

        if self.dbapi is None or \
                not self.auto_convert_lobs or \
                not hasattr(self.dbapi, 'CLOB'):
            self.dbapi_type_map = {}
        else:
            # only use this for LOB objects. using it for strings, dates
            # etc. leads to a little too much magic; reflection doesn't know if it should
            # expect encoded strings or unicodes, etc.
            self.dbapi_type_map = {
                self.dbapi.CLOB: oracle.CLOB(),
                self.dbapi.NCLOB: oracle.NCLOB(),
                self.dbapi.BLOB: oracle.BLOB(),
                self.dbapi.BINARY: oracle.RAW(),
            }

    @classmethod
    def dbapi(cls):
        import cx_Oracle
        return cx_Oracle

    def create_connect_args(self, url):
        dialect_opts = dict(url.query)
        for opt in ('use_ansi', 'auto_setinputsizes', 'auto_convert_lobs',
                    'threaded', 'allow_twophase'):
            if opt in dialect_opts:
                util.coerce_kw_type(dialect_opts, opt, bool)
                setattr(self, opt, dialect_opts[opt])

        if url.database:
            # if we have a database, then we have a remote host
            port = url.port
            if port:
                port = int(port)
            else:
                port = 1521
            dsn = self.dbapi.makedsn(url.host, port, url.database)
        else:
            # we have a local tnsname
            dsn = url.host

        opts = dict(
            user=url.username,
            password=url.password,
            dsn=dsn,
            threaded=self.threaded,
            twophase=self.allow_twophase,
        )

        # Py2K
        if self._cx_oracle_with_unicode:
            for k, v in opts.items():
                if isinstance(v, str):
                    opts[k] = unicode(v)
        # end Py2K

        if 'mode' in url.query:
            opts['mode'] = url.query['mode']
            if isinstance(opts['mode'], basestring):
                mode = opts['mode'].upper()
                if mode == 'SYSDBA':
                    opts['mode'] = self.dbapi.SYSDBA
                elif mode == 'SYSOPER':
                    opts['mode'] = self.dbapi.SYSOPER
                else:
                    util.coerce_kw_type(opts, 'mode', int)
        return ([], opts)

    def _get_server_version_info(self, connection):
        return tuple(int(x) for x in connection.connection.version.split('.'))

    def is_disconnect(self, e):
        if isinstance(e, self.dbapi.InterfaceError):
            return "not connected" in str(e)
        else:
            return "ORA-03114" in str(e) or "ORA-03113" in str(e)

    def create_xid(self):
        """create a two-phase transaction ID.

        this id will be passed to do_begin_twophase(), do_rollback_twophase(),
        do_commit_twophase(). its format is unspecified."""

        id = random.randint(0, 2 ** 128)
        return (0x1234, "%032x" % id, "%032x" % 9)

    def do_begin_twophase(self, connection, xid):
        connection.connection.begin(*xid)

    def do_prepare_twophase(self, connection, xid):
        connection.connection.prepare()

    def do_rollback_twophase(self, connection, xid, is_prepared=True, recover=False):
        self.do_rollback(connection.connection)

    def do_commit_twophase(self, connection, xid, is_prepared=True, recover=False):
        self.do_commit(connection.connection)

    def do_recover_twophase(self, connection):
        pass

dialect = OracleDialect_cx_oracle
209
sqlalchemy/dialects/oracle/zxjdbc.py
Normal file
@@ -0,0 +1,209 @@
"""Support for the Oracle database via the zxjdbc JDBC connector.

JDBC Driver
-----------

The official Oracle JDBC driver is at
http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html.
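
A connection sketch (Jython is required; host, SID and credentials below are
placeholders)::

    from sqlalchemy import create_engine

    engine = create_engine("oracle+zxjdbc://scott:tiger@localhost:1521/orcl")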

"""
import decimal
import re

from sqlalchemy import sql, types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext
from sqlalchemy.engine import base, default
from sqlalchemy.sql import expression

SQLException = zxJDBC = None

class _ZxJDBCDate(sqltypes.Date):

    def result_processor(self, dialect, coltype):
        def process(value):
            if value is None:
                return None
            else:
                return value.date()
        return process


class _ZxJDBCNumeric(sqltypes.Numeric):

    def result_processor(self, dialect, coltype):
        #XXX: does the dialect return Decimal or not???
        # if it does (in all cases), we could use a None processor as well as
        # the to_float generic processor
        if self.asdecimal:
            def process(value):
                if isinstance(value, decimal.Decimal):
                    return value
                else:
                    return decimal.Decimal(str(value))
        else:
            def process(value):
                if isinstance(value, decimal.Decimal):
                    return float(value)
                else:
                    return value
        return process


class OracleCompiler_zxjdbc(OracleCompiler):

    def returning_clause(self, stmt, returning_cols):
        self.returning_cols = list(expression._select_iterables(returning_cols))

        # within_columns_clause=False so that labels (foo AS bar) don't render
        columns = [self.process(c, within_columns_clause=False, result_map=self.result_map)
                   for c in self.returning_cols]

        if not hasattr(self, 'returning_parameters'):
            self.returning_parameters = []

        binds = []
        for i, col in enumerate(self.returning_cols):
            dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi)
            self.returning_parameters.append((i + 1, dbtype))

            bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype))
            self.binds[bindparam.key] = bindparam
            binds.append(self.bindparam_string(self._truncate_bindparam(bindparam)))

        return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)


class OracleExecutionContext_zxjdbc(OracleExecutionContext):

    def pre_exec(self):
        if hasattr(self.compiled, 'returning_parameters'):
            # prepare a zxJDBC statement so we can grab its underlying
            # OraclePreparedStatement's getReturnResultSet later
            self.statement = self.cursor.prepare(self.statement)

    def get_result_proxy(self):
        if hasattr(self.compiled, 'returning_parameters'):
            rrs = None
            try:
                try:
                    rrs = self.statement.__statement__.getReturnResultSet()
                    rrs.next()
                except SQLException, sqle:
                    msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode())
                    if sqle.getSQLState() is not None:
                        msg += ' [SQLState: %s]' % sqle.getSQLState()
                    raise zxJDBC.Error(msg)
                else:
                    row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype)
                                for index, dbtype in self.compiled.returning_parameters)
                    return ReturningResultProxy(self, row)
            finally:
                if rrs is not None:
                    try:
                        rrs.close()
                    except SQLException:
                        pass
                self.statement.close()

        return base.ResultProxy(self)

    def create_cursor(self):
        cursor = self._connection.connection.cursor()
        cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
        return cursor


class ReturningResultProxy(base.FullyBufferedResultProxy):

    """ResultProxy backed by the RETURNING ResultSet results."""

    def __init__(self, context, returning_row):
        self._returning_row = returning_row
        super(ReturningResultProxy, self).__init__(context)

    def _cursor_description(self):
        ret = []
        for c in self.context.compiled.returning_cols:
            if hasattr(c, 'name'):
                ret.append((c.name, c.type))
            else:
                ret.append((c.anon_label, c.type))
        return ret

    def _buffer_rows(self):
        return [self._returning_row]


class ReturningParam(object):

    """A bindparam value representing a RETURNING parameter.

    Specially handled by OracleReturningDataHandler.
    """

    def __init__(self, type):
        self.type = type

    def __eq__(self, other):
        if isinstance(other, ReturningParam):
            return self.type == other.type
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, ReturningParam):
            return self.type != other.type
        return NotImplemented

    def __repr__(self):
        kls = self.__class__
        return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__, id(self),
                                                   self.type)


class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
    jdbc_db_name = 'oracle'
    jdbc_driver_name = 'oracle.jdbc.OracleDriver'

    statement_compiler = OracleCompiler_zxjdbc
    execution_ctx_cls = OracleExecutionContext_zxjdbc

    colspecs = util.update_copy(
        OracleDialect.colspecs,
        {
            sqltypes.Date: _ZxJDBCDate,
            sqltypes.Numeric: _ZxJDBCNumeric
        }
    )

    def __init__(self, *args, **kwargs):
        super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
        global SQLException, zxJDBC
        from java.sql import SQLException
        from com.ziclix.python.sql import zxJDBC
        from com.ziclix.python.sql.handler import OracleDataHandler

        class OracleReturningDataHandler(OracleDataHandler):

            """zxJDBC DataHandler that specially handles ReturningParam."""

            def setJDBCObject(self, statement, index, object, dbtype=None):
                if type(object) is ReturningParam:
                    statement.registerReturnParameter(index, object.type)
                elif dbtype is None:
                    OracleDataHandler.setJDBCObject(self, statement, index, object)
                else:
                    OracleDataHandler.setJDBCObject(self, statement, index, object, dbtype)

        self.DataHandler = OracleReturningDataHandler

    def initialize(self, connection):
        super(OracleDialect_zxjdbc, self).initialize(connection)
        self.implicit_returning = connection.connection.driverversion >= '10.2'

    def _create_jdbc_url(self, url):
        return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database)

    def _get_server_version_info(self, connection):
        version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
        return tuple(int(x) for x in version.split('.'))

dialect = OracleDialect_zxjdbc
10
sqlalchemy/dialects/postgres.py
Normal file
@@ -0,0 +1,10 @@
# backwards compat with the old name
from sqlalchemy.util import warn_deprecated

warn_deprecated(
    "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'. "
    "The new URL format is postgresql[+driver]://<user>:<pass>@<host>/<dbname>"
)

from sqlalchemy.dialects.postgresql import *
from sqlalchemy.dialects.postgresql import base
14
sqlalchemy/dialects/postgresql/__init__.py
Normal file
@@ -0,0 +1,14 @@
from sqlalchemy.dialects.postgresql import base, psycopg2, pg8000, pypostgresql, zxjdbc

base.dialect = psycopg2.dialect

from sqlalchemy.dialects.postgresql.base import \
    INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, INET, \
    CIDR, UUID, BIT, MACADDR, DOUBLE_PRECISION, TIMESTAMP, TIME, \
    DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect

__all__ = (
    'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC', 'FLOAT', 'REAL', 'INET',
    'CIDR', 'UUID', 'BIT', 'MACADDR', 'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME',
    'DATE', 'BYTEA', 'BOOLEAN', 'INTERVAL', 'ARRAY', 'ENUM', 'dialect'
)
1161
sqlalchemy/dialects/postgresql/base.py
Normal file
File diff suppressed because it is too large
105
sqlalchemy/dialects/postgresql/pg8000.py
Normal file
@@ -0,0 +1,105 @@
"""Support for the PostgreSQL database via the pg8000 driver.

Connecting
----------

URLs are of the form
`postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]`.

Unicode
-------

pg8000 requires that the PostgreSQL client encoding be configured in the postgresql.conf file
in order to use encodings other than ascii. Set this value to the same value as
the "encoding" parameter on create_engine(), usually "utf-8".
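
For example, assuming ``client_encoding = 'utf8'`` (or an equivalent setting)
in postgresql.conf, a matching engine might be created as (URL and credentials
are placeholders)::

    from sqlalchemy import create_engine

    engine = create_engine("postgresql+pg8000://scott:tiger@localhost/test",
                           encoding="utf-8")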

Interval
--------

Passing data from/to the Interval type is not supported as of yet.

"""
import decimal

from sqlalchemy.engine import default
from sqlalchemy import util, exc
from sqlalchemy import processors
from sqlalchemy import types as sqltypes
from sqlalchemy.dialects.postgresql.base import PGDialect, \
    PGCompiler, PGIdentifierPreparer, PGExecutionContext

class _PGNumeric(sqltypes.Numeric):
    def result_processor(self, dialect, coltype):
        if self.asdecimal:
            if coltype in (700, 701):
                return processors.to_decimal_processor_factory(decimal.Decimal)
            elif coltype == 1700:
                # pg8000 returns Decimal natively for 1700
                return None
            else:
                raise exc.InvalidRequestError("Unknown PG numeric type: %d" % coltype)
        else:
            if coltype in (700, 701):
                # pg8000 returns float natively for 701
                return None
            elif coltype == 1700:
                return processors.to_float
            else:
                raise exc.InvalidRequestError("Unknown PG numeric type: %d" % coltype)

class PGExecutionContext_pg8000(PGExecutionContext):
    pass


class PGCompiler_pg8000(PGCompiler):
    def visit_mod(self, binary, **kw):
        return self.process(binary.left) + " %% " + self.process(binary.right)

    def post_process_text(self, text):
        if '%%' in text:
            util.warn("The SQLAlchemy postgresql dialect now automatically escapes '%' in text() "
                      "expressions to '%%'.")
        return text.replace('%', '%%')


class PGIdentifierPreparer_pg8000(PGIdentifierPreparer):
    def _escape_identifier(self, value):
        value = value.replace(self.escape_quote, self.escape_to_quote)
        return value.replace('%', '%%')


class PGDialect_pg8000(PGDialect):
    driver = 'pg8000'

    supports_unicode_statements = True

    supports_unicode_binds = True

    default_paramstyle = 'format'
    supports_sane_multi_rowcount = False
    execution_ctx_cls = PGExecutionContext_pg8000
    statement_compiler = PGCompiler_pg8000
    preparer = PGIdentifierPreparer_pg8000

    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric: _PGNumeric,
        }
    )

    @classmethod
    def dbapi(cls):
        return __import__('pg8000').dbapi

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')
        if 'port' in opts:
            opts['port'] = int(opts['port'])
        opts.update(url.query)
        return ([], opts)

    def is_disconnect(self, e):
        return "connection is closed" in str(e)

dialect = PGDialect_pg8000
239
sqlalchemy/dialects/postgresql/psycopg2.py
Normal file
@@ -0,0 +1,239 @@
"""Support for the PostgreSQL database via the psycopg2 driver.

Driver
------

The psycopg2 driver is supported, available at http://pypi.python.org/pypi/psycopg2/ .
The dialect has several behaviors which are specifically tailored towards compatibility
with this module.

Note that psycopg1 is **not** supported.

Connecting
----------

URLs are of the form `postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]`.

psycopg2-specific keyword arguments which are accepted by :func:`~sqlalchemy.create_engine()` are:

* *server_side_cursors* - Enable the usage of "server side cursors" for SQL statements which support
  this feature. What this essentially means from a psycopg2 point of view is that the cursor is
  created using a name, e.g. `connection.cursor('some name')`, which has the effect that result rows
  are not immediately pre-fetched and buffered after statement execution, but are instead left
  on the server and only retrieved as needed. SQLAlchemy's :class:`~sqlalchemy.engine.base.ResultProxy`
  uses special row-buffering behavior when this feature is enabled, such that groups of 100 rows
  at a time are fetched over the wire to reduce conversational overhead.
* *use_native_unicode* - Enable the usage of psycopg2 "native unicode" mode per connection. True
  by default.
* *isolation_level* - Sets the transaction isolation level for each transaction
  within the engine. Valid isolation levels are `READ_COMMITTED`,
  `READ_UNCOMMITTED`, `REPEATABLE_READ`, and `SERIALIZABLE`.
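
Combining these, an engine sketch (URL and credentials are placeholders)::

    from sqlalchemy import create_engine

    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
                           server_side_cursors=True,
                           use_native_unicode=True,
                           isolation_level='SERIALIZABLE')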

Transactions
------------

The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
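
For example, SAVEPOINTs are available through nested transactions (a sketch;
``some_table`` is a placeholder)::

    conn = engine.connect()
    trans = conn.begin()
    savepoint = conn.begin_nested()   # emits SAVEPOINT
    try:
        conn.execute(some_table.insert(), data='value')
        savepoint.commit()            # emits RELEASE SAVEPOINT
    except:
        savepoint.rollback()          # emits ROLLBACK TO SAVEPOINT
    trans.commit()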

NOTICE logging
--------------

The psycopg2 dialect will log PostgreSQL NOTICE messages via the
``sqlalchemy.dialects.postgresql`` logger::

    import logging
    logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)


Per-Statement Execution Options
-------------------------------

The following per-statement execution options are respected:

* *stream_results* - Enable or disable usage of server side cursors for the SELECT-statement.
  If *None* or not set, the *server_side_cursors* option of the connection is used. If
  auto-commit is enabled, the option is ignored. See the sketch following this list.
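
For example, to stream one statement's results regardless of the engine-wide
setting (a sketch; ``some_table`` is a placeholder)::

    from sqlalchemy import select

    stmt = select([some_table]).execution_options(stream_results=True)
    for row in engine.execute(stmt):
        print row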

"""

import random
import re
import decimal
import logging

from sqlalchemy import util, exc  # exc is needed for InvalidRequestError below
from sqlalchemy import processors
from sqlalchemy.engine import base, default
from sqlalchemy.sql import expression
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import types as sqltypes
from sqlalchemy.dialects.postgresql.base import PGDialect, PGCompiler, \
    PGIdentifierPreparer, PGExecutionContext, \
    ENUM, ARRAY


logger = logging.getLogger('sqlalchemy.dialects.postgresql')


class _PGNumeric(sqltypes.Numeric):
    def bind_processor(self, dialect):
        return None

    def result_processor(self, dialect, coltype):
        if self.asdecimal:
            if coltype in (700, 701):
                return processors.to_decimal_processor_factory(decimal.Decimal)
            elif coltype == 1700:
                # psycopg2 returns Decimal natively for 1700 (NUMERIC)
                return None
            else:
                raise exc.InvalidRequestError("Unknown PG numeric type: %d" % coltype)
        else:
            if coltype in (700, 701):
                # psycopg2 returns float natively for 701 (FLOAT8)
                return None
            elif coltype == 1700:
                return processors.to_float
            else:
                raise exc.InvalidRequestError("Unknown PG numeric type: %d" % coltype)

class _PGEnum(ENUM):
    def __init__(self, *arg, **kw):
        super(_PGEnum, self).__init__(*arg, **kw)
        if self.convert_unicode:
            self.convert_unicode = "force"

class _PGArray(ARRAY):
    def __init__(self, *arg, **kw):
        super(_PGArray, self).__init__(*arg, **kw)
        # FIXME: this check won't work for setups that
        # have convert_unicode only on their create_engine().
        if isinstance(self.item_type, sqltypes.String) and \
                self.item_type.convert_unicode:
            self.item_type.convert_unicode = "force"

# When we're handed literal SQL, ensure it's a SELECT-query. Since
# 8.3, combining cursors and "FOR UPDATE" has been fine.
SERVER_SIDE_CURSOR_RE = re.compile(
    r'\s*SELECT',
    re.I | re.UNICODE)

class PGExecutionContext_psycopg2(PGExecutionContext):
    def create_cursor(self):
        # TODO: coverage for server side cursors + select.for_update()

        if self.dialect.server_side_cursors:
            is_server_side = \
                self.execution_options.get('stream_results', True) and (
                    (self.compiled and
                     isinstance(self.compiled.statement, expression.Selectable))
                    or
                    (
                        (not self.compiled or
                         isinstance(self.compiled.statement, expression._TextClause))
                        and self.statement and
                        SERVER_SIDE_CURSOR_RE.match(self.statement)
                    )
                )
        else:
            is_server_side = self.execution_options.get('stream_results', False)

        self.__is_server_side = is_server_side
        if is_server_side:
            # use server-side cursors:
            # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
            ident = "c_%s_%s" % (hex(id(self))[2:], hex(random.randint(0, 65535))[2:])
            return self._connection.connection.cursor(ident)
        else:
            return self._connection.connection.cursor()

    def get_result_proxy(self):
        if logger.isEnabledFor(logging.INFO):
            self._log_notices(self.cursor)

        if self.__is_server_side:
            return base.BufferedRowResultProxy(self)
        else:
            return base.ResultProxy(self)

    def _log_notices(self, cursor):
        for notice in cursor.connection.notices:
            # NOTICE messages have a
            # newline character at the end
            logger.info(notice.rstrip())

        cursor.connection.notices[:] = []


class PGCompiler_psycopg2(PGCompiler):
    def visit_mod(self, binary, **kw):
        return self.process(binary.left) + " %% " + self.process(binary.right)

    def post_process_text(self, text):
        return text.replace('%', '%%')


class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
    def _escape_identifier(self, value):
        value = value.replace(self.escape_quote, self.escape_to_quote)
        return value.replace('%', '%%')

class PGDialect_psycopg2(PGDialect):
    driver = 'psycopg2'
    supports_unicode_statements = False
    default_paramstyle = 'pyformat'
    supports_sane_multi_rowcount = False
    execution_ctx_cls = PGExecutionContext_psycopg2
    statement_compiler = PGCompiler_psycopg2
    preparer = PGIdentifierPreparer_psycopg2

    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric: _PGNumeric,
            ENUM: _PGEnum,           # needs force_unicode
            sqltypes.Enum: _PGEnum,  # needs force_unicode
            ARRAY: _PGArray,         # needs force_unicode
        }
    )

    def __init__(self, server_side_cursors=False, use_native_unicode=True, **kwargs):
        PGDialect.__init__(self, **kwargs)
        self.server_side_cursors = server_side_cursors
        self.use_native_unicode = use_native_unicode
        self.supports_unicode_binds = use_native_unicode

    @classmethod
    def dbapi(cls):
        psycopg = __import__('psycopg2')
        return psycopg

    def on_connect(self):
        base_on_connect = super(PGDialect_psycopg2, self).on_connect()
        if self.dbapi and self.use_native_unicode:
            extensions = __import__('psycopg2.extensions').extensions
            def connect(conn):
                extensions.register_type(extensions.UNICODE, conn)
                if base_on_connect:
                    base_on_connect(conn)
            return connect
        else:
            return base_on_connect

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')
        if 'port' in opts:
            opts['port'] = int(opts['port'])
        opts.update(url.query)
        return ([], opts)

    def is_disconnect(self, e):
        if isinstance(e, self.dbapi.OperationalError):
            return 'closed the connection' in str(e) or 'connection not open' in str(e)
        elif isinstance(e, self.dbapi.InterfaceError):
            return 'connection already closed' in str(e) or 'cursor already closed' in str(e)
        elif isinstance(e, self.dbapi.ProgrammingError):
            # yes, it really says "losed", not "closed"
            return "losed the connection unexpectedly" in str(e)
        else:
            return False

dialect = PGDialect_psycopg2
69
sqlalchemy/dialects/postgresql/pypostgresql.py
Normal file
@@ -0,0 +1,69 @@
"""Support for the PostgreSQL database via py-postgresql.

Connecting
----------

URLs are of the form `postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]`.


"""
from sqlalchemy.engine import default
import decimal
from sqlalchemy import util
from sqlalchemy import types as sqltypes
from sqlalchemy.dialects.postgresql.base import PGDialect, PGExecutionContext
from sqlalchemy import processors

class PGNumeric(sqltypes.Numeric):
    def bind_processor(self, dialect):
        return processors.to_str

    def result_processor(self, dialect, coltype):
        if self.asdecimal:
            return None
        else:
            return processors.to_float

class PGExecutionContext_pypostgresql(PGExecutionContext):
    pass

class PGDialect_pypostgresql(PGDialect):
    driver = 'pypostgresql'

    supports_unicode_statements = True
    supports_unicode_binds = True
    description_encoding = None
    default_paramstyle = 'pyformat'

    # requires trunk version to support sane rowcounts
    # TODO: use dbapi version information to set this flag appropriately
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = False

    execution_ctx_cls = PGExecutionContext_pypostgresql
    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric: PGNumeric,
            sqltypes.Float: sqltypes.Float,  # prevents PGNumeric from being used
        }
    )

    @classmethod
    def dbapi(cls):
        from postgresql.driver import dbapi20
        return dbapi20

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')
        if 'port' in opts:
            opts['port'] = int(opts['port'])
        else:
            opts['port'] = 5432
        opts.update(url.query)
        return ([], opts)

    def is_disconnect(self, e):
        return "connection is closed" in str(e)

dialect = PGDialect_pypostgresql
19
sqlalchemy/dialects/postgresql/zxjdbc.py
Normal file
@@ -0,0 +1,19 @@
"""Support for the PostgreSQL database via the zxjdbc JDBC connector.

JDBC Driver
-----------

The official PostgreSQL JDBC driver is at http://jdbc.postgresql.org/.

"""
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.postgresql.base import PGDialect

class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect):
    jdbc_db_name = 'postgresql'
    jdbc_driver_name = 'org.postgresql.Driver'

    def _get_server_version_info(self, connection):
        return tuple(int(x) for x in connection.connection.dbversion.split('.'))

dialect = PGDialect_zxjdbc
14
sqlalchemy/dialects/sqlite/__init__.py
Normal file
@@ -0,0 +1,14 @@
from sqlalchemy.dialects.sqlite import base, pysqlite

# default dialect
base.dialect = pysqlite.dialect


from sqlalchemy.dialects.sqlite.base import \
    BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, INTEGER,\
    NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR, dialect

__all__ = (
    'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'FLOAT', 'INTEGER',
    'NUMERIC', 'SMALLINT', 'TEXT', 'TIME', 'TIMESTAMP', 'VARCHAR', 'dialect'
)
596
sqlalchemy/dialects/sqlite/base.py
Normal file
@@ -0,0 +1,596 @@
# sqlite.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the SQLite database.

For information on connecting using a specific driver, see the documentation
section regarding that driver.

Date and Time Types
-------------------

SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide
out-of-the-box functionality for translating values between Python `datetime` objects
and a SQLite-supported format. SQLAlchemy's own :class:`~sqlalchemy.types.DateTime`
and related types provide date formatting and parsing functionality when SQLite is used.
The implementation classes are :class:`DATETIME`, :class:`DATE` and :class:`TIME`.
These types represent dates and times as ISO formatted strings, which also nicely
support ordering. There's no reliance on typical "libc" internals for these functions
so historical dates are fully supported.
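
These classes also accept ``storage_format`` and ``regexp`` arguments (see
``_DateTimeMixin`` below). An illustrative sketch; note that DATETIME renders
seven values (year through microsecond), so a custom format and its parsing
regexp must both account for all of them::

    from sqlalchemy.dialects.sqlite import DATETIME

    dt = DATETIME(
        storage_format="%04d%02d%02d%02d%02d%02d%06d",
        regexp=r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})",
    )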
|
||||
|
||||
Auto Incrementing Behavior
|
||||
--------------------------
|
||||
|
||||
Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html
|
||||
|
||||
Two things to note:
|
||||
|
||||
* The AUTOINCREMENT keyword is **not** required for SQLite tables to
|
||||
generate primary key values automatically. AUTOINCREMENT only means that
|
||||
the algorithm used to generate ROWID values should be slightly different.
|
||||
* SQLite does **not** generate primary key (i.e. ROWID) values, even for
|
||||
one column, if the table has a composite (i.e. multi-column) primary key.
|
||||
This is regardless of the AUTOINCREMENT keyword being present or not.
|
||||
|
||||
To specifically render the AUTOINCREMENT keyword on the primary key
|
||||
column when rendering DDL, add the flag ``sqlite_autoincrement=True``
|
||||
to the Table construct::
|
||||
|
||||
Table('sometable', metadata,
|
||||
Column('id', Integer, primary_key=True),
|
||||
sqlite_autoincrement=True)
|
||||
|
||||
"""
|
||||
|
||||
import datetime, re, time
|
||||
|
||||
from sqlalchemy import schema as sa_schema
|
||||
from sqlalchemy import sql, exc, pool, DefaultClause
|
||||
from sqlalchemy.engine import default
|
||||
from sqlalchemy.engine import reflection
|
||||
from sqlalchemy import types as sqltypes
|
||||
from sqlalchemy import util
|
||||
from sqlalchemy.sql import compiler, functions as sql_functions
|
||||
from sqlalchemy.util import NoneType
|
||||
from sqlalchemy import processors
|
||||
|
||||
from sqlalchemy.types import BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL,\
|
||||
FLOAT, INTEGER, NUMERIC, SMALLINT, TEXT, TIME,\
|
||||
TIMESTAMP, VARCHAR
|
||||
|
||||
|
||||
class _DateTimeMixin(object):
|
||||
_reg = None
|
||||
_storage_format = None
|
||||
|
||||
def __init__(self, storage_format=None, regexp=None, **kwargs):
|
||||
if regexp is not None:
|
||||
self._reg = re.compile(regexp)
|
||||
if storage_format is not None:
|
||||
self._storage_format = storage_format
|
||||
|
||||
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
|
||||
_storage_format = "%04d-%02d-%02d %02d:%02d:%02d.%06d"
|
||||
|
||||
def bind_processor(self, dialect):
|
||||
datetime_datetime = datetime.datetime
|
||||
datetime_date = datetime.date
|
||||
format = self._storage_format
|
||||
def process(value):
|
||||
if value is None:
|
||||
return None
|
||||
elif isinstance(value, datetime_datetime):
|
||||
return format % (value.year, value.month, value.day,
|
||||
value.hour, value.minute, value.second,
|
||||
value.microsecond)
|
||||
elif isinstance(value, datetime_date):
|
||||
return format % (value.year, value.month, value.day,
|
||||
0, 0, 0, 0)
|
||||
else:
|
||||
raise TypeError("SQLite DateTime type only accepts Python "
|
||||
"datetime and date objects as input.")
|
||||
return process
|
||||
|
||||
def result_processor(self, dialect, coltype):
|
||||
if self._reg:
|
||||
return processors.str_to_datetime_processor_factory(
|
||||
self._reg, datetime.datetime)
|
||||
else:
|
||||
return processors.str_to_datetime
|
||||
|
||||
class DATE(_DateTimeMixin, sqltypes.Date):
|
||||
_storage_format = "%04d-%02d-%02d"
|
||||
|
||||
def bind_processor(self, dialect):
|
||||
datetime_date = datetime.date
|
||||
format = self._storage_format
|
||||
def process(value):
|
||||
if value is None:
|
||||
return None
|
||||
elif isinstance(value, datetime_date):
|
||||
return format % (value.year, value.month, value.day)
|
||||
else:
|
||||
raise TypeError("SQLite Date type only accepts Python "
|
||||
"date objects as input.")
|
||||
return process
|
||||
|
||||
def result_processor(self, dialect, coltype):
|
||||
if self._reg:
|
||||
return processors.str_to_datetime_processor_factory(
|
||||
self._reg, datetime.date)
|
||||
else:
|
||||
return processors.str_to_date
|
||||
|
||||
class TIME(_DateTimeMixin, sqltypes.Time):
|
||||
_storage_format = "%02d:%02d:%02d.%06d"
|
||||
|
||||
def bind_processor(self, dialect):
|
||||
datetime_time = datetime.time
|
||||
format = self._storage_format
|
||||
def process(value):
|
||||
if value is None:
|
||||
return None
|
||||
elif isinstance(value, datetime_time):
|
||||
return format % (value.hour, value.minute, value.second,
|
||||
value.microsecond)
|
||||
else:
|
||||
raise TypeError("SQLite Time type only accepts Python "
|
||||
"time objects as input.")
|
||||
return process
|
||||
|
||||
def result_processor(self, dialect, coltype):
|
||||
if self._reg:
|
||||
return processors.str_to_datetime_processor_factory(
|
||||
self._reg, datetime.time)
|
||||
else:
|
||||
return processors.str_to_time
|
||||
|
||||
colspecs = {
|
||||
sqltypes.Date: DATE,
|
||||
sqltypes.DateTime: DATETIME,
|
||||
sqltypes.Time: TIME,
|
||||
}
|
||||
|
||||
ischema_names = {
|
||||
'BLOB': sqltypes.BLOB,
|
||||
'BOOL': sqltypes.BOOLEAN,
|
||||
'BOOLEAN': sqltypes.BOOLEAN,
|
||||
'CHAR': sqltypes.CHAR,
|
||||
'DATE': sqltypes.DATE,
|
||||
'DATETIME': sqltypes.DATETIME,
|
||||
'DECIMAL': sqltypes.DECIMAL,
|
||||
'FLOAT': sqltypes.FLOAT,
|
||||
'INT': sqltypes.INTEGER,
|
||||
'INTEGER': sqltypes.INTEGER,
|
||||
'NUMERIC': sqltypes.NUMERIC,
|
||||
'REAL': sqltypes.Numeric,
|
||||
'SMALLINT': sqltypes.SMALLINT,
|
||||
'TEXT': sqltypes.TEXT,
|
||||
'TIME': sqltypes.TIME,
|
||||
'TIMESTAMP': sqltypes.TIMESTAMP,
|
||||
'VARCHAR': sqltypes.VARCHAR,
|
||||
}
|
||||
|
||||
|
||||
|
||||
class SQLiteCompiler(compiler.SQLCompiler):
|
||||
extract_map = util.update_copy(
|
||||
compiler.SQLCompiler.extract_map,
|
||||
{
|
||||
'month': '%m',
|
||||
'day': '%d',
|
||||
'year': '%Y',
|
||||
'second': '%S',
|
||||
'hour': '%H',
|
||||
'doy': '%j',
|
||||
'minute': '%M',
|
||||
'epoch': '%s',
|
||||
'dow': '%w',
|
||||
'week': '%W'
|
||||
})
|
||||
|
||||
def visit_now_func(self, fn, **kw):
|
||||
return "CURRENT_TIMESTAMP"
|
||||
|
||||
def visit_char_length_func(self, fn, **kw):
|
||||
return "length%s" % self.function_argspec(fn)
|
||||
|
||||
def visit_cast(self, cast, **kwargs):
|
||||
if self.dialect.supports_cast:
|
||||
return super(SQLiteCompiler, self).visit_cast(cast)
|
||||
else:
|
||||
return self.process(cast.clause)
|
||||
|
||||
def visit_extract(self, extract, **kw):
|
||||
try:
|
||||
return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
|
||||
self.extract_map[extract.field], self.process(extract.expr, **kw))
|
||||
except KeyError:
|
||||
raise exc.ArgumentError(
|
||||
"%s is not a valid extract argument." % extract.field)
|
||||
|
||||
def limit_clause(self, select):
|
||||
text = ""
|
||||
if select._limit is not None:
|
||||
text += " \n LIMIT " + str(select._limit)
|
||||
if select._offset is not None:
|
||||
if select._limit is None:
|
||||
text += " \n LIMIT -1"
|
||||
text += " OFFSET " + str(select._offset)
|
||||
else:
|
||||
text += " OFFSET 0"
|
||||
return text
|
||||
|
||||
def for_update_clause(self, select):
|
||||
# sqlite has no "FOR UPDATE" AFAICT
|
||||
return ''
|
||||
|
||||
|
||||
class SQLiteDDLCompiler(compiler.DDLCompiler):
|
||||
|
||||
def get_column_specification(self, column, **kwargs):
|
||||
colspec = self.preparer.format_column(column) + " " + self.dialect.type_compiler.process(column.type)
|
||||
default = self.get_column_default_string(column)
|
||||
if default is not None:
|
||||
colspec += " DEFAULT " + default
|
||||
|
||||
if not column.nullable:
|
||||
colspec += " NOT NULL"
|
||||
|
||||
if column.primary_key and \
|
||||
column.table.kwargs.get('sqlite_autoincrement', False) and \
|
||||
len(column.table.primary_key.columns) == 1 and \
|
||||
isinstance(column.type, sqltypes.Integer) and \
|
||||
not column.foreign_keys:
|
||||
colspec += " PRIMARY KEY AUTOINCREMENT"
|
||||
|
||||
return colspec
|
||||
|
||||
def visit_primary_key_constraint(self, constraint):
|
||||
# for columns with sqlite_autoincrement=True,
|
||||
# the PRIMARY KEY constraint can only be inline
|
||||
# with the column itself.
|
||||
if len(constraint.columns) == 1:
|
||||
c = list(constraint)[0]
|
||||
if c.primary_key and \
|
||||
c.table.kwargs.get('sqlite_autoincrement', False) and \
|
||||
isinstance(c.type, sqltypes.Integer) and \
|
||||
not c.foreign_keys:
|
||||
return ''
|
||||
|
||||
return super(SQLiteDDLCompiler, self).\
|
||||
visit_primary_key_constraint(constraint)
|
||||
|
||||
|
||||
def visit_create_index(self, create):
|
||||
index = create.element
|
||||
preparer = self.preparer
|
||||
text = "CREATE "
|
||||
if index.unique:
|
||||
text += "UNIQUE "
|
||||
text += "INDEX %s ON %s (%s)" \
|
||||
% (preparer.format_index(index,
|
||||
name=self._validate_identifier(index.name, True)),
|
||||
preparer.format_table(index.table, use_schema=False),
|
||||
', '.join(preparer.quote(c.name, c.quote)
|
||||
for c in index.columns))
|
||||
return text
|
||||
|
||||
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
|
||||
def visit_large_binary(self, type_):
|
||||
return self.visit_BLOB(type_)
|
||||
|
||||
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
|
||||
reserved_words = set([
|
||||
'add', 'after', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
|
||||
'attach', 'autoincrement', 'before', 'begin', 'between', 'by',
|
||||
'cascade', 'case', 'cast', 'check', 'collate', 'column', 'commit',
|
||||
'conflict', 'constraint', 'create', 'cross', 'current_date',
|
||||
'current_time', 'current_timestamp', 'database', 'default',
|
||||
'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct',
|
||||
'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive',
|
||||
'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob',
|
||||
'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index',
|
||||
'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect', 'into', 'is',
|
||||
'isnull', 'join', 'key', 'left', 'like', 'limit', 'match', 'natural',
|
||||
'not', 'notnull', 'null', 'of', 'offset', 'on', 'or', 'order', 'outer',
|
||||
'plan', 'pragma', 'primary', 'query', 'raise', 'references',
|
||||
'reindex', 'rename', 'replace', 'restrict', 'right', 'rollback',
|
||||
'row', 'select', 'set', 'table', 'temp', 'temporary', 'then', 'to',
|
||||
'transaction', 'trigger', 'true', 'union', 'unique', 'update', 'using',
|
||||
'vacuum', 'values', 'view', 'virtual', 'when', 'where',
|
||||
])
|
||||
|
||||
    def format_index(self, index, use_schema=True, name=None):
        """Prepare a quoted index and schema name."""

        if name is None:
            name = index.name
        result = self.quote(name, index.quote)
        if not self.omit_schema and use_schema \
                and getattr(index.table, "schema", None):
            result = self.quote_schema(index.table.schema,
                            index.table.quote_schema) + "." + result
        return result

class SQLiteDialect(default.DefaultDialect):
    name = 'sqlite'
    supports_alter = False
    supports_unicode_statements = True
    supports_unicode_binds = True
    supports_default_values = True
    supports_empty_insert = False
    supports_cast = True

    default_paramstyle = 'qmark'
    statement_compiler = SQLiteCompiler
    ddl_compiler = SQLiteDDLCompiler
    type_compiler = SQLiteTypeCompiler
    preparer = SQLiteIdentifierPreparer
    ischema_names = ischema_names
    colspecs = colspecs
    isolation_level = None

    def __init__(self, isolation_level=None, native_datetime=False, **kwargs):
        default.DefaultDialect.__init__(self, **kwargs)
        if isolation_level and isolation_level not in ('SERIALIZABLE',
                'READ UNCOMMITTED'):
            raise exc.ArgumentError("Invalid value for isolation_level. "
                "Valid isolation levels for sqlite are 'SERIALIZABLE' and "
                "'READ UNCOMMITTED'.")
        self.isolation_level = isolation_level

        # this flag is used by the pysqlite dialect, and perhaps others in
        # the future, to indicate the driver is handling date/timestamp
        # conversions (and perhaps datetime/time as well on some
        # hypothetical driver ?)
        self.native_datetime = native_datetime

        if self.dbapi is not None:
            self.supports_default_values = \
                self.dbapi.sqlite_version_info >= (3, 3, 8)
            self.supports_cast = \
                self.dbapi.sqlite_version_info >= (3, 2, 3)
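
    # For illustration (a usage sketch, not part of the dialect itself):
    # create_engine() forwards unrecognized keyword arguments to the
    # dialect, so the isolation level can be selected per-engine:
    #
    #     e = create_engine('sqlite://', isolation_level='READ UNCOMMITTED')
    #
    # on_connect() below then applies the setting with
    # "PRAGMA read_uncommitted" on each new connection.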

    def on_connect(self):
        if self.isolation_level is not None:
            if self.isolation_level == 'READ UNCOMMITTED':
                isolation_level = 1
            else:
                isolation_level = 0

            def connect(conn):
                cursor = conn.cursor()
                cursor.execute("PRAGMA read_uncommitted = %d" %
                               isolation_level)
                cursor.close()
            return connect
        else:
            return None

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        if schema is not None:
            qschema = self.identifier_preparer.quote_identifier(schema)
            master = '%s.sqlite_master' % qschema
            s = ("SELECT name FROM %s "
                 "WHERE type='table' ORDER BY name") % (master,)
            rs = connection.execute(s)
        else:
            try:
                s = ("SELECT name FROM "
                     " (SELECT * FROM sqlite_master UNION ALL "
                     "  SELECT * FROM sqlite_temp_master) "
                     "WHERE type='table' ORDER BY name")
                rs = connection.execute(s)
            except exc.DBAPIError:
                s = ("SELECT name FROM sqlite_master "
                     "WHERE type='table' ORDER BY name")
                rs = connection.execute(s)

        return [row[0] for row in rs]

    def has_table(self, connection, table_name, schema=None):
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            pragma = "PRAGMA %s." % quote(schema)
        else:
            pragma = "PRAGMA "
        qtable = quote(table_name)
        cursor = _pragma_cursor(
            connection.execute("%stable_info(%s)" % (pragma, qtable)))
        row = cursor.fetchone()

        # consume remaining rows, to work around
        # http://www.sqlite.org/cvstrac/tktview?tn=1884
        while cursor.fetchone() is not None:
            pass

        return (row is not None)

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        if schema is not None:
            qschema = self.identifier_preparer.quote_identifier(schema)
            master = '%s.sqlite_master' % qschema
            s = ("SELECT name FROM %s "
                 "WHERE type='view' ORDER BY name") % (master,)
            rs = connection.execute(s)
        else:
            try:
                s = ("SELECT name FROM "
                     " (SELECT * FROM sqlite_master UNION ALL "
                     "  SELECT * FROM sqlite_temp_master) "
                     "WHERE type='view' ORDER BY name")
                rs = connection.execute(s)
            except exc.DBAPIError:
                s = ("SELECT name FROM sqlite_master "
                     "WHERE type='view' ORDER BY name")
                rs = connection.execute(s)

        return [row[0] for row in rs]

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            qschema = self.identifier_preparer.quote_identifier(schema)
            master = '%s.sqlite_master' % qschema
            s = ("SELECT sql FROM %s WHERE name = '%s' "
                 "AND type='view'") % (master, view_name)
            rs = connection.execute(s)
        else:
            try:
                s = ("SELECT sql FROM "
                     " (SELECT * FROM sqlite_master UNION ALL "
                     "  SELECT * FROM sqlite_temp_master) "
                     "WHERE name = '%s' "
                     "AND type='view'") % view_name
                rs = connection.execute(s)
            except exc.DBAPIError:
                s = ("SELECT sql FROM sqlite_master WHERE name = '%s' "
                     "AND type='view'") % view_name
                rs = connection.execute(s)

        result = rs.fetchall()
        if result:
            return result[0].sql

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            pragma = "PRAGMA %s." % quote(schema)
        else:
            pragma = "PRAGMA "
        qtable = quote(table_name)
        c = _pragma_cursor(
            connection.execute("%stable_info(%s)" % (pragma, qtable)))
        found_table = False
        columns = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            (name, type_, nullable, default, has_default, primary_key) = \
                (row[1], row[2].upper(), not row[3],
                 row[4], row[4] is not None, row[5])
            name = re.sub(r'^\"|\"$', '', name)
            if default:
                default = re.sub(r"^\'|\'$", '', default)
            match = re.match(r'(\w+)(\(.*?\))?', type_)
            if match:
                coltype = match.group(1)
                args = match.group(2)
            else:
                coltype = "VARCHAR"
                args = ''
            try:
                coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, name))
                coltype = sqltypes.NullType
            if args is not None:
                args = re.findall(r'(\d+)', args)
                coltype = coltype(*[int(a) for a in args])

            columns.append({
                'name': name,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'primary_key': primary_key,
            })
        return columns

    @reflection.cache
    def get_primary_keys(self, connection, table_name, schema=None, **kw):
        cols = self.get_columns(connection, table_name, schema, **kw)
        pkeys = []
        for col in cols:
            if col['primary_key']:
                pkeys.append(col['name'])
        return pkeys

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            pragma = "PRAGMA %s." % quote(schema)
        else:
            pragma = "PRAGMA "
        qtable = quote(table_name)
        c = _pragma_cursor(
            connection.execute("%sforeign_key_list(%s)" % (pragma, qtable)))
        fkeys = []
        fks = {}
        while True:
            row = c.fetchone()
            if row is None:
                break
            (constraint_name, rtbl, lcol, rcol) = \
                (row[0], row[2], row[3], row[4])
            rtbl = re.sub(r'^\"|\"$', '', rtbl)
            lcol = re.sub(r'^\"|\"$', '', lcol)
            rcol = re.sub(r'^\"|\"$', '', rcol)
            try:
                fk = fks[constraint_name]
            except KeyError:
                fk = {
                    'name': constraint_name,
                    'constrained_columns': [],
                    'referred_schema': None,
                    'referred_table': rtbl,
                    'referred_columns': [],
                }
                fkeys.append(fk)
                fks[constraint_name] = fk

            # look up the table based on the given table's engine, not
            # 'self', since it could be a ProxyEngine
            if lcol not in fk['constrained_columns']:
                fk['constrained_columns'].append(lcol)
            if rcol not in fk['referred_columns']:
                fk['referred_columns'].append(rcol)
        return fkeys

    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            pragma = "PRAGMA %s." % quote(schema)
        else:
            pragma = "PRAGMA "
        include_auto_indexes = kw.pop('include_auto_indexes', False)
        qtable = quote(table_name)
        c = _pragma_cursor(
            connection.execute("%sindex_list(%s)" % (pragma, qtable)))
        indexes = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            # ignore implicit primary key index.
            # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
            elif not include_auto_indexes and \
                    row[1].startswith('sqlite_autoindex'):
                continue

            indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
        # loop thru unique indexes to get the column names.
        for idx in indexes:
            c = connection.execute("%sindex_info(%s)" %
                                   (pragma, quote(idx['name'])))
            cols = idx['column_names']
            while True:
                row = c.fetchone()
                if row is None:
                    break
                cols.append(row[2])
        return indexes

def _pragma_cursor(cursor):
    """work around SQLite issue whereby cursor.description is blank when PRAGMA returns no rows."""

    if cursor.closed:
        cursor._fetchone_impl = lambda: None
    return cursor

236
sqlalchemy/dialects/sqlite/pysqlite.py
Normal file
@@ -0,0 +1,236 @@
"""Support for the SQLite database via pysqlite.

Note that pysqlite is the same driver as the ``sqlite3``
module included with the Python distribution.

Driver
------

When using Python 2.5 and above, the built in ``sqlite3`` driver is
already installed and no additional installation is needed.  Otherwise,
the ``pysqlite2`` driver needs to be present.  This is the same driver as
``sqlite3``, just with a different name.

The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
is loaded.  This allows an explicitly installed pysqlite driver to take
precedence over the built in one.  As with all dialects, a specific
DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to
control this explicitly::

    from sqlite3 import dbapi2 as sqlite
    e = create_engine('sqlite+pysqlite:///file.db', module=sqlite)

Full documentation on pysqlite is available at:
`<http://www.initd.org/pub/software/pysqlite/doc/usage-guide.html>`_

Connect Strings
---------------

The file specification for the SQLite database is taken as the "database"
portion of the URL.  Note that the format of a url is::

    driver://user:pass@host/database

This means that the actual filename to be used starts with the characters
to the **right** of the third slash.  So connecting to a relative filepath
looks like::

    # relative path
    e = create_engine('sqlite:///path/to/database.db')

An absolute path, which is denoted by starting with a slash, means you
need **four** slashes::

    # absolute path
    e = create_engine('sqlite:////path/to/database.db')

To use a Windows path, regular drive specifications and backslashes can be
used.  Double backslashes are probably needed::

    # absolute path on Windows
    e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db')

The sqlite ``:memory:`` identifier is the default if no filepath is
present.  Specify ``sqlite://`` and nothing else::

    # in-memory database
    e = create_engine('sqlite://')

Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------

The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect that any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object.  The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not.  Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively.  Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.

Usage of PARSE_DECLTYPES can be forced if one configures
"native_datetime=True" on create_engine()::

    engine = create_engine('sqlite://',
        connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
        native_datetime=True
        )

With this flag enabled, the DATE and TIMESTAMP types (but note - not the
DATETIME or TIME types...confused yet ?) will not perform any bind
parameter or result processing.  Execution of "func.current_date()" will
return a string.  "func.current_timestamp()" is registered as returning a
DATETIME type in SQLAlchemy, so this function still receives
SQLAlchemy-level result processing.
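
For example (a sketch only; the exact string returned depends on the
SQLite version in use)::

    from sqlalchemy import func, select

    # with native_datetime=True configured as above:
    engine.execute(select([func.current_date()])).scalar()
    # returns e.g. '2010-01-01' as a plain string, not a datetime.date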

Threading Behavior
------------------

Pysqlite connections do not support being moved between threads, unless
the ``check_same_thread`` Pysqlite flag is set to ``False``.  In addition,
when using an in-memory SQLite database, the full database exists only
within the scope of a single connection.  It is reported that an in-memory
database does not support being shared between threads regardless of the
``check_same_thread`` flag - which means that a multithreaded
application **cannot** share data from a ``:memory:`` database across
threads unless access to the connection is limited to a single worker
thread which communicates through a queueing mechanism to concurrent
threads.

To provide a default which accommodates SQLite's default threading
capabilities somewhat reasonably, the SQLite dialect will specify that the
:class:`~sqlalchemy.pool.SingletonThreadPool` be used by default.  This
pool maintains a single SQLite connection per thread that is held open up
to a count of five concurrent threads.  When more than five threads are
used, a cleanup mechanism will dispose of excess unused connections.

Two optional pool implementations may be appropriate for particular SQLite
usage scenarios, as shown in the sketch following this list:

 * the :class:`sqlalchemy.pool.StaticPool` might be appropriate for a
   multithreaded application using an in-memory database, assuming the
   threading issues inherent in pysqlite are somehow accommodated for.
   This pool holds persistently onto a single connection which is never
   closed, and is returned for all requests.

 * the :class:`sqlalchemy.pool.NullPool` might be appropriate for an
   application that makes use of a file-based sqlite database.  This pool
   disables any actual "pooling" behavior, and simply opens and closes
   real connections corresponding to the :func:`connect()` and
   :func:`close()` methods.  SQLite can "connect" to a particular file
   with very high efficiency, so this option may actually perform better
   without the extra overhead of :class:`SingletonThreadPool`.  NullPool
   will of course render a ``:memory:`` connection useless since the
   database would be lost as soon as the connection is "returned" to
   the pool.
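
For example, either pool can be selected when creating the engine (a
minimal sketch; the file path is illustrative)::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import StaticPool, NullPool

    # in-memory database shared through one persistent connection
    e = create_engine('sqlite://', poolclass=StaticPool)

    # file-based database; a real connect()/close() per checkout
    e = create_engine('sqlite:///some.db', poolclass=NullPool)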

Unicode
-------

In contrast to SQLAlchemy's active handling of date and time types for
pysqlite, pysqlite's default behavior regarding Unicode is that all strings
are returned as Python unicode objects in all cases.  So even if the
:class:`~sqlalchemy.types.Unicode` type is *not* used, you will still
always receive unicode data back from a result set.  It is **strongly**
recommended that you do use the :class:`~sqlalchemy.types.Unicode` type to
represent strings, since it will raise a warning if a non-unicode Python
string is passed from the user application.  Mixing the usage of
non-unicode objects with returned unicode objects can quickly create
confusion, particularly when using the ORM as internal data is not always
represented by an actual database result string.
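
As a minimal sketch (the table and data are purely illustrative), declaring
columns with :class:`~sqlalchemy.types.Unicode` keeps what goes in
symmetric with what comes back::

    from sqlalchemy import create_engine, MetaData, Table, Column, \
        Integer, Unicode

    engine = create_engine('sqlite://')
    meta = MetaData()
    users = Table('users', meta,
                  Column('id', Integer, primary_key=True),
                  Column('name', Unicode(50)))
    meta.create_all(engine)
    engine.execute(users.insert(), name=u'M\xfcller')  # unicode in...
    print engine.execute(users.select()).fetchall()    # ...unicode back
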
"""

from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE
from sqlalchemy import schema, exc, pool
from sqlalchemy.engine import default
from sqlalchemy import types as sqltypes
from sqlalchemy import util


class _SQLite_pysqliteTimeStamp(DATETIME):
    def bind_processor(self, dialect):
        if dialect.native_datetime:
            return None
        else:
            return DATETIME.bind_processor(self, dialect)

    def result_processor(self, dialect, coltype):
        if dialect.native_datetime:
            return None
        else:
            return DATETIME.result_processor(self, dialect, coltype)


class _SQLite_pysqliteDate(DATE):
    def bind_processor(self, dialect):
        if dialect.native_datetime:
            return None
        else:
            return DATE.bind_processor(self, dialect)

    def result_processor(self, dialect, coltype):
        if dialect.native_datetime:
            return None
        else:
            return DATE.result_processor(self, dialect, coltype)


class SQLiteDialect_pysqlite(SQLiteDialect):
    default_paramstyle = 'qmark'
    poolclass = pool.SingletonThreadPool

    colspecs = util.update_copy(
        SQLiteDialect.colspecs,
        {
            sqltypes.Date: _SQLite_pysqliteDate,
            sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp,
        }
    )

    # Py3K
    #description_encoding = None

    driver = 'pysqlite'

    def __init__(self, **kwargs):
        SQLiteDialect.__init__(self, **kwargs)

        if self.dbapi is not None:
            sqlite_ver = self.dbapi.version_info
            if sqlite_ver < (2, 1, 3):
                util.warn(
                    ("The installed version of pysqlite2 (%s) is out-dated "
                     "and will cause errors in some cases.  Version 2.1.3 "
                     "or greater is recommended.") %
                    '.'.join([str(subver) for subver in sqlite_ver]))

    @classmethod
    def dbapi(cls):
        try:
            from pysqlite2 import dbapi2 as sqlite
        except ImportError, e:
            try:
                from sqlite3 import dbapi2 as sqlite  # try the 2.5+ stdlib name
            except ImportError:
                raise e
        return sqlite

    def _get_server_version_info(self, connection):
        return self.dbapi.sqlite_version_info

    def create_connect_args(self, url):
        if url.username or url.password or url.host or url.port:
            raise exc.ArgumentError(
                "Invalid SQLite URL: %s\n"
                "Valid SQLite URL forms are:\n"
                " sqlite:///:memory: (or, sqlite://)\n"
                " sqlite:///relative/path/to/file.db\n"
                " sqlite:////absolute/path/to/file.db" % (url,))
        filename = url.database or ':memory:'

        opts = url.query.copy()
        util.coerce_kw_type(opts, 'timeout', float)
        util.coerce_kw_type(opts, 'isolation_level', str)
        util.coerce_kw_type(opts, 'detect_types', int)
        util.coerce_kw_type(opts, 'check_same_thread', bool)
        util.coerce_kw_type(opts, 'cached_statements', int)

        return ([filename], opts)

    def is_disconnect(self, e):
        return isinstance(e, self.dbapi.ProgrammingError) and \
                "Cannot operate on a closed database." in str(e)

dialect = SQLiteDialect_pysqlite

20
sqlalchemy/dialects/sybase/__init__.py
Normal file
@@ -0,0 +1,20 @@
from sqlalchemy.dialects.sybase import base, pysybase, pyodbc


from base import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
                TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
                BIGINT, INT, INTEGER, SMALLINT, BINARY,\
                VARBINARY, UNITEXT, UNICHAR, UNIVARCHAR,\
                IMAGE, BIT, MONEY, SMALLMONEY, TINYINT

# default dialect
base.dialect = pyodbc.dialect

__all__ = (
    'CHAR', 'VARCHAR', 'TIME', 'NCHAR', 'NVARCHAR',
    'TEXT', 'DATE', 'DATETIME', 'FLOAT', 'NUMERIC',
    'BIGINT', 'INT', 'INTEGER', 'SMALLINT', 'BINARY',
    'VARBINARY', 'UNITEXT', 'UNICHAR', 'UNIVARCHAR',
    'IMAGE', 'BIT', 'MONEY', 'SMALLMONEY', 'TINYINT',
    'dialect'
)

420
sqlalchemy/dialects/sybase/base.py
Normal file
@@ -0,0 +1,420 @@
# sybase/base.py
# Copyright (C) 2010 Michael Bayer mike_mp@zzzcomputing.com
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Support for Sybase Adaptive Server Enterprise (ASE).

Note that this dialect is no longer specific to Sybase iAnywhere.
ASE is the primary support platform.

"""

import operator
from sqlalchemy.sql import compiler, expression, text, bindparam
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc

from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
                TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
                BIGINT, INT, INTEGER, SMALLINT, BINARY,\
                VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
                UnicodeText

RESERVED_WORDS = set([
    "add", "all", "alter", "and",
    "any", "as", "asc", "backup",
    "begin", "between", "bigint", "binary",
    "bit", "bottom", "break", "by",
    "call", "capability", "cascade", "case",
    "cast", "char", "char_convert", "character",
    "check", "checkpoint", "close", "comment",
    "commit", "connect", "constraint", "contains",
    "continue", "convert", "create", "cross",
    "cube", "current", "current_timestamp", "current_user",
    "cursor", "date", "dbspace", "deallocate",
    "dec", "decimal", "declare", "default",
    "delete", "deleting", "desc", "distinct",
    "do", "double", "drop", "dynamic",
    "else", "elseif", "encrypted", "end",
    "endif", "escape", "except", "exception",
    "exec", "execute", "existing", "exists",
    "externlogin", "fetch", "first", "float",
    "for", "force", "foreign", "forward",
    "from", "full", "goto", "grant",
    "group", "having", "holdlock", "identified",
    "if", "in", "index", "index_lparen",
    "inner", "inout", "insensitive", "insert",
    "inserting", "install", "instead", "int",
    "integer", "integrated", "intersect", "into",
    "iq", "is", "isolation", "join",
    "key", "lateral", "left", "like",
    "lock", "login", "long", "match",
    "membership", "message", "mode", "modify",
    "natural", "new", "no", "noholdlock",
    "not", "notify", "null", "numeric",
    "of", "off", "on", "open",
    "option", "options", "or", "order",
    "others", "out", "outer", "over",
    "passthrough", "precision", "prepare", "primary",
    "print", "privileges", "proc", "procedure",
    "publication", "raiserror", "readtext", "real",
    "reference", "references", "release", "remote",
    "remove", "rename", "reorganize", "resource",
    "restore", "restrict", "return", "revoke",
    "right", "rollback", "rollup", "save",
    "savepoint", "scroll", "select", "sensitive",
    "session", "set", "setuser", "share",
    "smallint", "some", "sqlcode", "sqlstate",
    "start", "stop", "subtrans", "subtransaction",
    "synchronize", "syntax_error", "table", "temporary",
    "then", "time", "timestamp", "tinyint",
    "to", "top", "tran", "trigger",
    "truncate", "tsequal", "unbounded", "union",
    "unique", "unknown", "unsigned", "update",
    "updating", "user", "using", "validate",
    "values", "varbinary", "varchar", "variable",
    "varying", "view", "wait", "waitfor",
    "when", "where", "while", "window",
    "with", "with_cube", "with_lparen", "with_rollup",
    "within", "work", "writetext",
    ])

class _SybaseUnitypeMixin(object):
    """these types appear to return a buffer object."""

    def result_processor(self, dialect, coltype):
        def process(value):
            if value is not None:
                return str(value)  #.decode("ucs-2")
            else:
                return None
        return process

class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
    __visit_name__ = 'UNICHAR'

class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
    __visit_name__ = 'UNIVARCHAR'

class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
    __visit_name__ = 'UNITEXT'

class TINYINT(sqltypes.Integer):
    __visit_name__ = 'TINYINT'

class BIT(sqltypes.TypeEngine):
    __visit_name__ = 'BIT'

class MONEY(sqltypes.TypeEngine):
    __visit_name__ = "MONEY"

class SMALLMONEY(sqltypes.TypeEngine):
    __visit_name__ = "SMALLMONEY"

class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
    __visit_name__ = "UNIQUEIDENTIFIER"

class IMAGE(sqltypes.LargeBinary):
    __visit_name__ = 'IMAGE'


class SybaseTypeCompiler(compiler.GenericTypeCompiler):
    def visit_large_binary(self, type_):
        return self.visit_IMAGE(type_)

    def visit_boolean(self, type_):
        return self.visit_BIT(type_)

    def visit_unicode(self, type_):
        return self.visit_NVARCHAR(type_)

    def visit_UNICHAR(self, type_):
        return "UNICHAR(%d)" % type_.length

    def visit_UNIVARCHAR(self, type_):
        return "UNIVARCHAR(%d)" % type_.length

    def visit_UNITEXT(self, type_):
        return "UNITEXT"

    def visit_TINYINT(self, type_):
        return "TINYINT"

    def visit_IMAGE(self, type_):
        return "IMAGE"

    def visit_BIT(self, type_):
        return "BIT"

    def visit_MONEY(self, type_):
        return "MONEY"

    def visit_SMALLMONEY(self, type_):
        return "SMALLMONEY"

    def visit_UNIQUEIDENTIFIER(self, type_):
        return "UNIQUEIDENTIFIER"

ischema_names = {
    'integer': INTEGER,
    'unsigned int': INTEGER,        # TODO: unsigned flags
    'unsigned smallint': SMALLINT,  # TODO: unsigned flags
    'unsigned bigint': BIGINT,      # TODO: unsigned flags
    'bigint': BIGINT,
    'smallint': SMALLINT,
    'tinyint': TINYINT,
    'varchar': VARCHAR,
    'long varchar': TEXT,           # TODO
    'char': CHAR,
    'decimal': DECIMAL,
    'numeric': NUMERIC,
    'float': FLOAT,
    'double': NUMERIC,              # TODO
    'binary': BINARY,
    'varbinary': VARBINARY,
    'bit': BIT,
    'image': IMAGE,
    'timestamp': TIMESTAMP,
    'money': MONEY,
    'smallmoney': SMALLMONEY,
    'uniqueidentifier': UNIQUEIDENTIFIER,
}


class SybaseExecutionContext(default.DefaultExecutionContext):
    _enable_identity_insert = False

    def set_ddl_autocommit(self, connection, value):
        """Must be implemented by subclasses to accommodate DDL executions.

        "connection" is the raw unwrapped DBAPI connection.  "value"
        is True or False.  When True, the connection should be configured
        such that a DDL can take place subsequently.  When False,
        a DDL has taken place and the connection should be resumed
        into non-autocommit mode.

        """
        raise NotImplementedError()

    def pre_exec(self):
        if self.isinsert:
            tbl = self.compiled.statement.table
            seq_column = tbl._autoincrement_column
            insert_has_sequence = seq_column is not None

            if insert_has_sequence:
                self._enable_identity_insert = \
                    seq_column.key in self.compiled_parameters[0]
            else:
                self._enable_identity_insert = False

            if self._enable_identity_insert:
                self.cursor.execute("SET IDENTITY_INSERT %s ON" %
                    self.dialect.identifier_preparer.format_table(tbl))

        if self.isddl:
            # TODO: to enhance this, we can detect "ddl in tran" on the
            # database settings.  this error message should be improved to
            # include a note about that.
            if not self.should_autocommit:
                raise exc.InvalidRequestError(
                    "The Sybase dialect only supports "
                    "DDL in 'autocommit' mode at this time.")

            self.root_connection.engine.logger.info(
                "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")

            self.set_ddl_autocommit(
                self.root_connection.connection.connection, True)

    def post_exec(self):
        if self.isddl:
            self.set_ddl_autocommit(self.root_connection, False)

        if self._enable_identity_insert:
            self.cursor.execute(
                "SET IDENTITY_INSERT %s OFF" %
                self.dialect.identifier_preparer.
                    format_table(self.compiled.statement.table)
            )

    def get_lastrowid(self):
        cursor = self.create_cursor()
        cursor.execute("SELECT @@identity AS lastrowid")
        lastrowid = cursor.fetchone()[0]
        cursor.close()
        return lastrowid

class SybaseSQLCompiler(compiler.SQLCompiler):
    ansi_bind_rules = True

    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'doy': 'dayofyear',
            'dow': 'weekday',
            'milliseconds': 'millisecond'
        })

    def get_select_precolumns(self, select):
        s = select._distinct and "DISTINCT " or ""
        if select._limit:
            #if select._limit == 1:
                #s += "FIRST "
            #else:
                #s += "TOP %s " % (select._limit,)
            s += "TOP %s " % (select._limit,)
        if select._offset:
            if not select._limit:
                # FIXME: sybase doesn't allow an offset without a limit
                # so use a huge value for TOP here
                s += "TOP 1000000 "
            s += "START AT %s " % (select._offset + 1,)
        return s

    def get_from_hint_text(self, table, text):
        return text

    def limit_clause(self, select):
        # Limit in sybase is after the select keyword
        return ""

    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % (field, self.process(extract.expr, **kw))

    def for_update_clause(self, select):
        # "FOR UPDATE" is only allowed on "DECLARE CURSOR"
        # which SQLAlchemy doesn't use
        return ''

    def order_by_clause(self, select, **kw):
        kw['literal_binds'] = True
        order_by = self.process(select._order_by_clause, **kw)

        # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
        if order_by and (not self.is_subquery() or select._limit):
            return " ORDER BY " + order_by
        else:
            return ""


class SybaseDDLCompiler(compiler.DDLCompiler):
    def get_column_specification(self, column, **kwargs):
        colspec = self.preparer.format_column(column) + " " + \
                    self.dialect.type_compiler.process(column.type)

        if column.table is None:
            raise exc.InvalidRequestError(
                "The Sybase dialect requires Table-bound "
                "columns in order to generate DDL")
        seq_col = column.table._autoincrement_column

        # install an IDENTITY Sequence if we have an implicit IDENTITY column
        if seq_col is column:
            sequence = isinstance(column.default, sa_schema.Sequence) \
                            and column.default
            if sequence:
                start, increment = sequence.start or 1, sequence.increment or 1
            else:
                start, increment = 1, 1
            if (start, increment) == (1, 1):
                colspec += " IDENTITY"
            else:
                # TODO: need correct syntax for this
                colspec += " IDENTITY(%s,%s)" % (start, increment)
        else:
            if column.nullable is not None:
                if not column.nullable or column.primary_key:
                    colspec += " NOT NULL"
                else:
                    colspec += " NULL"

            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        return colspec

    def visit_drop_index(self, drop):
        index = drop.element
        return "\nDROP INDEX %s.%s" % (
            self.preparer.quote_identifier(index.table.name),
            self.preparer.quote(
                self._validate_identifier(index.name, False), index.quote)
            )

class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
    reserved_words = RESERVED_WORDS

class SybaseDialect(default.DefaultDialect):
    name = 'sybase'
    supports_unicode_statements = False
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False

    supports_native_boolean = False
    supports_unicode_binds = False
    postfetch_lastrowid = True

    colspecs = {}
    ischema_names = ischema_names

    type_compiler = SybaseTypeCompiler
    statement_compiler = SybaseSQLCompiler
    ddl_compiler = SybaseDDLCompiler
    preparer = SybaseIdentifierPreparer

    def _get_default_schema_name(self, connection):
        return connection.scalar(
            text("SELECT user_name() as user_name",
                 typemap={'user_name': Unicode})
        )

    def initialize(self, connection):
        super(SybaseDialect, self).initialize(connection)
        if self.server_version_info is not None and \
                self.server_version_info < (15,):
            self.max_identifier_length = 30
        else:
            self.max_identifier_length = 255

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        if schema is None:
            schema = self.default_schema_name

        result = connection.execute(
            text("select sysobjects.name from sysobjects, sysusers "
                 "where sysobjects.uid=sysusers.uid and "
                 "sysusers.name=:schemaname and "
                 "sysobjects.type='U'",
                 bindparams=[
                     bindparam('schemaname', schema)
                 ])
        )
        return [r[0] for r in result]

    def has_table(self, connection, tablename, schema=None):
        if schema is None:
            schema = self.default_schema_name

        result = connection.execute(
            text("select sysobjects.name from sysobjects, sysusers "
                 "where sysobjects.uid=sysusers.uid and "
                 "sysobjects.name=:tablename and "
                 "sysusers.name=:schemaname and "
                 "sysobjects.type='U'",
                 bindparams=[
                     bindparam('tablename', tablename),
                     bindparam('schemaname', schema)
                 ])
        )
        return result.scalar() is not None

    def reflecttable(self, connection, table, include_columns):
        raise NotImplementedError()

17
sqlalchemy/dialects/sybase/mxodbc.py
Normal file
@@ -0,0 +1,17 @@
"""
Support for Sybase via mxodbc.

This dialect is a stub only and is likely non functional at this time.

"""
from sqlalchemy.dialects.sybase.base import SybaseDialect, SybaseExecutionContext
from sqlalchemy.connectors.mxodbc import MxODBCConnector

class SybaseExecutionContext_mxodbc(SybaseExecutionContext):
    pass

class SybaseDialect_mxodbc(MxODBCConnector, SybaseDialect):
    execution_ctx_cls = SybaseExecutionContext_mxodbc

dialect = SybaseDialect_mxodbc

75
sqlalchemy/dialects/sybase/pyodbc.py
Normal file
@@ -0,0 +1,75 @@
"""
Support for Sybase via pyodbc.

http://pypi.python.org/pypi/pyodbc/

Connect strings are of the form::

    sybase+pyodbc://<username>:<password>@<dsn>/
    sybase+pyodbc://<username>:<password>@<host>/<database>

Unicode Support
---------------

The pyodbc driver currently supports usage of these Sybase types with
Unicode or multibyte strings::

    CHAR
    NCHAR
    NVARCHAR
    TEXT
    VARCHAR

Currently *not* supported are::

    UNICHAR
    UNITEXT
    UNIVARCHAR

"""

from sqlalchemy.dialects.sybase.base import SybaseDialect, SybaseExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
import decimal
from sqlalchemy import types as sqltypes, util, processors

class _SybNumeric_pyodbc(sqltypes.Numeric):
    """Turns Decimals with adjusted() < -6 into floats.

    It's not yet known how to get decimals with many
    significant digits or very large adjusted() into Sybase
    via pyodbc.

    """

    def bind_processor(self, dialect):
        super_process = super(_SybNumeric_pyodbc, self).bind_processor(dialect)

        def process(value):
            if self.asdecimal and \
                    isinstance(value, decimal.Decimal):

                if value.adjusted() < -6:
                    return processors.to_float(value)

            if super_process:
                return super_process(value)
            else:
                return value
        return process

class SybaseExecutionContext_pyodbc(SybaseExecutionContext):
    def set_ddl_autocommit(self, connection, value):
        if value:
            connection.autocommit = True
        else:
            connection.autocommit = False

class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect):
    execution_ctx_cls = SybaseExecutionContext_pyodbc

    colspecs = {
        sqltypes.Numeric: _SybNumeric_pyodbc,
    }

dialect = SybaseDialect_pyodbc

98
sqlalchemy/dialects/sybase/pysybase.py
Normal file
@@ -0,0 +1,98 @@
# pysybase.py
# Copyright (C) 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""
Support for Sybase via the python-sybase driver.

http://python-sybase.sourceforge.net/

Connect strings are of the form::

    sybase+pysybase://<username>:<password>@<dsn>/[database name]

Unicode Support
---------------

The python-sybase driver does not appear to support non-ASCII strings of any
kind at this time.

"""

from sqlalchemy import types as sqltypes, processors
from sqlalchemy.dialects.sybase.base import SybaseDialect, \
    SybaseExecutionContext, SybaseSQLCompiler


class _SybNumeric(sqltypes.Numeric):
    def result_processor(self, dialect, type_):
        if not self.asdecimal:
            return processors.to_float
        else:
            return sqltypes.Numeric.result_processor(self, dialect, type_)

class SybaseExecutionContext_pysybase(SybaseExecutionContext):

    def set_ddl_autocommit(self, dbapi_connection, value):
        if value:
            # call commit() on the Sybase connection directly,
            # to avoid any side effects of calling a Connection
            # transactional method inside of pre_exec()
            dbapi_connection.commit()

    def pre_exec(self):
        SybaseExecutionContext.pre_exec(self)

        for param in self.parameters:
            for key in list(param):
                param["@" + key] = param[key]
                del param[key]


class SybaseSQLCompiler_pysybase(SybaseSQLCompiler):
    def bindparam_string(self, name):
        return "@" + name

class SybaseDialect_pysybase(SybaseDialect):
    driver = 'pysybase'
    execution_ctx_cls = SybaseExecutionContext_pysybase
    statement_compiler = SybaseSQLCompiler_pysybase

    colspecs = {
        sqltypes.Numeric: _SybNumeric,
        sqltypes.Float: sqltypes.Float
    }

    @classmethod
    def dbapi(cls):
        import Sybase
        return Sybase

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user', password='passwd')

        return ([opts.pop('host')], opts)

    def do_executemany(self, cursor, statement, parameters, context=None):
        # calling python-sybase executemany yields:
        # TypeError: string too long for buffer
        for param in parameters:
            cursor.execute(statement, param)

    def _get_server_version_info(self, connection):
        vers = connection.scalar("select @@version_number")
        # i.e. 15500, 15000, 12500 ==
        # (15, 5, 0, 0), (15, 0, 0, 0), (12, 5, 0, 0)
        return (vers / 1000, vers % 1000 / 100, vers % 100 / 10, vers % 10)

    def is_disconnect(self, e):
        if isinstance(e, (self.dbapi.OperationalError,
                          self.dbapi.ProgrammingError)):
            msg = str(e)
            return ('Unable to complete network request to host' in msg or
                    'Invalid connection state' in msg or
                    'Invalid cursor state' in msg)
        else:
            return False

dialect = SybaseDialect_pysybase

145
sqlalchemy/dialects/type_migration_guidelines.txt
Normal file
@@ -0,0 +1,145 @@
Rules for Migrating TypeEngine classes to 0.6
---------------------------------------------

1. the TypeEngine classes are used for:

    a. Specifying behavior which needs to occur for bind parameters
    or result row columns.

    b. Specifying types that are entirely specific to the database
    in use and have no analogue in the sqlalchemy.types package.

    c. Specifying types where there is an analogue in sqlalchemy.types,
    but the database in use takes vendor-specific flags for those
    types.

    d. If a TypeEngine class doesn't provide any of this, it should be
    *removed* from the dialect.

2. the TypeEngine classes are *no longer* used for generating DDL.  Dialects
now have a TypeCompiler subclass which uses the same visit_XXX model as
other compilers.
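
For instance, DDL strings now come from visit_XXX methods on the dialect's
type compiler - a minimal sketch (the class name and method body are
illustrative, not taken from any particular dialect):

    from sqlalchemy.sql import compiler

    class MyDialectTypeCompiler(compiler.GenericTypeCompiler):
        # renders the DDL string for NUMERIC; the TypeEngine class itself
        # no longer carries a get_col_spec() method for this purpose.
        def visit_NUMERIC(self, type_):
            return "NUMERIC(%s, %s)" % (type_.precision, type_.scale)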

3. the "ischema_names" and "colspecs" dictionaries are now required members
on the Dialect class.

4. The names of types within dialects are now important.  If a
dialect-specific type is a subclass of an existing generic type and is only
provided for bind/result behavior, the current mixed case naming can remain,
i.e. _PGNumeric for Numeric - in this case, end users would never need to
use _PGNumeric directly.  However, if a dialect-specific type is specifying
a type *or* arguments that are not present generically, it should match the
real name of the type on that backend, in uppercase.  E.g. postgresql.INET,
mysql.ENUM, postgresql.ARRAY.

Or follow this handy flowchart:

 is the type meant to provide bind/result          is the type the same name as an
 behavior to a generic type (i.e. MixedCase) --no--> UPPERCASE type in types.py ?
 type in types.py ?                                    |         |
     |                                                 no       yes
    yes                                                |         |
     |                                                 |    does your type need special
     |                               +<----- yes ------+    behavior or arguments ?
     |                               |                           |
     |                               |                           no
 name the type using                 |                           |
 _MixedCase, i.e.                    v                           v
 _OracleBoolean.  it          name the type             don't make a
 stays private to the dialect identically as that       type, make sure the dialect's
 and is invoked *only* via    within the DB,            base.py imports the types.py
 the colspecs dict.           using UPPERCASE           UPPERCASE name into its namespace
     |                        (i.e. BIT, NCHAR, INTERVAL).
     |                               |                  Users can import it.
     |                               |
     v                               v
 subclass the closest         is the name of this type
 MixedCase type types.py,     identical to an UPPERCASE
 i.e.                 <-- no  name in types.py ?
 class _DateTime(types.DateTime),
 class DATETIME2(types.DateTime),    |
 class BIT(types.TypeEngine).       yes
                                     |
                                     v
                              the type should
                              subclass the
                              UPPERCASE
                              type in types.py
                              (i.e. class BLOB(types.BLOB))

Example 1.  pysqlite needs bind/result processing for the DateTime type in
types.py, which applies to all DateTimes and subclasses.  It's named
_SLDateTime and subclasses types.DateTime.

Example 2.  MS-SQL has a TIME type which takes a non-standard "precision"
argument that is rendered within DDL.  So it's named TIME in the MS-SQL
dialect's base.py, and subclasses types.TIME.  Users can then say
mssql.TIME(precision=10).

Example 3.  MS-SQL dialects also need special bind/result processing for
date.  But its DATE type doesn't render DDL differently than that of a
plain DATE, i.e. it takes no special arguments.  Therefore we are just
adding behavior to types.Date, so it's named _MSDate in the MS-SQL
dialect's base.py, and subclasses types.Date.

Example 4.  MySQL has a SET type, there's no analogue for this in types.py.
So MySQL names it SET in the dialect's base.py, and it subclasses
types.String, since it ultimately deals with strings.

Example 5.  Postgresql has a DATETIME type.  The DBAPIs handle dates
correctly, and no special arguments are used in PG's DDL beyond what
types.py provides.  Postgresql dialect therefore imports types.DATETIME
into its base.py.

Ideally one should be able to specify a schema using names imported
completely from a dialect, all matching the real name on that backend:

    from sqlalchemy.dialects.postgresql import base as pg

    t = Table('mytable', metadata,
        Column('id', pg.INTEGER, primary_key=True),
        Column('name', pg.VARCHAR(300)),
        Column('inetaddr', pg.INET)
    )

where above, the INTEGER and VARCHAR types are ultimately from
sqlalchemy.types, but the PG dialect makes them available in its own
namespace.
5. "colspecs" now is a dictionary of generic or uppercased types from sqlalchemy.types
|
||||
linked to types specified in the dialect. Again, if a type in the dialect does not
|
||||
specify any special behavior for bind_processor() or result_processor() and does not
|
||||
indicate a special type only available in this database, it must be *removed* from the
|
||||
module and from this dictionary.
|
||||
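
A minimal sketch of such a dictionary (the "_MyNumeric" name is
illustrative only):

    colspecs = {
        # generic type -> dialect subclass carrying special
        # bind/result behavior
        sqltypes.Numeric: _MyNumeric,
    }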

6. "ischema_names" indicates string descriptions of types as returned from
the database linked to TypeEngine classes.

    a. The string name should be matched to the most specific type possible
    within sqlalchemy.types, unless there is no matching type within
    sqlalchemy.types in which case it points to a dialect type.  *It doesn't
    matter* if the dialect has its own subclass of that type with special
    bind/result behavior - reflect to the types.py UPPERCASE type as much
    as possible.  With very few exceptions, all types should reflect to an
    UPPERCASE type.

    b. If the dialect contains a matching dialect-specific type that takes
    extra arguments which the generic one does not, then point to the
    dialect-specific type.  E.g. mssql.VARCHAR takes a "collation"
    parameter which should be preserved.
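
A minimal sketch (the MYSPECIAL entry is illustrative):

    ischema_names = {
        'integer': INTEGER,      # reflect to the types.py UPPERCASE type
        'varchar': VARCHAR,
        'myspecial': MYSPECIAL,  # only when no generic analogue exists
    }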

7. DDL, or what was formerly issued by "get_col_spec()", is now handled
exclusively by a subclass of compiler.GenericTypeCompiler.

    a. your TypeCompiler class will receive generic and uppercase types from
    sqlalchemy.types.  Do not assume the presence of dialect-specific
    attributes on these types.

    b. the visit_UPPERCASE methods on GenericTypeCompiler should *not* be
    overridden with methods that produce a different DDL name.  Uppercase
    types don't do any kind of "guessing" - if visit_TIMESTAMP is called,
    the DDL should render as TIMESTAMP in all cases, regardless of whether
    or not that type is legal on the backend database.

    c. the visit_UPPERCASE methods *should* be overridden with methods that
    add additional arguments and flags to those types.

    d. the visit_lowercase methods are overridden to provide an
    interpretation of a generic type.  E.g. visit_large_binary() might be
    overridden to say "return self.visit_BIT(type_)".

    e. visit_lowercase methods should *never* render strings directly - it
    should always be via calling a visit_UPPERCASE() method.
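
For example (a sketch mirroring the Sybase type compiler earlier in this
commit), a dialect whose large binary type is IMAGE interprets the generic
type by delegating to the uppercase method:

    def visit_large_binary(self, type_):
        return self.visit_IMAGE(type_)

    def visit_IMAGE(self, type_):
        return "IMAGE"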