Updated SqlAlchemy + the new files

This commit is contained in:
2017-04-15 16:33:29 +00:00
parent e3267d4bda
commit 4669737fe3
134 changed files with 66374 additions and 4528 deletions

View File

@ -1,6 +1,7 @@
/*
processors.c
Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com
Copyright (C) 2010-2017 the SQLAlchemy authors and contributors <see AUTHORS file>
Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com
This module is part of SQLAlchemy and is released under
the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -9,26 +10,30 @@ the MIT License: http://www.opensource.org/licenses/mit-license.php
#include <Python.h>
#include <datetime.h>
#define MODULE_NAME "cprocessors"
#define MODULE_DOC "Module containing C versions of data processing functions."
#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
typedef int Py_ssize_t;
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
#endif
static PyObject *
int_to_boolean(PyObject *self, PyObject *arg)
{
long l = 0;
int l = 0;
PyObject *res;
if (arg == Py_None)
Py_RETURN_NONE;
l = PyInt_AsLong(arg);
l = PyObject_IsTrue(arg);
if (l == 0) {
res = Py_False;
} else if (l == 1) {
res = Py_True;
} else if ((l == -1) && PyErr_Occurred()) {
/* -1 can be either the actual value, or an error flag. */
return NULL;
} else {
PyErr_SetString(PyExc_ValueError,
"int_to_boolean only accepts None, 0 or 1");
return NULL;
}
@ -57,15 +62,51 @@ to_float(PyObject *self, PyObject *arg)
static PyObject *
str_to_datetime(PyObject *self, PyObject *arg)
{
#if PY_MAJOR_VERSION >= 3
PyObject *bytes;
PyObject *err_bytes;
#endif
const char *str;
int numparsed;
unsigned int year, month, day, hour, minute, second, microsecond = 0;
PyObject *err_repr;
if (arg == Py_None)
Py_RETURN_NONE;
#if PY_MAJOR_VERSION >= 3
bytes = PyUnicode_AsASCIIString(arg);
if (bytes == NULL)
str = NULL;
else
str = PyBytes_AS_STRING(bytes);
#else
str = PyString_AsString(arg);
if (str == NULL)
#endif
if (str == NULL) {
err_repr = PyObject_Repr(arg);
if (err_repr == NULL)
return NULL;
#if PY_MAJOR_VERSION >= 3
err_bytes = PyUnicode_AsASCIIString(err_repr);
if (err_bytes == NULL)
return NULL;
PyErr_Format(
PyExc_ValueError,
"Couldn't parse datetime string '%.200s' "
"- value is not a string.",
PyBytes_AS_STRING(err_bytes));
Py_DECREF(err_bytes);
#else
PyErr_Format(
PyExc_ValueError,
"Couldn't parse datetime string '%.200s' "
"- value is not a string.",
PyString_AsString(err_repr));
#endif
Py_DECREF(err_repr);
return NULL;
}
/* microseconds are optional */
/*
@ -73,9 +114,31 @@ str_to_datetime(PyObject *self, PyObject *arg)
not accept "2000-01-01 00:00:00.". I don't know which is better, but they
should be coherent.
*/
if (sscanf(str, "%4u-%2u-%2u %2u:%2u:%2u.%6u", &year, &month, &day,
&hour, &minute, &second, &microsecond) < 6) {
PyErr_SetString(PyExc_ValueError, "Couldn't parse datetime string.");
numparsed = sscanf(str, "%4u-%2u-%2u %2u:%2u:%2u.%6u", &year, &month, &day,
&hour, &minute, &second, &microsecond);
#if PY_MAJOR_VERSION >= 3
Py_DECREF(bytes);
#endif
if (numparsed < 6) {
err_repr = PyObject_Repr(arg);
if (err_repr == NULL)
return NULL;
#if PY_MAJOR_VERSION >= 3
err_bytes = PyUnicode_AsASCIIString(err_repr);
if (err_bytes == NULL)
return NULL;
PyErr_Format(
PyExc_ValueError,
"Couldn't parse datetime string: %.200s",
PyBytes_AS_STRING(err_bytes));
Py_DECREF(err_bytes);
#else
PyErr_Format(
PyExc_ValueError,
"Couldn't parse datetime string: %.200s",
PyString_AsString(err_repr));
#endif
Py_DECREF(err_repr);
return NULL;
}
return PyDateTime_FromDateAndTime(year, month, day,
@ -85,15 +148,50 @@ str_to_datetime(PyObject *self, PyObject *arg)
static PyObject *
str_to_time(PyObject *self, PyObject *arg)
{
#if PY_MAJOR_VERSION >= 3
PyObject *bytes;
PyObject *err_bytes;
#endif
const char *str;
int numparsed;
unsigned int hour, minute, second, microsecond = 0;
PyObject *err_repr;
if (arg == Py_None)
Py_RETURN_NONE;
#if PY_MAJOR_VERSION >= 3
bytes = PyUnicode_AsASCIIString(arg);
if (bytes == NULL)
str = NULL;
else
str = PyBytes_AS_STRING(bytes);
#else
str = PyString_AsString(arg);
if (str == NULL)
#endif
if (str == NULL) {
err_repr = PyObject_Repr(arg);
if (err_repr == NULL)
return NULL;
#if PY_MAJOR_VERSION >= 3
err_bytes = PyUnicode_AsASCIIString(err_repr);
if (err_bytes == NULL)
return NULL;
PyErr_Format(
PyExc_ValueError,
"Couldn't parse time string '%.200s' - value is not a string.",
PyBytes_AS_STRING(err_bytes));
Py_DECREF(err_bytes);
#else
PyErr_Format(
PyExc_ValueError,
"Couldn't parse time string '%.200s' - value is not a string.",
PyString_AsString(err_repr));
#endif
Py_DECREF(err_repr);
return NULL;
}
/* microseconds are optional */
/*
@ -101,9 +199,31 @@ str_to_time(PyObject *self, PyObject *arg)
not accept "00:00:00.". I don't know which is better, but they should be
coherent.
*/
if (sscanf(str, "%2u:%2u:%2u.%6u", &hour, &minute, &second,
&microsecond) < 3) {
PyErr_SetString(PyExc_ValueError, "Couldn't parse time string.");
numparsed = sscanf(str, "%2u:%2u:%2u.%6u", &hour, &minute, &second,
&microsecond);
#if PY_MAJOR_VERSION >= 3
Py_DECREF(bytes);
#endif
if (numparsed < 3) {
err_repr = PyObject_Repr(arg);
if (err_repr == NULL)
return NULL;
#if PY_MAJOR_VERSION >= 3
err_bytes = PyUnicode_AsASCIIString(err_repr);
if (err_bytes == NULL)
return NULL;
PyErr_Format(
PyExc_ValueError,
"Couldn't parse time string: %.200s",
PyBytes_AS_STRING(err_bytes));
Py_DECREF(err_bytes);
#else
PyErr_Format(
PyExc_ValueError,
"Couldn't parse time string: %.200s",
PyString_AsString(err_repr));
#endif
Py_DECREF(err_repr);
return NULL;
}
return PyTime_FromTime(hour, minute, second, microsecond);
@ -112,18 +232,74 @@ str_to_time(PyObject *self, PyObject *arg)
static PyObject *
str_to_date(PyObject *self, PyObject *arg)
{
#if PY_MAJOR_VERSION >= 3
PyObject *bytes;
PyObject *err_bytes;
#endif
const char *str;
int numparsed;
unsigned int year, month, day;
PyObject *err_repr;
if (arg == Py_None)
Py_RETURN_NONE;
#if PY_MAJOR_VERSION >= 3
bytes = PyUnicode_AsASCIIString(arg);
if (bytes == NULL)
str = NULL;
else
str = PyBytes_AS_STRING(bytes);
#else
str = PyString_AsString(arg);
if (str == NULL)
#endif
if (str == NULL) {
err_repr = PyObject_Repr(arg);
if (err_repr == NULL)
return NULL;
#if PY_MAJOR_VERSION >= 3
err_bytes = PyUnicode_AsASCIIString(err_repr);
if (err_bytes == NULL)
return NULL;
PyErr_Format(
PyExc_ValueError,
"Couldn't parse date string '%.200s' - value is not a string.",
PyBytes_AS_STRING(err_bytes));
Py_DECREF(err_bytes);
#else
PyErr_Format(
PyExc_ValueError,
"Couldn't parse date string '%.200s' - value is not a string.",
PyString_AsString(err_repr));
#endif
Py_DECREF(err_repr);
return NULL;
}
if (sscanf(str, "%4u-%2u-%2u", &year, &month, &day) != 3) {
PyErr_SetString(PyExc_ValueError, "Couldn't parse date string.");
numparsed = sscanf(str, "%4u-%2u-%2u", &year, &month, &day);
#if PY_MAJOR_VERSION >= 3
Py_DECREF(bytes);
#endif
if (numparsed != 3) {
err_repr = PyObject_Repr(arg);
if (err_repr == NULL)
return NULL;
#if PY_MAJOR_VERSION >= 3
err_bytes = PyUnicode_AsASCIIString(err_repr);
if (err_bytes == NULL)
return NULL;
PyErr_Format(
PyExc_ValueError,
"Couldn't parse date string: %.200s",
PyBytes_AS_STRING(err_bytes));
Py_DECREF(err_bytes);
#else
PyErr_Format(
PyExc_ValueError,
"Couldn't parse date string: %.200s",
PyString_AsString(err_repr));
#endif
Py_DECREF(err_repr);
return NULL;
}
return PyDate_FromDate(year, month, day);
@ -159,17 +335,35 @@ UnicodeResultProcessor_init(UnicodeResultProcessor *self, PyObject *args,
PyObject *encoding, *errors = NULL;
static char *kwlist[] = {"encoding", "errors", NULL};
#if PY_MAJOR_VERSION >= 3
if (!PyArg_ParseTupleAndKeywords(args, kwds, "U|U:__init__", kwlist,
&encoding, &errors))
return -1;
#else
if (!PyArg_ParseTupleAndKeywords(args, kwds, "S|S:__init__", kwlist,
&encoding, &errors))
return -1;
#endif
#if PY_MAJOR_VERSION >= 3
encoding = PyUnicode_AsASCIIString(encoding);
#else
Py_INCREF(encoding);
#endif
self->encoding = encoding;
if (errors) {
#if PY_MAJOR_VERSION >= 3
errors = PyUnicode_AsASCIIString(errors);
#else
Py_INCREF(errors);
#endif
} else {
#if PY_MAJOR_VERSION >= 3
errors = PyBytes_FromString("strict");
#else
errors = PyString_FromString("strict");
#endif
if (errors == NULL)
return -1;
}
@ -188,28 +382,88 @@ UnicodeResultProcessor_process(UnicodeResultProcessor *self, PyObject *value)
if (value == Py_None)
Py_RETURN_NONE;
#if PY_MAJOR_VERSION >= 3
if (PyBytes_AsStringAndSize(value, &str, &len))
return NULL;
encoding = PyBytes_AS_STRING(self->encoding);
errors = PyBytes_AS_STRING(self->errors);
#else
if (PyString_AsStringAndSize(value, &str, &len))
return NULL;
encoding = PyString_AS_STRING(self->encoding);
errors = PyString_AS_STRING(self->errors);
#endif
return PyUnicode_Decode(str, len, encoding, errors);
}
static PyObject *
UnicodeResultProcessor_conditional_process(UnicodeResultProcessor *self, PyObject *value)
{
const char *encoding, *errors;
char *str;
Py_ssize_t len;
if (value == Py_None)
Py_RETURN_NONE;
#if PY_MAJOR_VERSION >= 3
if (PyUnicode_Check(value) == 1) {
Py_INCREF(value);
return value;
}
if (PyBytes_AsStringAndSize(value, &str, &len))
return NULL;
encoding = PyBytes_AS_STRING(self->encoding);
errors = PyBytes_AS_STRING(self->errors);
#else
if (PyUnicode_Check(value) == 1) {
Py_INCREF(value);
return value;
}
if (PyString_AsStringAndSize(value, &str, &len))
return NULL;
encoding = PyString_AS_STRING(self->encoding);
errors = PyString_AS_STRING(self->errors);
#endif
return PyUnicode_Decode(str, len, encoding, errors);
}
static void
UnicodeResultProcessor_dealloc(UnicodeResultProcessor *self)
{
Py_XDECREF(self->encoding);
Py_XDECREF(self->errors);
#if PY_MAJOR_VERSION >= 3
Py_TYPE(self)->tp_free((PyObject*)self);
#else
self->ob_type->tp_free((PyObject*)self);
#endif
}
static PyMethodDef UnicodeResultProcessor_methods[] = {
{"process", (PyCFunction)UnicodeResultProcessor_process, METH_O,
"The value processor itself."},
{"conditional_process", (PyCFunction)UnicodeResultProcessor_conditional_process, METH_O,
"Conditional version of the value processor."},
{NULL} /* Sentinel */
};
static PyTypeObject UnicodeResultProcessorType = {
PyObject_HEAD_INIT(NULL)
0, /* ob_size */
PyVarObject_HEAD_INIT(NULL, 0)
"sqlalchemy.cprocessors.UnicodeResultProcessor", /* tp_name */
sizeof(UnicodeResultProcessor), /* tp_basicsize */
0, /* tp_itemsize */
0, /* tp_dealloc */
(destructor)UnicodeResultProcessor_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
@ -255,7 +509,11 @@ DecimalResultProcessor_init(DecimalResultProcessor *self, PyObject *args,
{
PyObject *type, *format;
#if PY_MAJOR_VERSION >= 3
if (!PyArg_ParseTuple(args, "OU", &type, &format))
#else
if (!PyArg_ParseTuple(args, "OS", &type, &format))
#endif
return -1;
Py_INCREF(type);
@ -275,22 +533,40 @@ DecimalResultProcessor_process(DecimalResultProcessor *self, PyObject *value)
if (value == Py_None)
Py_RETURN_NONE;
if (PyFloat_CheckExact(value)) {
/* Decimal does not accept float values directly */
args = PyTuple_Pack(1, value);
if (args == NULL)
return NULL;
/* Decimal does not accept float values directly */
/* SQLite can also give us an integer here (see [ticket:2432]) */
/* XXX: starting with Python 3.1, we could use Decimal.from_float(f),
but the result wouldn't be the same */
str = PyString_Format(self->format, args);
if (str == NULL)
return NULL;
args = PyTuple_Pack(1, value);
if (args == NULL)
return NULL;
result = PyObject_CallFunctionObjArgs(self->type, str, NULL);
Py_DECREF(str);
return result;
} else {
return PyObject_CallFunctionObjArgs(self->type, value, NULL);
}
#if PY_MAJOR_VERSION >= 3
str = PyUnicode_Format(self->format, args);
#else
str = PyString_Format(self->format, args);
#endif
Py_DECREF(args);
if (str == NULL)
return NULL;
result = PyObject_CallFunctionObjArgs(self->type, str, NULL);
Py_DECREF(str);
return result;
}
static void
DecimalResultProcessor_dealloc(DecimalResultProcessor *self)
{
Py_XDECREF(self->type);
Py_XDECREF(self->format);
#if PY_MAJOR_VERSION >= 3
Py_TYPE(self)->tp_free((PyObject*)self);
#else
self->ob_type->tp_free((PyObject*)self);
#endif
}
static PyMethodDef DecimalResultProcessor_methods[] = {
@ -300,12 +576,11 @@ static PyMethodDef DecimalResultProcessor_methods[] = {
};
static PyTypeObject DecimalResultProcessorType = {
PyObject_HEAD_INIT(NULL)
0, /* ob_size */
PyVarObject_HEAD_INIT(NULL, 0)
"sqlalchemy.DecimalResultProcessor", /* tp_name */
sizeof(DecimalResultProcessor), /* tp_basicsize */
0, /* tp_itemsize */
0, /* tp_dealloc */
(destructor)DecimalResultProcessor_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
@ -341,11 +616,6 @@ static PyTypeObject DecimalResultProcessorType = {
0, /* tp_new */
};
#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
#define PyMODINIT_FUNC void
#endif
static PyMethodDef module_methods[] = {
{"int_to_boolean", int_to_boolean, METH_O,
"Convert an integer to a boolean."},
@ -362,23 +632,53 @@ static PyMethodDef module_methods[] = {
{NULL, NULL, 0, NULL} /* Sentinel */
};
#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
#define PyMODINIT_FUNC void
#endif
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef module_def = {
PyModuleDef_HEAD_INIT,
MODULE_NAME,
MODULE_DOC,
-1,
module_methods
};
#define INITERROR return NULL
PyMODINIT_FUNC
PyInit_cprocessors(void)
#else
#define INITERROR return
PyMODINIT_FUNC
initcprocessors(void)
#endif
{
PyObject *m;
UnicodeResultProcessorType.tp_new = PyType_GenericNew;
if (PyType_Ready(&UnicodeResultProcessorType) < 0)
return;
INITERROR;
DecimalResultProcessorType.tp_new = PyType_GenericNew;
if (PyType_Ready(&DecimalResultProcessorType) < 0)
return;
INITERROR;
m = Py_InitModule3("cprocessors", module_methods,
"Module containing C versions of data processing functions.");
#if PY_MAJOR_VERSION >= 3
m = PyModule_Create(&module_def);
#else
m = Py_InitModule3(MODULE_NAME, module_methods, MODULE_DOC);
#endif
if (m == NULL)
return;
INITERROR;
PyDateTime_IMPORT;
@ -389,5 +689,8 @@ initcprocessors(void)
Py_INCREF(&DecimalResultProcessorType);
PyModule_AddObject(m, "DecimalResultProcessor",
(PyObject *)&DecimalResultProcessorType);
}
#if PY_MAJOR_VERSION >= 3
return m;
#endif
}

View File

@ -1,6 +1,7 @@
/*
resultproxy.c
Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com
Copyright (C) 2010-2017 the SQLAlchemy authors and contributors <see AUTHORS file>
Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com
This module is part of SQLAlchemy and is released under
the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -8,6 +9,18 @@ the MIT License: http://www.opensource.org/licenses/mit-license.php
#include <Python.h>
#define MODULE_NAME "cresultproxy"
#define MODULE_DOC "Module containing C versions of core ResultProxy classes."
#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
typedef int Py_ssize_t;
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
typedef Py_ssize_t (*lenfunc)(PyObject *);
#define PyInt_FromSsize_t(x) PyInt_FromLong(x)
typedef intargfunc ssizeargfunc;
#endif
/***********
* Structs *
@ -69,8 +82,8 @@ BaseRowProxy_init(BaseRowProxy *self, PyObject *args, PyObject *kwds)
Py_INCREF(parent);
self->parent = parent;
if (!PyTuple_CheckExact(row)) {
PyErr_SetString(PyExc_TypeError, "row must be a tuple");
if (!PySequence_Check(row)) {
PyErr_SetString(PyExc_TypeError, "row must be a sequence");
return -1;
}
Py_INCREF(row);
@ -100,11 +113,11 @@ BaseRowProxy_init(BaseRowProxy *self, PyObject *args, PyObject *kwds)
static PyObject *
BaseRowProxy_reduce(PyObject *self)
{
PyObject *method, *state;
PyObject *module, *reconstructor, *cls;
PyObject *method, *state;
PyObject *module, *reconstructor, *cls;
method = PyObject_GetAttrString(self, "__getstate__");
if (method == NULL)
method = PyObject_GetAttrString(self, "__getstate__");
if (method == NULL)
return NULL;
state = PyObject_CallObject(method, NULL);
@ -112,7 +125,7 @@ BaseRowProxy_reduce(PyObject *self)
if (state == NULL)
return NULL;
module = PyImport_ImportModule("sqlalchemy.engine.base");
module = PyImport_ImportModule("sqlalchemy.engine.result");
if (module == NULL)
return NULL;
@ -140,7 +153,11 @@ BaseRowProxy_dealloc(BaseRowProxy *self)
Py_XDECREF(self->row);
Py_XDECREF(self->processors);
Py_XDECREF(self->keymap);
#if PY_MAJOR_VERSION >= 3
Py_TYPE(self)->tp_free((PyObject *)self);
#else
self->ob_type->tp_free((PyObject *)self);
#endif
}
static PyObject *
@ -148,13 +165,15 @@ BaseRowProxy_processvalues(PyObject *values, PyObject *processors, int astuple)
{
Py_ssize_t num_values, num_processors;
PyObject **valueptr, **funcptr, **resultptr;
PyObject *func, *result, *processed_value;
PyObject *func, *result, *processed_value, *values_fastseq;
num_values = Py_SIZE(values);
num_processors = Py_SIZE(processors);
num_values = PySequence_Length(values);
num_processors = PyList_Size(processors);
if (num_values != num_processors) {
PyErr_SetString(PyExc_RuntimeError,
"number of values in row differ from number of column processors");
PyErr_Format(PyExc_RuntimeError,
"number of values in row (%d) differ from number of column "
"processors (%d)",
(int)num_values, (int)num_processors);
return NULL;
}
@ -166,9 +185,11 @@ BaseRowProxy_processvalues(PyObject *values, PyObject *processors, int astuple)
if (result == NULL)
return NULL;
/* we don't need to use PySequence_Fast as long as values, processors and
* result are simple tuple or lists. */
valueptr = PySequence_Fast_ITEMS(values);
values_fastseq = PySequence_Fast(values, "row must be a sequence");
if (values_fastseq == NULL)
return NULL;
valueptr = PySequence_Fast_ITEMS(values_fastseq);
funcptr = PySequence_Fast_ITEMS(processors);
resultptr = PySequence_Fast_ITEMS(result);
while (--num_values >= 0) {
@ -177,6 +198,7 @@ BaseRowProxy_processvalues(PyObject *values, PyObject *processors, int astuple)
processed_value = PyObject_CallFunctionObjArgs(func, *valueptr,
NULL);
if (processed_value == NULL) {
Py_DECREF(values_fastseq);
Py_DECREF(result);
return NULL;
}
@ -189,6 +211,7 @@ BaseRowProxy_processvalues(PyObject *values, PyObject *processors, int astuple)
funcptr++;
resultptr++;
}
Py_DECREF(values_fastseq);
return result;
}
@ -199,19 +222,12 @@ BaseRowProxy_values(BaseRowProxy *self)
self->processors, 0);
}
static PyTupleObject *
BaseRowProxy_tuplevalues(BaseRowProxy *self)
{
return (PyTupleObject *)BaseRowProxy_processvalues(self->row,
self->processors, 1);
}
static PyObject *
BaseRowProxy_iter(BaseRowProxy *self)
{
PyObject *values, *result;
values = (PyObject *)BaseRowProxy_tuplevalues(self);
values = BaseRowProxy_processvalues(self->row, self->processors, 1);
if (values == NULL)
return NULL;
@ -226,26 +242,39 @@ BaseRowProxy_iter(BaseRowProxy *self)
static Py_ssize_t
BaseRowProxy_length(BaseRowProxy *self)
{
return Py_SIZE(self->row);
return PySequence_Length(self->row);
}
static PyObject *
BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
{
PyObject *processors, *values;
PyObject *processor, *value;
PyObject *record, *result, *indexobject;
PyObject *exc_module, *exception;
PyObject *processor, *value, *processed_value;
PyObject *row, *record, *result, *indexobject;
PyObject *exc_module, *exception, *cstr_obj;
#if PY_MAJOR_VERSION >= 3
PyObject *bytes;
#endif
char *cstr_key;
long index;
int key_fallback = 0;
int tuple_check = 0;
#if PY_MAJOR_VERSION < 3
if (PyInt_CheckExact(key)) {
index = PyInt_AS_LONG(key);
} else if (PyLong_CheckExact(key)) {
if (index < 0)
index += BaseRowProxy_length(self);
} else
#endif
if (PyLong_CheckExact(key)) {
index = PyLong_AsLong(key);
if ((index == -1) && PyErr_Occurred())
/* -1 can be either the actual value, or an error flag. */
return NULL;
if (index < 0)
index += BaseRowProxy_length(self);
} else if (PySlice_Check(key)) {
values = PyObject_GetItem(self->row, key);
if (values == NULL)
@ -268,12 +297,17 @@ BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
"O", key);
if (record == NULL)
return NULL;
key_fallback = 1;
}
indexobject = PyTuple_GetItem(record, 1);
indexobject = PyTuple_GetItem(record, 2);
if (indexobject == NULL)
return NULL;
if (key_fallback) {
Py_DECREF(record);
}
if (indexobject == Py_None) {
exc_module = PyImport_ImportModule("sqlalchemy.exc");
if (exc_module == NULL)
@ -285,17 +319,47 @@ BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
if (exception == NULL)
return NULL;
cstr_key = PyString_AsString(key);
if (cstr_key == NULL)
cstr_obj = PyTuple_GetItem(record, 1);
if (cstr_obj == NULL)
return NULL;
cstr_obj = PyObject_Str(cstr_obj);
if (cstr_obj == NULL)
return NULL;
/*
FIXME: raise encoding error exception (in both versions below)
if the key contains non-ascii chars, instead of an
InvalidRequestError without any message like in the
python version.
*/
#if PY_MAJOR_VERSION >= 3
bytes = PyUnicode_AsASCIIString(cstr_obj);
if (bytes == NULL)
return NULL;
cstr_key = PyBytes_AS_STRING(bytes);
#else
cstr_key = PyString_AsString(cstr_obj);
#endif
if (cstr_key == NULL) {
Py_DECREF(cstr_obj);
return NULL;
}
Py_DECREF(cstr_obj);
PyErr_Format(exception,
"Ambiguous column name '%s' in result set! "
"try 'use_labels' option on select statement.", cstr_key);
"Ambiguous column name '%.200s' in "
"result set column descriptions", cstr_key);
return NULL;
}
#if PY_MAJOR_VERSION >= 3
index = PyLong_AsLong(indexobject);
#else
index = PyInt_AsLong(indexobject);
#endif
if ((index == -1) && PyErr_Occurred())
/* -1 can be either the actual value, or an error flag. */
return NULL;
@ -304,22 +368,53 @@ BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
if (processor == NULL)
return NULL;
value = PyTuple_GetItem(self->row, index);
row = self->row;
if (PyTuple_CheckExact(row)) {
value = PyTuple_GetItem(row, index);
tuple_check = 1;
}
else {
value = PySequence_GetItem(row, index);
tuple_check = 0;
}
if (value == NULL)
return NULL;
if (processor != Py_None) {
return PyObject_CallFunctionObjArgs(processor, value, NULL);
processed_value = PyObject_CallFunctionObjArgs(processor, value, NULL);
if (!tuple_check) {
Py_DECREF(value);
}
return processed_value;
} else {
Py_INCREF(value);
if (tuple_check) {
Py_INCREF(value);
}
return value;
}
}
static PyObject *
BaseRowProxy_getitem(PyObject *self, Py_ssize_t i)
{
PyObject *index;
#if PY_MAJOR_VERSION >= 3
index = PyLong_FromSsize_t(i);
#else
index = PyInt_FromSsize_t(i);
#endif
return BaseRowProxy_subscript((BaseRowProxy*)self, index);
}
static PyObject *
BaseRowProxy_getattro(BaseRowProxy *self, PyObject *name)
{
PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
PyObject *err_bytes;
#endif
if (!(tmp = PyObject_GenericGetAttr((PyObject *)self, name))) {
if (!PyErr_ExceptionMatches(PyExc_AttributeError))
@ -329,7 +424,28 @@ BaseRowProxy_getattro(BaseRowProxy *self, PyObject *name)
else
return tmp;
return BaseRowProxy_subscript(self, name);
tmp = BaseRowProxy_subscript(self, name);
if (tmp == NULL && PyErr_ExceptionMatches(PyExc_KeyError)) {
#if PY_MAJOR_VERSION >= 3
err_bytes = PyUnicode_AsASCIIString(name);
if (err_bytes == NULL)
return NULL;
PyErr_Format(
PyExc_AttributeError,
"Could not locate column in row for column '%.200s'",
PyBytes_AS_STRING(err_bytes)
);
#else
PyErr_Format(
PyExc_AttributeError,
"Could not locate column in row for column '%.200s'",
PyString_AsString(name)
);
#endif
return NULL;
}
return tmp;
}
/***********************
@ -354,7 +470,7 @@ BaseRowProxy_setparent(BaseRowProxy *self, PyObject *value, void *closure)
return -1;
}
module = PyImport_ImportModule("sqlalchemy.engine.base");
module = PyImport_ImportModule("sqlalchemy.engine.result");
if (module == NULL)
return -1;
@ -393,9 +509,9 @@ BaseRowProxy_setrow(BaseRowProxy *self, PyObject *value, void *closure)
return -1;
}
if (!PyTuple_CheckExact(value)) {
if (!PySequence_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"The 'row' attribute value must be a tuple");
"The 'row' attribute value must be a sequence");
return -1;
}
@ -487,8 +603,8 @@ static PyGetSetDef BaseRowProxy_getseters[] = {
static PyMethodDef BaseRowProxy_methods[] = {
{"values", (PyCFunction)BaseRowProxy_values, METH_NOARGS,
"Return the values represented by this BaseRowProxy as a list."},
{"__reduce__", (PyCFunction)BaseRowProxy_reduce, METH_NOARGS,
"Pickle support method."},
{"__reduce__", (PyCFunction)BaseRowProxy_reduce, METH_NOARGS,
"Pickle support method."},
{NULL} /* Sentinel */
};
@ -496,7 +612,7 @@ static PySequenceMethods BaseRowProxy_as_sequence = {
(lenfunc)BaseRowProxy_length, /* sq_length */
0, /* sq_concat */
0, /* sq_repeat */
0, /* sq_item */
(ssizeargfunc)BaseRowProxy_getitem, /* sq_item */
0, /* sq_slice */
0, /* sq_ass_item */
0, /* sq_ass_slice */
@ -512,8 +628,7 @@ static PyMappingMethods BaseRowProxy_as_mapping = {
};
static PyTypeObject BaseRowProxyType = {
PyObject_HEAD_INIT(NULL)
0, /* ob_size */
PyVarObject_HEAD_INIT(NULL, 0)
"sqlalchemy.cresultproxy.BaseRowProxy", /* tp_name */
sizeof(BaseRowProxy), /* tp_basicsize */
0, /* tp_itemsize */
@ -553,34 +668,60 @@ static PyTypeObject BaseRowProxyType = {
0 /* tp_new */
};
#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
#define PyMODINIT_FUNC void
#endif
static PyMethodDef module_methods[] = {
{"safe_rowproxy_reconstructor", safe_rowproxy_reconstructor, METH_VARARGS,
"reconstruct a RowProxy instance from its pickled form."},
{NULL, NULL, 0, NULL} /* Sentinel */
};
#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
#define PyMODINIT_FUNC void
#endif
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef module_def = {
PyModuleDef_HEAD_INIT,
MODULE_NAME,
MODULE_DOC,
-1,
module_methods
};
#define INITERROR return NULL
PyMODINIT_FUNC
PyInit_cresultproxy(void)
#else
#define INITERROR return
PyMODINIT_FUNC
initcresultproxy(void)
#endif
{
PyObject *m;
BaseRowProxyType.tp_new = PyType_GenericNew;
if (PyType_Ready(&BaseRowProxyType) < 0)
return;
INITERROR;
m = Py_InitModule3("cresultproxy", module_methods,
"Module containing C versions of core ResultProxy classes.");
#if PY_MAJOR_VERSION >= 3
m = PyModule_Create(&module_def);
#else
m = Py_InitModule3(MODULE_NAME, module_methods, MODULE_DOC);
#endif
if (m == NULL)
return;
INITERROR;
Py_INCREF(&BaseRowProxyType);
PyModule_AddObject(m, "BaseRowProxy", (PyObject *)&BaseRowProxyType);
#if PY_MAJOR_VERSION >= 3
return m;
#endif
}

View File

@ -0,0 +1,225 @@
/*
utils.c
Copyright (C) 2012-2017 the SQLAlchemy authors and contributors <see AUTHORS file>
This module is part of SQLAlchemy and is released under
the MIT License: http://www.opensource.org/licenses/mit-license.php
*/
#include <Python.h>
#define MODULE_NAME "cutils"
#define MODULE_DOC "Module containing C versions of utility functions."
/*
Given arguments from the calling form *multiparams, **params,
return a list of bind parameter structures, usually a list of
dictionaries.

In the case of 'raw' execution which accepts positional parameters,
it may be a list of tuples or lists.
*/
static PyObject *
distill_params(PyObject *self, PyObject *args)
{
    PyObject *multiparams, *params;
    PyObject *enclosing_list, *double_enclosing_list;
    PyObject *zero_element, *zero_element_item;
    Py_ssize_t multiparam_size, zero_element_length;

    if (!PyArg_UnpackTuple(args, "_distill_params", 2, 2,
                           &multiparams, &params)) {
        return NULL;
    }

    if (multiparams != Py_None) {
        /* PyTuple_Size returns -1 with an exception set when multiparams
           is not actually a tuple. */
        multiparam_size = PyTuple_Size(multiparams);
        if (multiparam_size < 0) {
            return NULL;
        }
    }
    else {
        multiparam_size = 0;
    }

    if (multiparam_size == 0) {
        /* NOTE(review): PyDict_Size returns -1 on a non-dict, which also
           satisfies "!= 0" while leaving an exception pending; the caller
           is assumed to pass a dict (or None) for params — confirm. */
        if (params != Py_None && PyDict_Size(params) != 0) {
            /* execute(stmt, **params) -> [params] */
            enclosing_list = PyList_New(1);
            if (enclosing_list == NULL) {
                return NULL;
            }
            Py_INCREF(params);
            if (PyList_SetItem(enclosing_list, 0, params) == -1) {
                /* PyList_SetItem steals the reference to the item even on
                   failure, so only the list itself needs releasing here. */
                Py_DECREF(enclosing_list);
                return NULL;
            }
        }
        else {
            /* no parameters at all -> empty list */
            enclosing_list = PyList_New(0);
            if (enclosing_list == NULL) {
                return NULL;
            }
        }
        return enclosing_list;
    }
    else if (multiparam_size == 1) {
        /* borrowed reference; index 0 is valid since size == 1 */
        zero_element = PyTuple_GetItem(multiparams, 0);
        if (zero_element == NULL) {
            return NULL;
        }
        if (PyTuple_Check(zero_element) || PyList_Check(zero_element)) {
            zero_element_length = PySequence_Length(zero_element);
            if (zero_element_length != 0) {
                zero_element_item = PySequence_GetItem(zero_element, 0);
                if (zero_element_item == NULL) {
                    return NULL;
                }
            }
            else {
                zero_element_item = NULL;
            }

            if (zero_element_length == 0 ||
                    (
                        PyObject_HasAttrString(zero_element_item, "__iter__") &&
                        !PyObject_HasAttrString(zero_element_item, "strip")
                    )
                ) {
                /*
                 * execute(stmt, [{}, {}, {}, ...])
                 * execute(stmt, [(), (), (), ...])
                 */
                Py_XDECREF(zero_element_item);
                Py_INCREF(zero_element);
                return zero_element;
            }
            else {
                /*
                 * execute(stmt, ("value", "value"))
                 */
                Py_XDECREF(zero_element_item);
                enclosing_list = PyList_New(1);
                if (enclosing_list == NULL) {
                    return NULL;
                }
                Py_INCREF(zero_element);
                if (PyList_SetItem(enclosing_list, 0, zero_element) == -1) {
                    /* reference to zero_element already consumed by
                       PyList_SetItem on failure */
                    Py_DECREF(enclosing_list);
                    return NULL;
                }
                return enclosing_list;
            }
        }
        else if (PyObject_HasAttrString(zero_element, "keys")) {
            /*
             * execute(stmt, {"key":"value"})
             */
            enclosing_list = PyList_New(1);
            if (enclosing_list == NULL) {
                return NULL;
            }
            Py_INCREF(zero_element);
            if (PyList_SetItem(enclosing_list, 0, zero_element) == -1) {
                Py_DECREF(enclosing_list);
                return NULL;
            }
            return enclosing_list;
        } else {
            /* single scalar parameter -> [[value]] */
            enclosing_list = PyList_New(1);
            if (enclosing_list == NULL) {
                return NULL;
            }
            double_enclosing_list = PyList_New(1);
            if (double_enclosing_list == NULL) {
                Py_DECREF(enclosing_list);
                return NULL;
            }
            Py_INCREF(zero_element);
            if (PyList_SetItem(enclosing_list, 0, zero_element) == -1) {
                Py_DECREF(enclosing_list);
                Py_DECREF(double_enclosing_list);
                return NULL;
            }
            if (PyList_SetItem(double_enclosing_list, 0, enclosing_list) == -1) {
                /* enclosing_list (which now owns zero_element) was already
                   consumed by the failed PyList_SetItem */
                Py_DECREF(double_enclosing_list);
                return NULL;
            }
            return double_enclosing_list;
        }
    }
    else {
        /* more than one positional parameter set */
        zero_element = PyTuple_GetItem(multiparams, 0);
        if (zero_element == NULL) {
            return NULL;
        }
        if (PyObject_HasAttrString(zero_element, "__iter__") &&
                !PyObject_HasAttrString(zero_element, "strip")
            ) {
            /*
             * execute(stmt, {}, {}, ...) / execute(stmt, (), (), ...)
             */
            Py_INCREF(multiparams);
            return multiparams;
        }
        else {
            /*
             * execute(stmt, "value", "value") -> [multiparams]
             */
            enclosing_list = PyList_New(1);
            if (enclosing_list == NULL) {
                return NULL;
            }
            Py_INCREF(multiparams);
            if (PyList_SetItem(enclosing_list, 0, multiparams) == -1) {
                Py_DECREF(enclosing_list);
                return NULL;
            }
            return enclosing_list;
        }
    }
}
static PyMethodDef module_methods[] = {
{"_distill_params", distill_params, METH_VARARGS,
"Distill an execute() parameter structure."},
{NULL, NULL, 0, NULL} /* Sentinel */
};
#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
#define PyMODINIT_FUNC void
#endif

#if PY_MAJOR_VERSION >= 3
/* Module definition used by the Python 3 multi-phase-free init path. */
static struct PyModuleDef module_def = {
    .m_base = PyModuleDef_HEAD_INIT,
    .m_name = MODULE_NAME,
    .m_doc = MODULE_DOC,
    .m_size = -1,               /* no per-module state */
    .m_methods = module_methods,
};
#endif
/* Module entry point for both Python 3 (PyInit_cutils) and
   Python 2 (initcutils). */
#if PY_MAJOR_VERSION >= 3
PyMODINIT_FUNC
PyInit_cutils(void)
#else
PyMODINIT_FUNC
initcutils(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
    /* PyModule_Create returns NULL with an exception set on failure,
       which is exactly what a PyInit_* function must return. */
    return PyModule_Create(&module_def);
#else
    if (Py_InitModule3(MODULE_NAME, module_methods, MODULE_DOC) == NULL)
        return;
#endif
}

View File

@ -1,31 +1,30 @@
# __init__.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
# databases/__init__.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.sqlite import base as sqlite
from sqlalchemy.dialects.postgresql import base as postgresql
"""Include imports from the sqlalchemy.dialects package for backwards
compatibility with pre 0.6 versions.
"""
from ..dialects.sqlite import base as sqlite
from ..dialects.postgresql import base as postgresql
postgres = postgresql
from sqlalchemy.dialects.mysql import base as mysql
from sqlalchemy.dialects.oracle import base as oracle
from sqlalchemy.dialects.firebird import base as firebird
from sqlalchemy.dialects.maxdb import base as maxdb
from sqlalchemy.dialects.informix import base as informix
from sqlalchemy.dialects.mssql import base as mssql
from sqlalchemy.dialects.access import base as access
from sqlalchemy.dialects.sybase import base as sybase
from ..dialects.mysql import base as mysql
from ..dialects.oracle import base as oracle
from ..dialects.firebird import base as firebird
from ..dialects.mssql import base as mssql
from ..dialects.sybase import base as sybase
__all__ = (
'access',
'firebird',
'informix',
'maxdb',
'mssql',
'mysql',
'postgresql',
'sqlite',
'oracle',
'sybase',
)
)

View File

@ -1,16 +1,21 @@
from sqlalchemy.dialects.firebird import base, kinterbasdb
# firebird/__init__.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
base.dialect = kinterbasdb.dialect
from sqlalchemy.dialects.firebird import base, kinterbasdb, fdb
base.dialect = fdb.dialect
from sqlalchemy.dialects.firebird.base import \
SMALLINT, BIGINT, FLOAT, FLOAT, DATE, TIME, \
TEXT, NUMERIC, FLOAT, TIMESTAMP, VARCHAR, CHAR, BLOB,\
dialect
__all__ = (
'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME',
'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME',
'TEXT', 'NUMERIC', 'FLOAT', 'TIMESTAMP', 'VARCHAR', 'CHAR', 'BLOB',
'dialect'
)

View File

@ -1,16 +1,17 @@
# firebird.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
# firebird/base.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Support for the Firebird database.
r"""
Connectivity is usually supplied via the kinterbasdb_ DBAPI module.
.. dialect:: firebird
:name: Firebird
Dialects
~~~~~~~~
Firebird Dialects
-----------------
Firebird offers two distinct dialects_ (not to be confused with a
SQLAlchemy ``Dialect``):
@ -27,7 +28,7 @@ support for dialect 1 is not well tested and probably has
incompatibilities.
Locking Behavior
~~~~~~~~~~~~~~~~
----------------
Firebird locks tables aggressively. For this reason, a DROP TABLE may
hang until other transactions are released. SQLAlchemy does its best
@ -47,20 +48,20 @@ The above use case can be alleviated by calling ``first()`` on the
all remaining cursor/connection resources.
RETURNING support
~~~~~~~~~~~~~~~~~
-----------------
Firebird 2.0 supports returning a result set from inserts, and 2.1
extends that to deletes and updates. This is generically exposed by
the SQLAlchemy ``returning()`` method, such as::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\\
result = table.insert().returning(table.c.col1, table.c.col2).\
values(name='foo')
print result.fetchall()
# UPDATE..RETURNING
raises = empl.update().returning(empl.c.id, empl.c.salary).\\
where(empl.c.sales>100).\\
raises = empl.update().returning(empl.c.id, empl.c.salary).\
where(empl.c.sales>100).\
values(dict(salary=empl.c.salary * 1.1))
print raises.fetchall()
@ -69,18 +70,17 @@ the SQLAlchemy ``returning()`` method, such as::
"""
import datetime, re
import datetime
from sqlalchemy import schema as sa_schema
from sqlalchemy import exc, types as sqltypes, sql, util
from sqlalchemy.sql import expression
from sqlalchemy.engine import base, default, reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.types import (BIGINT, BLOB, BOOLEAN, CHAR, DATE,
FLOAT, INTEGER, NUMERIC, SMALLINT,
TEXT, TIME, TIMESTAMP, VARCHAR)
from sqlalchemy.types import (BIGINT, BLOB, DATE, FLOAT, INTEGER, NUMERIC,
SMALLINT, TEXT, TIME, TIMESTAMP, Integer)
RESERVED_WORDS = set([
@ -120,65 +120,144 @@ RESERVED_WORDS = set([
"union", "unique", "update", "upper", "user", "using", "value",
"values", "varchar", "variable", "varying", "view", "wait", "when",
"where", "while", "with", "work", "write", "year",
])
])
class _StringType(sqltypes.String):
"""Base for Firebird string types."""
def __init__(self, charset=None, **kw):
self.charset = charset
super(_StringType, self).__init__(**kw)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""Firebird VARCHAR type"""
__visit_name__ = 'VARCHAR'
def __init__(self, length=None, **kwargs):
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""Firebird CHAR type"""
__visit_name__ = 'CHAR'
def __init__(self, length=None, **kwargs):
super(CHAR, self).__init__(length=length, **kwargs)
class _FBDateTime(sqltypes.DateTime):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
colspecs = {
sqltypes.DateTime: _FBDateTime
}
ischema_names = {
'SHORT': SMALLINT,
'LONG': BIGINT,
'QUAD': FLOAT,
'FLOAT': FLOAT,
'DATE': DATE,
'TIME': TIME,
'TEXT': TEXT,
'INT64': NUMERIC,
'DOUBLE': FLOAT,
'TIMESTAMP': TIMESTAMP,
'SHORT': SMALLINT,
'LONG': INTEGER,
'QUAD': FLOAT,
'FLOAT': FLOAT,
'DATE': DATE,
'TIME': TIME,
'TEXT': TEXT,
'INT64': BIGINT,
'DOUBLE': FLOAT,
'TIMESTAMP': TIMESTAMP,
'VARYING': VARCHAR,
'CSTRING': CHAR,
'BLOB': BLOB,
}
'BLOB': BLOB,
}
# TODO: date conversion types (should be implemented as _FBDateTime, _FBDate, etc.
# as bind/result functionality is required)
# TODO: date conversion types (should be implemented as _FBDateTime,
# _FBDate, etc. as bind/result functionality is required)
class FBTypeCompiler(compiler.GenericTypeCompiler):
def visit_boolean(self, type_):
return self.visit_SMALLINT(type_)
def visit_boolean(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_datetime(self, type_):
return self.visit_TIMESTAMP(type_)
def visit_datetime(self, type_, **kw):
return self.visit_TIMESTAMP(type_, **kw)
def visit_TEXT(self, type_):
def visit_TEXT(self, type_, **kw):
return "BLOB SUB_TYPE 1"
def visit_BLOB(self, type_):
def visit_BLOB(self, type_, **kw):
return "BLOB SUB_TYPE 0"
def _extend_string(self, type_, basic):
charset = getattr(type_, 'charset', None)
if charset is None:
return basic
else:
return '%s CHARACTER SET %s' % (basic, charset)
def visit_CHAR(self, type_, **kw):
basic = super(FBTypeCompiler, self).visit_CHAR(type_, **kw)
return self._extend_string(type_, basic)
def visit_VARCHAR(self, type_, **kw):
if not type_.length:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" %
self.dialect.name)
basic = super(FBTypeCompiler, self).visit_VARCHAR(type_, **kw)
return self._extend_string(type_, basic)
class FBCompiler(sql.compiler.SQLCompiler):
"""Firebird specific idiosincrasies"""
"""Firebird specific idiosyncrasies"""
def visit_mod(self, binary, **kw):
# Firebird lacks a builtin modulo operator, but there is
# an equivalent function in the ib_udf library.
return "mod(%s, %s)" % (self.process(binary.left), self.process(binary.right))
ansi_bind_rules = True
# def visit_contains_op_binary(self, binary, operator, **kw):
# cant use CONTAINING b.c. it's case insensitive.
# def visit_notcontains_op_binary(self, binary, operator, **kw):
# cant use NOT CONTAINING b.c. it's case insensitive.
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_startswith_op_binary(self, binary, operator, **kw):
return '%s STARTING WITH %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw))
def visit_notstartswith_op_binary(self, binary, operator, **kw):
return '%s NOT STARTING WITH %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw))
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw))
def visit_alias(self, alias, asfrom=False, **kwargs):
if self.dialect._version_two:
return super(FBCompiler, self).visit_alias(alias, asfrom=asfrom, **kwargs)
return super(FBCompiler, self).\
visit_alias(alias, asfrom=asfrom, **kwargs)
else:
# Override to not use the AS keyword which FB 1.5 does not like
if asfrom:
alias_name = isinstance(alias.name, expression._generated_label) and \
self._truncated_identifier("alias", alias.name) or alias.name
alias_name = isinstance(alias.name,
expression._truncated_label) and \
self._truncated_identifier("alias",
alias.name) or alias.name
return self.process(alias.original, asfrom=asfrom, **kwargs) + " " + \
self.preparer.format_alias(alias, alias_name)
return self.process(
alias.original, asfrom=asfrom, **kwargs) + \
" " + \
self.preparer.format_alias(alias, alias_name)
else:
return self.process(alias.original, **kwargs)
@ -200,8 +279,12 @@ class FBCompiler(sql.compiler.SQLCompiler):
visit_char_length_func = visit_length_func
def function_argspec(self, func, **kw):
# TODO: this probably will need to be
# narrowed to a fixed list, some no-arg functions
# may require parens - see similar example in the oracle
# dialect
if func.clauses is not None and len(func.clauses):
return self.process(func.clause_expr)
return self.process(func.clause_expr, **kw)
else:
return ""
@ -211,41 +294,37 @@ class FBCompiler(sql.compiler.SQLCompiler):
def visit_sequence(self, seq):
return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
def get_select_precolumns(self, select):
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list Firebird puts the limit and offset right
after the ``SELECT``...
"""
result = ""
if select._limit:
result += "FIRST %d " % select._limit
if select._offset:
result +="SKIP %d " % select._offset
if select._limit_clause is not None:
result += "FIRST %s " % self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
result += "SKIP %s " % self.process(select._offset_clause, **kw)
if select._distinct:
result += "DISTINCT "
return result
def limit_clause(self, select):
def limit_clause(self, select, **kw):
"""Already taken care of in the `get_select_precolumns` method."""
return ""
def returning_clause(self, stmt, returning_cols):
columns = [
self.process(
self.label_select_column(None, c, asfrom=False),
within_columns_clause=True,
result_map=self.result_map
)
for c in expression._select_iterables(returning_cols)
]
self._label_select_column(None, c, True, False, {})
for c in expression._select_iterables(returning_cols)
]
return 'RETURNING ' + ', '.join(columns)
class FBDDLCompiler(sql.compiler.DDLCompiler):
"""Firebird syntactic idiosincrasies"""
"""Firebird syntactic idiosyncrasies"""
def visit_create_sequence(self, create):
"""Generate a ``CREATE GENERATOR`` statement for the sequence."""
@ -253,39 +332,50 @@ class FBDDLCompiler(sql.compiler.DDLCompiler):
# no syntax for these
# http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
if create.element.start is not None:
raise NotImplemented("Firebird SEQUENCE doesn't support START WITH")
raise NotImplemented(
"Firebird SEQUENCE doesn't support START WITH")
if create.element.increment is not None:
raise NotImplemented("Firebird SEQUENCE doesn't support INCREMENT BY")
raise NotImplemented(
"Firebird SEQUENCE doesn't support INCREMENT BY")
if self.dialect._version_two:
return "CREATE SEQUENCE %s" % self.preparer.format_sequence(create.element)
return "CREATE SEQUENCE %s" % \
self.preparer.format_sequence(create.element)
else:
return "CREATE GENERATOR %s" % self.preparer.format_sequence(create.element)
return "CREATE GENERATOR %s" % \
self.preparer.format_sequence(create.element)
def visit_drop_sequence(self, drop):
"""Generate a ``DROP GENERATOR`` statement for the sequence."""
if self.dialect._version_two:
return "DROP SEQUENCE %s" % self.preparer.format_sequence(drop.element)
return "DROP SEQUENCE %s" % \
self.preparer.format_sequence(drop.element)
else:
return "DROP GENERATOR %s" % self.preparer.format_sequence(drop.element)
return "DROP GENERATOR %s" % \
self.preparer.format_sequence(drop.element)
class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
"""Install Firebird specific reserved words."""
reserved_words = RESERVED_WORDS
illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(
['_'])
def __init__(self, dialect):
super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
class FBExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq):
def fire_sequence(self, seq, type_):
"""Get the next value from the sequence using ``gen_id()``."""
return self._execute_scalar("SELECT gen_id(%s, 1) FROM rdb$database" % \
self.dialect.identifier_preparer.format_sequence(seq))
return self._execute_scalar(
"SELECT gen_id(%s, 1) FROM rdb$database" %
self.dialect.identifier_preparer.format_sequence(seq),
type_
)
class FBDialect(default.DefaultDialect):
@ -305,7 +395,6 @@ class FBDialect(default.DefaultDialect):
requires_name_normalize = True
supports_empty_insert = False
statement_compiler = FBCompiler
ddl_compiler = FBDDLCompiler
preparer = FBIdentifierPreparer
@ -315,6 +404,8 @@ class FBDialect(default.DefaultDialect):
colspecs = colspecs
ischema_names = ischema_names
construct_arguments = []
# defaults to dialect ver. 3,
# will be autodetected off upon
# first connect
@ -322,7 +413,13 @@ class FBDialect(default.DefaultDialect):
def initialize(self, connection):
super(FBDialect, self).initialize(connection)
self._version_two = self.server_version_info > (2, )
self._version_two = ('firebird' in self.server_version_info and
self.server_version_info >= (2, )
) or \
('interbase' in self.server_version_info and
self.server_version_info >= (6, )
)
if not self._version_two:
# TODO: whatever other pre < 2.0 stuff goes here
self.ischema_names = ischema_names.copy()
@ -330,8 +427,9 @@ class FBDialect(default.DefaultDialect):
self.colspecs = {
sqltypes.DateTime: sqltypes.DATE
}
else:
self.implicit_returning = True
self.implicit_returning = self._version_two and \
self.__dict__.get('implicit_returning', True)
def normalize_name(self, name):
# Remove trailing spaces: FB uses a CHAR() type,
@ -340,8 +438,10 @@ class FBDialect(default.DefaultDialect):
if name is None:
return None
elif name.upper() == name and \
not self.identifier_preparer._requires_quotes(name.lower()):
not self.identifier_preparer._requires_quotes(name.lower()):
return name.lower()
elif name.lower() == name:
return quoted_name(name, quote=True)
else:
return name
@ -349,16 +449,17 @@ class FBDialect(default.DefaultDialect):
if name is None:
return None
elif name.lower() == name and \
not self.identifier_preparer._requires_quotes(name.lower()):
not self.identifier_preparer._requires_quotes(name.lower()):
return name.upper()
else:
return name
def has_table(self, connection, table_name, schema=None):
"""Return ``True`` if the given table exists, ignoring the `schema`."""
"""Return ``True`` if the given table exists, ignoring
the `schema`."""
tblqry = """
SELECT 1 FROM rdb$database
SELECT 1 AS has_table FROM rdb$database
WHERE EXISTS (SELECT rdb$relation_name
FROM rdb$relations
WHERE rdb$relation_name=?)
@ -370,7 +471,7 @@ class FBDialect(default.DefaultDialect):
"""Return ``True`` if the given sequence (generator) exists."""
genqry = """
SELECT 1 FROM rdb$database
SELECT 1 AS has_sequence FROM rdb$database
WHERE EXISTS (SELECT rdb$generator_name
FROM rdb$generators
WHERE rdb$generator_name=?)
@ -380,18 +481,34 @@ class FBDialect(default.DefaultDialect):
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
# there are two queries commonly mentioned for this.
# this one, using view_blr, is at the Firebird FAQ among other places:
# http://www.firebirdfaq.org/faq174/
s = """
SELECT DISTINCT rdb$relation_name
FROM rdb$relation_fields
WHERE rdb$system_flag=0 AND rdb$view_context IS NULL
select rdb$relation_name
from rdb$relations
where rdb$view_blr is null
and (rdb$system_flag is null or rdb$system_flag = 0);
"""
# the other query is this one. It's not clear if there's really
# any difference between these two. This link:
# http://www.alberton.info/firebird_sql_meta_info.html#.Ur3vXfZGni8
# states them as interchangeable. Some discussion at [ticket:2898]
# SELECT DISTINCT rdb$relation_name
# FROM rdb$relation_fields
# WHERE rdb$system_flag=0 AND rdb$view_context IS NULL
return [self.normalize_name(row[0]) for row in connection.execute(s)]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
# see http://www.firebirdfaq.org/faq174/
s = """
SELECT distinct rdb$view_name
FROM rdb$view_relations
select rdb$relation_name
from rdb$relations
where rdb$view_blr is not null
and (rdb$system_flag is null or rdb$system_flag = 0);
"""
return [self.normalize_name(row[0]) for row in connection.execute(s)]
@ -410,7 +527,7 @@ class FBDialect(default.DefaultDialect):
return None
@reflection.cache
def get_primary_keys(self, connection, table_name, schema=None, **kw):
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
# Query to extract the PK/FK constrained fields of the given table
keyqry = """
SELECT se.rdb$field_name AS fname
@ -422,10 +539,12 @@ class FBDialect(default.DefaultDialect):
# get primary key fields
c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
pkfields = [self.normalize_name(r['fname']) for r in c.fetchall()]
return pkfields
return {'constrained_columns': pkfields, 'name': None}
@reflection.cache
def get_column_sequence(self, connection, table_name, column_name, schema=None, **kw):
def get_column_sequence(self, connection,
table_name, column_name,
schema=None, **kw):
tablename = self.denormalize_name(table_name)
colname = self.denormalize_name(column_name)
# Heuristic-query to determine the generator associated to a PK field
@ -436,14 +555,15 @@ class FBDialect(default.DefaultDialect):
ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
AND trigdep.rdb$depended_on_type=14
AND trigdep.rdb$dependent_type=2
JOIN rdb$triggers trig ON trig.rdb$trigger_name=tabdep.rdb$dependent_name
JOIN rdb$triggers trig ON
trig.rdb$trigger_name=tabdep.rdb$dependent_name
WHERE tabdep.rdb$depended_on_name=?
AND tabdep.rdb$depended_on_type=0
AND trig.rdb$trigger_type=1
AND tabdep.rdb$field_name=?
AND (SELECT count(*)
FROM rdb$dependencies trigdep2
WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
FROM rdb$dependencies trigdep2
WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
"""
genr = connection.execute(genqry, [tablename, colname]).first()
if genr is not None:
@ -453,24 +573,29 @@ class FBDialect(default.DefaultDialect):
def get_columns(self, connection, table_name, schema=None, **kw):
# Query to extract the details of all the fields of the given table
tblqry = """
SELECT DISTINCT r.rdb$field_name AS fname,
SELECT r.rdb$field_name AS fname,
r.rdb$null_flag AS null_flag,
t.rdb$type_name AS ftype,
f.rdb$field_sub_type AS stype,
f.rdb$field_length/COALESCE(cs.rdb$bytes_per_character,1) AS flen,
f.rdb$field_length/
COALESCE(cs.rdb$bytes_per_character,1) AS flen,
f.rdb$field_precision AS fprec,
f.rdb$field_scale AS fscale,
COALESCE(r.rdb$default_source, f.rdb$default_source) AS fdefault
COALESCE(r.rdb$default_source,
f.rdb$default_source) AS fdefault
FROM rdb$relation_fields r
JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
JOIN rdb$types t
ON t.rdb$type=f.rdb$field_type AND t.rdb$field_name='RDB$FIELD_TYPE'
LEFT JOIN rdb$character_sets cs ON f.rdb$character_set_id=cs.rdb$character_set_id
ON t.rdb$type=f.rdb$field_type AND
t.rdb$field_name='RDB$FIELD_TYPE'
LEFT JOIN rdb$character_sets cs ON
f.rdb$character_set_id=cs.rdb$character_set_id
WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
ORDER BY r.rdb$field_position
"""
# get the PK, used to determine the eventual associated sequence
pkey_cols = self.get_primary_keys(connection, table_name)
pk_constraint = self.get_pk_constraint(connection, table_name)
pkey_cols = pk_constraint['constrained_columns']
tablename = self.denormalize_name(table_name)
# get all of the fields for this table
@ -490,8 +615,10 @@ class FBDialect(default.DefaultDialect):
util.warn("Did not recognize type '%s' of column '%s'" %
(colspec, name))
coltype = sqltypes.NULLTYPE
elif colspec == 'INT64':
coltype = coltype(precision=row['fprec'], scale=row['fscale'] * -1)
elif issubclass(coltype, Integer) and row['fprec'] != 0:
coltype = NUMERIC(
precision=row['fprec'],
scale=row['fscale'] * -1)
elif colspec in ('VARYING', 'CSTRING'):
coltype = coltype(row['flen'])
elif colspec == 'TEXT':
@ -502,25 +629,29 @@ class FBDialect(default.DefaultDialect):
else:
coltype = BLOB()
else:
coltype = coltype(row)
coltype = coltype()
# does it have a default value?
defvalue = None
if row['fdefault'] is not None:
# the value comes down as "DEFAULT 'value'": there may be
# more than one whitespace around the "DEFAULT" keyword
# and it may also be lower case
# (see also http://tracker.firebirdsql.org/browse/CORE-356)
defexpr = row['fdefault'].lstrip()
assert defexpr[:8].rstrip()=='DEFAULT', "Unrecognized default value: %s" % defexpr
assert defexpr[:8].rstrip().upper() == \
'DEFAULT', "Unrecognized default value: %s" % \
defexpr
defvalue = defexpr[8:].strip()
if defvalue == 'NULL':
# Redundant
defvalue = None
col_d = {
'name' : name,
'type' : coltype,
'nullable' : not bool(row['null_flag']),
'default' : defvalue
'name': name,
'type': coltype,
'nullable': not bool(row['null_flag']),
'default': defvalue,
'autoincrement': 'auto',
}
if orig_colname.lower() == orig_colname:
@ -528,7 +659,7 @@ class FBDialect(default.DefaultDialect):
# if the PK is a single field, try to see if its linked to
# a sequence thru a trigger
if len(pkey_cols)==1 and name==pkey_cols[0]:
if len(pkey_cols) == 1 and name == pkey_cols[0]:
seq_d = self.get_column_sequence(connection, tablename, name)
if seq_d is not None:
col_d['sequence'] = seq_d
@ -547,7 +678,8 @@ class FBDialect(default.DefaultDialect):
FROM rdb$relation_constraints rc
JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
JOIN rdb$index_segments cse ON cse.rdb$index_name=ix1.rdb$index_name
JOIN rdb$index_segments cse ON
cse.rdb$index_name=ix1.rdb$index_name
JOIN rdb$index_segments se
ON se.rdb$index_name=ix2.rdb$index_name
AND se.rdb$field_position=cse.rdb$field_position
@ -557,12 +689,12 @@ class FBDialect(default.DefaultDialect):
tablename = self.denormalize_name(table_name)
c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
fks = util.defaultdict(lambda:{
'name' : None,
'constrained_columns' : [],
'referred_schema' : None,
'referred_table' : None,
'referred_columns' : []
fks = util.defaultdict(lambda: {
'name': None,
'constrained_columns': [],
'referred_schema': None,
'referred_table': None,
'referred_columns': []
})
for row in c:
@ -571,10 +703,11 @@ class FBDialect(default.DefaultDialect):
if not fk['name']:
fk['name'] = cname
fk['referred_table'] = self.normalize_name(row['targetrname'])
fk['constrained_columns'].append(self.normalize_name(row['fname']))
fk['constrained_columns'].append(
self.normalize_name(row['fname']))
fk['referred_columns'].append(
self.normalize_name(row['targetfname']))
return fks.values()
self.normalize_name(row['targetfname']))
return list(fks.values())
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
@ -586,10 +719,11 @@ class FBDialect(default.DefaultDialect):
JOIN rdb$index_segments ic
ON ix.rdb$index_name=ic.rdb$index_name
LEFT OUTER JOIN rdb$relation_constraints
ON rdb$relation_constraints.rdb$index_name = ic.rdb$index_name
ON rdb$relation_constraints.rdb$index_name =
ic.rdb$index_name
WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
AND rdb$relation_constraints.rdb$constraint_type IS NULL
ORDER BY index_name, field_name
ORDER BY index_name, ic.rdb$field_position
"""
c = connection.execute(qry, [self.denormalize_name(table_name)])
@ -601,19 +735,7 @@ class FBDialect(default.DefaultDialect):
indexrec['column_names'] = []
indexrec['unique'] = bool(row['unique_flag'])
indexrec['column_names'].append(self.normalize_name(row['field_name']))
indexrec['column_names'].append(
self.normalize_name(row['field_name']))
return indexes.values()
def do_execute(self, cursor, statement, parameters, **kwargs):
# kinterbase does not accept a None, but wants an empty list
# when there are no arguments.
cursor.execute(statement, parameters or [])
def do_rollback(self, connection):
# Use the retaining feature, that keeps the transaction going
connection.rollback(True)
def do_commit(self, connection):
# Use the retaining feature, that keeps the transaction going
connection.commit(True)
return list(indexes.values())

View File

@ -0,0 +1,118 @@
# firebird/fdb.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: firebird+fdb
:name: fdb
:dbapi: fdb
:connectstring: firebird+fdb://user:password@host:port/path/to/db\
[?key=value&key=value...]
:url: http://pypi.python.org/pypi/fdb/
fdb is a kinterbasdb compatible DBAPI for Firebird.
.. versionadded:: 0.8 - Support for the fdb Firebird driver.
.. versionchanged:: 0.9 - The fdb dialect is now the default dialect
under the ``firebird://`` URL space, as ``fdb`` is now the official
Python driver for Firebird.
Arguments
----------
The ``fdb`` dialect is based on the
:mod:`sqlalchemy.dialects.firebird.kinterbasdb` dialect, however does not
accept every argument that Kinterbasdb does.
* ``enable_rowcount`` - True by default, setting this to False disables
the usage of "cursor.rowcount" with the
Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically
after any UPDATE or DELETE statement. When disabled, SQLAlchemy's
ResultProxy will return -1 for result.rowcount. The rationale here is
that Kinterbasdb requires a second round trip to the database when
.rowcount is called - since SQLA's resultproxy automatically closes
the cursor after a non-result-returning statement, rowcount must be
called, if at all, before the result object is returned. Additionally,
cursor.rowcount may not return correct results with older versions
of Firebird, and setting this flag to False will also cause the
SQLAlchemy ORM to ignore its usage. The behavior can also be controlled on a
per-execution basis using the ``enable_rowcount`` option with
:meth:`.Connection.execution_options`::
conn = engine.connect().execution_options(enable_rowcount=True)
r = conn.execute(stmt)
print r.rowcount
* ``retaining`` - False by default. Setting this to True will pass the
``retaining=True`` keyword argument to the ``.commit()`` and ``.rollback()``
methods of the DBAPI connection, which can improve performance in some
situations, but apparently with significant caveats.
Please read the fdb and/or kinterbasdb DBAPI documentation in order to
understand the implications of this flag.
.. versionadded:: 0.8.2 - ``retaining`` keyword argument specifying
transaction retaining behavior - in 0.8 it defaults to ``True``
for backwards compatibility.
.. versionchanged:: 0.9.0 - the ``retaining`` flag defaults to ``False``.
In 0.8 it defaulted to ``True``.
.. seealso::
http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions
- information on the "retaining" flag.
"""
from .kinterbasdb import FBDialect_kinterbasdb
from ... import util
class FBDialect_fdb(FBDialect_kinterbasdb):
    """Firebird dialect backed by the ``fdb`` DBAPI.

    ``fdb`` is kinterbasdb-compatible, so nearly all behavior is
    inherited from :class:`.FBDialect_kinterbasdb`; only connection
    argument handling and server version detection differ.
    """

    def __init__(self, enable_rowcount=True, retaining=False, **kwargs):
        # Same flags as the kinterbasdb dialect; simply forward them.
        super(FBDialect_fdb, self).__init__(
            enable_rowcount=enable_rowcount,
            retaining=retaining,
            **kwargs)

    @classmethod
    def dbapi(cls):
        # Imported lazily so this module can load without fdb installed.
        return __import__('fdb')

    def create_connect_args(self, url):
        """Build the ``(args, kwargs)`` pair handed to ``fdb.connect()``."""
        params = url.translate_connect_args(username='user')
        if params.get('port'):
            # fdb expects "host/port" rather than a separate port argument.
            params['host'] = "%s/%s" % (params['host'], params.pop('port'))
        params.update(url.query)
        util.coerce_kw_type(params, 'type_conv', int)
        return ([], params)

    def _get_server_version_info(self, connection):
        """Return the version of the attached Firebird server.

        Returns a tuple of integers (`major`, `minor`, `build`).

        This uses the simpler ``db_info()`` approach (the alternative
        uses the services API), which for backward compatibility
        reasons returns a string like ``LI-V6.3.3.12981 Firebird 2.0``
        where the first version is a fake one resembling the old
        Interbase signature.
        """
        isc_info_firebird_version = 103
        fbconn = connection.connection
        version = fbconn.db_info(isc_info_firebird_version)
        return self._parse_version_info(version)
dialect = FBDialect_fdb

View File

@ -1,69 +1,119 @@
# kinterbasdb.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
# firebird/kinterbasdb.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
The most common way to connect to a Firebird engine is implemented by
kinterbasdb__, currently maintained__ directly by the Firebird people.
.. dialect:: firebird+kinterbasdb
:name: kinterbasdb
:dbapi: kinterbasdb
:connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db\
[?key=value&key=value...]
:url: http://firebirdsql.org/index.php?op=devel&sub=python
The connection URL is of the form
``firebird[+kinterbasdb]://user:password@host:port/path/to/db[?key=value&key=value...]``.
Arguments
----------
Kinterbasedb backend specific keyword arguments are:
The Kinterbasdb backend accepts the ``enable_rowcount`` and ``retaining``
arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect.
In addition, it also accepts the following:
type_conv
select the kind of mapping done on the types: by default SQLAlchemy
uses 200 with Unicode, datetime and decimal support (see details__).
* ``type_conv`` - select the kind of mapping done on the types: by default
SQLAlchemy uses 200 with Unicode, datetime and decimal support. See
the linked documents below for further information.
concurrency_level
set the backend policy with regards to threading issues: by default
SQLAlchemy uses policy 1 (see details__).
* ``concurrency_level`` - set the backend policy with regards to threading
issues: by default SQLAlchemy uses policy 1. See the linked documents
below for further information.
.. seealso::
http://sourceforge.net/projects/kinterbasdb
http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation
http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency
__ http://sourceforge.net/projects/kinterbasdb
__ http://firebirdsql.org/index.php?op=devel&sub=python
__ http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation
__ http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency
"""
from sqlalchemy.dialects.firebird.base import FBDialect, FBCompiler
from sqlalchemy import util, types as sqltypes
from .base import FBDialect, FBExecutionContext
from ... import util, types as sqltypes
from re import match
import decimal
class _FBNumeric_kinterbasdb(sqltypes.Numeric):
class _kinterbasdb_numeric(object):
def bind_processor(self, dialect):
def process(value):
if value is not None:
if isinstance(value, decimal.Decimal):
return str(value)
else:
return value
return process
class _FBNumeric_kinterbasdb(_kinterbasdb_numeric, sqltypes.Numeric):
pass
class _FBFloat_kinterbasdb(_kinterbasdb_numeric, sqltypes.Float):
pass
class FBExecutionContext_kinterbasdb(FBExecutionContext):
@property
def rowcount(self):
if self.execution_options.get('enable_rowcount',
self.dialect.enable_rowcount):
return self.cursor.rowcount
else:
return -1
class FBDialect_kinterbasdb(FBDialect):
driver = 'kinterbasdb'
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
execution_ctx_cls = FBExecutionContext_kinterbasdb
supports_native_decimal = True
colspecs = util.update_copy(
FBDialect.colspecs,
{
sqltypes.Numeric:_FBNumeric_kinterbasdb
sqltypes.Numeric: _FBNumeric_kinterbasdb,
sqltypes.Float: _FBFloat_kinterbasdb,
}
)
def __init__(self, type_conv=200, concurrency_level=1, **kwargs):
super(FBDialect_kinterbasdb, self).__init__(**kwargs)
)
def __init__(self, type_conv=200, concurrency_level=1,
enable_rowcount=True,
retaining=False, **kwargs):
super(FBDialect_kinterbasdb, self).__init__(**kwargs)
self.enable_rowcount = enable_rowcount
self.type_conv = type_conv
self.concurrency_level = concurrency_level
self.retaining = retaining
if enable_rowcount:
self.supports_sane_rowcount = True
@classmethod
def dbapi(cls):
k = __import__('kinterbasdb')
return k
return __import__('kinterbasdb')
def do_execute(self, cursor, statement, parameters, context=None):
    """Execute a statement on the given cursor.

    kinterbasdb does not accept ``None`` for the parameter set; it
    wants an empty list when there are no arguments.
    """
    if not parameters:
        parameters = []
    cursor.execute(statement, parameters)
def do_rollback(self, dbapi_connection):
    # Roll back, passing the dialect's 'retaining' flag through to
    # kinterbasdb so the transaction context can be retained.
    dbapi_connection.rollback(self.retaining)
def do_commit(self, dbapi_connection):
    # Commit, passing the dialect's 'retaining' flag through to
    # kinterbasdb so the transaction context can be retained.
    dbapi_connection.commit(self.retaining)
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
@ -72,17 +122,22 @@ class FBDialect_kinterbasdb(FBDialect):
del opts['port']
opts.update(url.query)
util.coerce_kw_type(opts, 'type_conv', int)
type_conv = opts.pop('type_conv', self.type_conv)
concurrency_level = opts.pop('concurrency_level', self.concurrency_level)
concurrency_level = opts.pop('concurrency_level',
self.concurrency_level)
if self.dbapi is not None:
initialized = getattr(self.dbapi, 'initialized', None)
if initialized is None:
# CVS rev 1.96 changed the name of the attribute:
# http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96
# http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/
# Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96
initialized = getattr(self.dbapi, '_initialized', False)
if not initialized:
self.dbapi.init(type_conv=type_conv, concurrency_level=concurrency_level)
self.dbapi.init(type_conv=type_conv,
concurrency_level=concurrency_level)
return ([], opts)
def _get_server_version_info(self, connection):
@ -96,24 +151,33 @@ class FBDialect_kinterbasdb(FBDialect):
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature. This is more than enough for our purposes,
# as this is mainly (only?) used by the testsuite.
from re import match
# Interbase signature.
fbconn = connection.connection
version = fbconn.server_version
m = match('\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+) \w+ (\d+)\.(\d+)', version)
if not m:
raise AssertionError("Could not determine version from string '%s'" % version)
return tuple([int(x) for x in m.group(5, 6, 4)])
def is_disconnect(self, e):
if isinstance(e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)):
return self._parse_version_info(version)
def _parse_version_info(self, version):
m = match(
r'\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?', version)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % version)
if m.group(5) != None:
return tuple([int(x) for x in m.group(6, 7, 4)] + ['firebird'])
else:
return tuple([int(x) for x in m.group(1, 2, 3)] + ['interbase'])
def is_disconnect(self, e, connection, cursor):
if isinstance(e, (self.dbapi.OperationalError,
self.dbapi.ProgrammingError)):
msg = str(e)
return ('Unable to complete network request to host' in msg or
'Invalid connection state' in msg or
'Invalid cursor state' in msg)
'Invalid cursor state' in msg or
'connection shutdown' in msg)
else:
return False

View File

@ -1,4 +1,12 @@
from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, pymssql, zxjdbc, mxodbc
# mssql/__init__.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, \
pymssql, zxjdbc, mxodbc
base.dialect = pyodbc.dialect
@ -11,9 +19,9 @@ from sqlalchemy.dialects.mssql.base import \
__all__ = (
'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME',
'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP',
'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect'
)
)

View File

@ -1,15 +1,34 @@
"""
The adodbapi dialect is not implemented for 0.6 at this time.
# mssql/adodbapi.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+adodbapi
:name: adodbapi
:dbapi: adodbapi
:connectstring: mssql+adodbapi://<username>:<password>@<dsnname>
:url: http://adodbapi.sourceforge.net/
.. note::
The adodbapi dialect is not implemented SQLAlchemy versions 0.6 and
above at this time.
"""
import datetime
from sqlalchemy import types as sqltypes, util
from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect
import sys
class MSDateTime_adodbapi(MSDateTime):
def result_processor(self, dialect, coltype):
def process(value):
# adodbapi will return datetimes with empty time values as datetime.date() objects.
# adodbapi will return datetimes with empty time
# values as datetime.date() objects.
# Promote them back to full datetime.datetime()
if type(value) is datetime.date:
return datetime.datetime(value.year, value.month, value.day)
@ -23,7 +42,7 @@ class MSDialect_adodbapi(MSDialect):
supports_unicode = sys.maxunicode == 65535
supports_unicode_statements = True
driver = 'adodbapi'
@classmethod
def import_dbapi(cls):
import adodbapi as module
@ -32,28 +51,37 @@ class MSDialect_adodbapi(MSDialect):
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.DateTime:MSDateTime_adodbapi
sqltypes.DateTime: MSDateTime_adodbapi
}
)
def create_connect_args(self, url):
keys = url.query
def check_quote(token):
if ";" in str(token):
token = "'%s'" % token
return token
keys = dict(
(k, check_quote(v)) for k, v in url.query.items()
)
connectors = ["Provider=SQLOLEDB"]
if 'port' in keys:
connectors.append ("Data Source=%s, %s" % (keys.get("host"), keys.get("port")))
connectors.append("Data Source=%s, %s" %
(keys.get("host"), keys.get("port")))
else:
connectors.append ("Data Source=%s" % keys.get("host"))
connectors.append ("Initial Catalog=%s" % keys.get("database"))
connectors.append("Data Source=%s" % keys.get("host"))
connectors.append("Initial Catalog=%s" % keys.get("database"))
user = keys.get("user")
if user:
connectors.append("User Id=%s" % user)
connectors.append("Password=%s" % keys.get("password", ""))
else:
connectors.append("Integrated Security=SSPI")
return [[";".join (connectors)], {}]
return [[";".join(connectors)], {}]
def is_disconnect(self, e):
return isinstance(e, self.dbapi.adodbapi.DatabaseError) and "'connection failure'" in str(e)
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \
"'connection failure'" in str(e)
dialect = MSDialect_adodbapi

File diff suppressed because it is too large Load Diff

View File

@ -1,83 +1,136 @@
from sqlalchemy import Table, MetaData, Column, ForeignKey
from sqlalchemy.types import String, Unicode, Integer, TypeDecorator
# mssql/information_schema.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# TODO: should be using the sys. catalog with SQL Server, not information
# schema
from ... import Table, MetaData, Column
from ...types import String, Unicode, UnicodeText, Integer, TypeDecorator
from ... import cast
from ... import util
from ...sql import expression
from ...ext.compiler import compiles
ischema = MetaData()
class CoerceUnicode(TypeDecorator):
impl = Unicode
def process_bind_param(self, value, dialect):
if isinstance(value, str):
if util.py2k and isinstance(value, util.binary_type):
value = value.decode(dialect.encoding)
return value
def bind_expression(self, bindvalue):
return _cast_on_2005(bindvalue)
class _cast_on_2005(expression.ColumnElement):
    # Wrapper element holding a bind value; its compilation hook decides
    # whether to emit a CAST to Unicode based on the server version.
    def __init__(self, bindvalue):
        self.bindvalue = bindvalue
@compiles(_cast_on_2005)
def _compile(element, compiler, **kw):
    """Compile a _cast_on_2005 element.

    On SQL Server 2005 and later the bind value is wrapped in a CAST to
    Unicode; older servers receive the bare bind value.
    """
    from . import base
    target = element.bindvalue
    if compiler.dialect.server_version_info >= base.MS_2005_VERSION:
        target = cast(target, Unicode)
    return compiler.process(target, **kw)
schemata = Table("SCHEMATA", ischema,
Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"),
Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"),
Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"),
schema="INFORMATION_SCHEMA")
Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"),
Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"),
Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"),
schema="INFORMATION_SCHEMA")
tables = Table("TABLES", ischema,
Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("TABLE_TYPE", String(convert_unicode=True), key="table_type"),
schema="INFORMATION_SCHEMA")
Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column(
"TABLE_TYPE", String(convert_unicode=True),
key="table_type"),
schema="INFORMATION_SCHEMA")
columns = Table("COLUMNS", ischema,
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
Column("IS_NULLABLE", Integer, key="is_nullable"),
Column("DATA_TYPE", String, key="data_type"),
Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
Column("CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"),
Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
Column("COLUMN_DEFAULT", Integer, key="column_default"),
Column("COLLATION_NAME", String, key="collation_name"),
schema="INFORMATION_SCHEMA")
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
Column("IS_NULLABLE", Integer, key="is_nullable"),
Column("DATA_TYPE", String, key="data_type"),
Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
Column("CHARACTER_MAXIMUM_LENGTH", Integer,
key="character_maximum_length"),
Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
Column("COLUMN_DEFAULT", Integer, key="column_default"),
Column("COLLATION_NAME", String, key="collation_name"),
schema="INFORMATION_SCHEMA")
constraints = Table("TABLE_CONSTRAINTS", ischema,
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
Column("CONSTRAINT_TYPE", String(convert_unicode=True), key="constraint_type"),
schema="INFORMATION_SCHEMA")
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("CONSTRAINT_NAME", CoerceUnicode,
key="constraint_name"),
Column("CONSTRAINT_TYPE", String(
convert_unicode=True), key="constraint_type"),
schema="INFORMATION_SCHEMA")
column_constraints = Table("CONSTRAINT_COLUMN_USAGE", ischema,
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
schema="INFORMATION_SCHEMA")
Column("TABLE_SCHEMA", CoerceUnicode,
key="table_schema"),
Column("TABLE_NAME", CoerceUnicode,
key="table_name"),
Column("COLUMN_NAME", CoerceUnicode,
key="column_name"),
Column("CONSTRAINT_NAME", CoerceUnicode,
key="constraint_name"),
schema="INFORMATION_SCHEMA")
key_constraints = Table("KEY_COLUMN_USAGE", ischema,
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
schema="INFORMATION_SCHEMA")
Column("TABLE_SCHEMA", CoerceUnicode,
key="table_schema"),
Column("TABLE_NAME", CoerceUnicode,
key="table_name"),
Column("COLUMN_NAME", CoerceUnicode,
key="column_name"),
Column("CONSTRAINT_NAME", CoerceUnicode,
key="constraint_name"),
Column("ORDINAL_POSITION", Integer,
key="ordinal_position"),
schema="INFORMATION_SCHEMA")
ref_constraints = Table("REFERENTIAL_CONSTRAINTS", ischema,
Column("CONSTRAINT_CATALOG", CoerceUnicode, key="constraint_catalog"),
Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"),
Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
Column("UNIQUE_CONSTRAINT_CATLOG", CoerceUnicode, key="unique_constraint_catalog"), # TODO: is CATLOG misspelled ?
Column("UNIQUE_CONSTRAINT_SCHEMA", CoerceUnicode, key="unique_constraint_schema"),
Column("UNIQUE_CONSTRAINT_NAME", CoerceUnicode, key="unique_constraint_name"),
Column("MATCH_OPTION", String, key="match_option"),
Column("UPDATE_RULE", String, key="update_rule"),
Column("DELETE_RULE", String, key="delete_rule"),
schema="INFORMATION_SCHEMA")
Column("CONSTRAINT_CATALOG", CoerceUnicode,
key="constraint_catalog"),
Column("CONSTRAINT_SCHEMA", CoerceUnicode,
key="constraint_schema"),
Column("CONSTRAINT_NAME", CoerceUnicode,
key="constraint_name"),
# TODO: is CATLOG misspelled ?
Column("UNIQUE_CONSTRAINT_CATLOG", CoerceUnicode,
key="unique_constraint_catalog"),
Column("UNIQUE_CONSTRAINT_SCHEMA", CoerceUnicode,
key="unique_constraint_schema"),
Column("UNIQUE_CONSTRAINT_NAME", CoerceUnicode,
key="unique_constraint_name"),
Column("MATCH_OPTION", String, key="match_option"),
Column("UPDATE_RULE", String, key="update_rule"),
Column("DELETE_RULE", String, key="delete_rule"),
schema="INFORMATION_SCHEMA")
views = Table("VIEWS", ischema,
Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"),
Column("CHECK_OPTION", String, key="check_option"),
Column("IS_UPDATABLE", String, key="is_updatable"),
schema="INFORMATION_SCHEMA")
Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"),
Column("CHECK_OPTION", String, key="check_option"),
Column("IS_UPDATABLE", String, key="is_updatable"),
schema="INFORMATION_SCHEMA")

View File

@ -1,55 +1,105 @@
# mssql/mxodbc.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Support for MS-SQL via mxODBC.
.. dialect:: mssql+mxodbc
:name: mxODBC
:dbapi: mxodbc
:connectstring: mssql+mxodbc://<username>:<password>@<dsnname>
:url: http://www.egenix.com/
mxODBC is available at:
http://www.egenix.com/
This was tested with mxODBC 3.1.2 and the SQL Server Native
Client connected to MSSQL 2005 and 2008 Express Editions.
Connecting
~~~~~~~~~~
Connection is via DSN::
mssql+mxodbc://<username>:<password>@<dsnname>
Execution Modes
~~~~~~~~~~~~~~~
---------------
mxODBC features two styles of statement execution, using the ``cursor.execute()``
and ``cursor.executedirect()`` methods (the second being an extension to the
DBAPI specification). The former makes use of the native
parameter binding services of the ODBC driver, while the latter uses string escaping.
The primary advantage to native parameter binding is that the same statement, when
executed many times, is only prepared once. Whereas the primary advantage to the
latter is that the rules for bind parameter placement are relaxed. MS-SQL has very
strict rules for native binds, including that they cannot be placed within the argument
lists of function calls, anywhere outside the FROM, or even within subqueries within the
FROM clause - making the usage of bind parameters within SELECT statements impossible for
all but the most simplistic statements. For this reason, the mxODBC dialect uses the
"native" mode by default only for INSERT, UPDATE, and DELETE statements, and uses the
escaped string mode for all other statements. This behavior can be controlled completely
via :meth:`~sqlalchemy.sql.expression.Executable.execution_options`
using the ``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a value of
``True`` will unconditionally use native bind parameters and a value of ``False`` will
unconditionally use string-escaped parameters.
mxODBC features two styles of statement execution, using the
``cursor.execute()`` and ``cursor.executedirect()`` methods (the second being
an extension to the DBAPI specification). The former makes use of a particular
API call specific to the SQL Server Native Client ODBC driver known
SQLDescribeParam, while the latter does not.
mxODBC apparently only makes repeated use of a single prepared statement
when SQLDescribeParam is used. The advantage to prepared statement reuse is
one of performance. The disadvantage is that SQLDescribeParam has a limited
set of scenarios in which bind parameters are understood, including that they
cannot be placed within the argument lists of function calls, anywhere outside
the FROM, or even within subqueries within the FROM clause - making the usage
of bind parameters within SELECT statements impossible for all but the most
simplistic statements.
For this reason, the mxODBC dialect uses the "native" mode by default only for
INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for
all other statements.
This behavior can be controlled via
:meth:`~sqlalchemy.sql.expression.Executable.execution_options` using the
``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a
value of ``True`` will unconditionally use native bind parameters and a value
of ``False`` will unconditionally use string-escaped parameters.
"""
import re
import sys
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.connectors.mxodbc import MxODBCConnector
from sqlalchemy.dialects.mssql.pyodbc import MSExecutionContext_pyodbc
from sqlalchemy.dialects.mssql.base import (MSExecutionContext, MSDialect,
MSSQLCompiler, MSSQLStrictCompiler,
_MSDateTime, _MSDate, TIME)
from ... import types as sqltypes
from ...connectors.mxodbc import MxODBCConnector
from .pyodbc import MSExecutionContext_pyodbc, _MSNumeric_pyodbc
from .base import (MSDialect,
MSSQLStrictCompiler,
VARBINARY,
_MSDateTime, _MSDate, _MSTime)
class _MSNumeric_mxodbc(_MSNumeric_pyodbc):
    """Include pyodbc's numeric processor.

    Reuses the pyodbc Decimal-to-string bind workaround unchanged;
    presumably mxODBC needs the same fix -- TODO confirm.
    """
class _MSDate_mxodbc(_MSDate):
    """Date type for mxODBC: binds dates as 'year-month-day' strings."""

    def bind_processor(self, dialect):
        def to_str(value):
            if value is None:
                return None
            # Render without zero-padding, matching the %s formatting
            # of the component attributes.
            return "%s-%s-%s" % (value.year, value.month, value.day)
        return to_str
class _MSTime_mxodbc(_MSTime):
    """Time type for mxODBC: binds times as 'hour:minute:second' strings."""

    def bind_processor(self, dialect):
        def to_str(value):
            if value is None:
                return None
            # Render without zero-padding, matching the %s formatting
            # of the component attributes.
            return "%s:%s:%s" % (value.hour, value.minute, value.second)
        return to_str
class _VARBINARY_mxodbc(VARBINARY):
    """mxODBC support for VARBINARY column types.

    Handles the special case of NULL VARBINARY values by mapping
    ``None`` to the ``mx.ODBC.Manager.BinaryNull`` symbol exposed on
    the DBAPI module.
    """

    def bind_processor(self, dialect):
        if dialect.dbapi is None:
            return None
        make_binary = dialect.dbapi.Binary

        def to_db(value):
            if value is None:
                # should pull from mx.ODBC.Manager.BinaryNull
                return dialect.dbapi.BinaryNull
            return make_binary(value)
        return to_db
class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc):
"""
@ -57,27 +107,33 @@ class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc):
SELECT SCOPE_IDENTITY in cases where OUTPUT clause
does not work (tables with insert triggers).
"""
#todo - investigate whether the pyodbc execution context
# todo - investigate whether the pyodbc execution context
# is really only being used in cases where OUTPUT
# won't work.
class MSDialect_mxodbc(MxODBCConnector, MSDialect):
# TODO: may want to use this only if FreeTDS is not in use,
# since FreeTDS doesn't seem to use native binds.
statement_compiler = MSSQLStrictCompiler
# this is only needed if "native ODBC" mode is used,
# which is now disabled by default.
# statement_compiler = MSSQLStrictCompiler
execution_ctx_cls = MSExecutionContext_mxodbc
# flag used by _MSNumeric_mxodbc
_need_decimal_fix = True
colspecs = {
#sqltypes.Numeric : _MSNumeric,
sqltypes.DateTime : _MSDateTime,
sqltypes.Date : _MSDate,
sqltypes.Time : TIME,
sqltypes.Numeric: _MSNumeric_mxodbc,
sqltypes.DateTime: _MSDateTime,
sqltypes.Date: _MSDate_mxodbc,
sqltypes.Time: _MSTime_mxodbc,
VARBINARY: _VARBINARY_mxodbc,
sqltypes.LargeBinary: _VARBINARY_mxodbc,
}
def __init__(self, description_encoding='latin-1', **params):
def __init__(self, description_encoding=None, **params):
super(MSDialect_mxodbc, self).__init__(**params)
self.description_encoding = description_encoding
dialect = MSDialect_mxodbc

View File

@ -1,41 +1,27 @@
"""
Support for the pymssql dialect.
This dialect supports pymssql 1.0 and greater.
pymssql is available at:
http://pymssql.sourceforge.net/
Connecting
^^^^^^^^^^
Sample connect string::
mssql+pymssql://<username>:<password>@<freetds_name>
Adding "?charset=utf8" or similar will cause pymssql to return
strings as Python unicode objects. This can potentially improve
performance in some scenarios as decoding of strings is
handled natively.
Limitations
^^^^^^^^^^^
pymssql inherits a lot of limitations from FreeTDS, including:
* no support for multibyte schema identifiers
* poor support for large decimals
* poor support for binary fields
* poor support for VARCHAR/CHAR fields over 255 characters
Please consult the pymssql documentation for further information.
# mssql/pymssql.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
from sqlalchemy.dialects.mssql.base import MSDialect
from sqlalchemy import types as sqltypes, util, processors
.. dialect:: mssql+pymssql
:name: pymssql
:dbapi: pymssql
:connectstring: mssql+pymssql://<username>:<password>@<freetds_name>/?\
charset=utf8
:url: http://pymssql.org/
pymssql is a Python module that provides a Python DBAPI interface around
`FreeTDS <http://www.freetds.org/>`_. Compatible builds are available for
Linux, MacOSX and Windows platforms.
"""
from .base import MSDialect
from ... import types as sqltypes, util, processors
import re
import decimal
class _MSNumeric_pymssql(sqltypes.Numeric):
def result_processor(self, dialect, type_):
@ -44,29 +30,31 @@ class _MSNumeric_pymssql(sqltypes.Numeric):
else:
return sqltypes.Numeric.result_processor(self, dialect, type_)
class MSDialect_pymssql(MSDialect):
supports_sane_rowcount = False
max_identifier_length = 30
driver = 'pymssql'
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.Numeric:_MSNumeric_pymssql,
sqltypes.Float:sqltypes.Float,
sqltypes.Numeric: _MSNumeric_pymssql,
sqltypes.Float: sqltypes.Float,
}
)
@classmethod
def dbapi(cls):
module = __import__('pymssql')
# pymssql doesn't have a Binary method. we use string
# TODO: monkeypatching here is less than ideal
module.Binary = str
# pymssql < 2.1.1 doesn't have a Binary method. we use string
client_ver = tuple(int(x) for x in module.__version__.split("."))
if client_ver < (2, 1, 1):
# TODO: monkeypatching here is less than ideal
module.Binary = lambda x: x if hasattr(x, 'decode') else str(x)
if client_ver < (1, ):
util.warn("The pymssql dialect expects at least "
"the 1.0 series of the pymssql DBAPI.")
"the 1.0 series of the pymssql DBAPI.")
return module
def __init__(self, **params):
@ -75,7 +63,8 @@ class MSDialect_pymssql(MSDialect):
def _get_server_version_info(self, connection):
vers = connection.scalar("select @@version")
m = re.match(r"Microsoft SQL Server.*? - (\d+).(\d+).(\d+).(\d+)", vers)
m = re.match(
r"Microsoft .*? - (\d+).(\d+).(\d+).(\d+)", vers)
if m:
return tuple(int(x) for x in m.group(1, 2, 3, 4))
else:
@ -84,18 +73,25 @@ class MSDialect_pymssql(MSDialect):
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
opts.update(url.query)
opts.pop('port', None)
port = opts.pop('port', None)
if port and 'host' in opts:
opts['host'] = "%s:%s" % (opts['host'], port)
return [[], opts]
def is_disconnect(self, e):
def is_disconnect(self, e, connection, cursor):
for msg in (
"Adaptive Server connection timed out",
"Net-Lib error during Connection reset by peer",
"message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed"
"Connection is closed",
"message 20006", # Write to the server failed
"message 20017", # Unexpected EOF from the server
):
if msg in str(e):
return True
else:
return False
dialect = MSDialect_pymssql
dialect = MSDialect_pymssql

View File

@ -1,99 +1,135 @@
"""
Support for MS-SQL via pyodbc.
# mssql/pyodbc.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
pyodbc is available at:
r"""
.. dialect:: mssql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
http://pypi.python.org/pypi/pyodbc/
Connecting to PyODBC
--------------------
Connecting
^^^^^^^^^^
The URL here is to be translated to PyODBC connection strings, as
detailed in `ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_.
Examples of pyodbc connection string URLs:
DSN Connections
^^^^^^^^^^^^^^^
* ``mssql+pyodbc://mydsn`` - connects using the specified DSN named ``mydsn``.
The connection string that is created will appear like::
A DSN-based connection is **preferred** overall when using ODBC. A
basic DSN-based connection looks like::
dsn=mydsn;Trusted_Connection=Yes
engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
* ``mssql+pyodbc://user:pass@mydsn`` - connects using the DSN named
``mydsn`` passing in the ``UID`` and ``PWD`` information. The
connection string that is created will appear like::
Which above, will pass the following connection string to PyODBC::
dsn=mydsn;UID=user;PWD=pass
* ``mssql+pyodbc://user:pass@mydsn/?LANGUAGE=us_english`` - connects
using the DSN named ``mydsn`` passing in the ``UID`` and ``PWD``
information, plus the additional connection configuration option
``LANGUAGE``. The connection string that is created will appear
like::
If the username and password are omitted, the DSN form will also add
the ``Trusted_Connection=yes`` directive to the ODBC string.
dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english
Hostname Connections
^^^^^^^^^^^^^^^^^^^^
* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection string
dynamically created that would appear like::
Hostname-based connections are **not preferred**, however are supported.
The ODBC driver name must be explicitly specified::
DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass
engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
* ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection
string that is dynamically created, which also includes the port
information using the comma syntax. If your connection string
requires the port information to be passed as a ``port`` keyword
see the next example. This will create the following connection
string::
.. versionchanged:: 1.0.0 Hostname-based PyODBC connections now require the
SQL Server driver name specified explicitly. SQLAlchemy cannot
choose an optimal default here as it varies based on platform
and installed drivers.
DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass
Other keywords interpreted by the Pyodbc dialect to be passed to
``pyodbc.connect()`` in both the DSN and hostname cases include:
``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``.
* ``mssql+pyodbc://user:pass@host/db?port=123`` - connects using a connection
string that is dynamically created that includes the port
information as a separate ``port`` keyword. This will create the
following connection string::
Pass through exact Pyodbc string
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123
A PyODBC connection string can also be sent exactly as specified in
`ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_
into the driver using the parameter ``odbc_connect``. The delimiters must be URL escaped, however,
as illustrated below using ``urllib.quote_plus``::
If you require a connection string that is outside the options
presented above, use the ``odbc_connect`` keyword to pass in a
urlencoded connection string. What gets passed in will be urldecoded
and passed directly.
import urllib
params = urllib.quote_plus("DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password")
For example::
engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase%3Ddb
would create the following connection string::
Unicode Binds
-------------
dsn=mydsn;Database=db
The current state of PyODBC on a unix backend with FreeTDS and/or
EasySoft is poor regarding unicode; different OS platforms and versions of
UnixODBC versus IODBC versus FreeTDS/EasySoft versus PyODBC itself
dramatically alter how strings are received. The PyODBC dialect attempts to
use all the information it knows to determine whether or not a Python unicode
literal can be passed directly to the PyODBC driver or not; while SQLAlchemy
can encode these to bytestrings first, some users have reported that PyODBC
mis-handles bytestrings for certain encodings and requires a Python unicode
object, while the author has observed widespread cases where a Python unicode
is completely misinterpreted by PyODBC, particularly when dealing with
the information schema tables used in table reflection, and the value
must first be encoded to a bytestring.
Encoding your connection string can be easily accomplished through
the python shell. For example::
It is for this reason that whether or not unicode literals for bound
parameters be sent to PyODBC can be controlled using the
``supports_unicode_binds`` parameter to ``create_engine()``. When
left at its default of ``None``, the PyODBC dialect will use its
best guess as to whether or not the driver deals with unicode literals
well. When ``False``, unicode literals will be encoded first, and when
``True`` unicode literals will be passed straight through. This is an interim
flag that hopefully should not be needed when the unicode situation stabilizes
for unix + PyODBC.
>>> import urllib
>>> urllib.quote_plus('dsn=mydsn;Database=db')
'dsn%3Dmydsn%3BDatabase%3Ddb'
.. versionadded:: 0.7.7
``supports_unicode_binds`` parameter to ``create_engine()``\ .
Rowcount Support
----------------
Pyodbc only has partial support for rowcount. See the notes at
:ref:`mssql_rowcount_versioning` for important notes when using ORM
versioning.
"""
from sqlalchemy.dialects.mssql.base import MSExecutionContext, MSDialect
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy import types as sqltypes, util
from .base import MSExecutionContext, MSDialect, VARBINARY
from ...connectors.pyodbc import PyODBCConnector
from ... import types as sqltypes, util, exc
import decimal
import re
class _ms_numeric_pyodbc(object):
class _MSNumeric_pyodbc(sqltypes.Numeric):
"""Turns Decimals with adjusted() < 0 or > 7 into strings.
This is the only method that is proven to work with Pyodbc+MSSQL
without crashing (floats can be used but seem to cause sporadic
crashes).
The routines here are needed for older pyodbc versions
as well as current mxODBC versions.
"""
def bind_processor(self, dialect):
super_process = super(_MSNumeric_pyodbc, self).bind_processor(dialect)
super_process = super(_ms_numeric_pyodbc, self).\
bind_processor(dialect)
if not dialect._need_decimal_fix:
return super_process
def process(value):
if self.asdecimal and \
isinstance(value, decimal.Decimal):
adjusted = value.adjusted()
if adjusted < 0:
return self._small_dec_to_string(value)
@ -105,72 +141,106 @@ class _MSNumeric_pyodbc(sqltypes.Numeric):
else:
return value
return process
# these routines needed for older versions of pyodbc.
# as of 2.1.8 this logic is integrated.
def _small_dec_to_string(self, value):
return "%s0.%s%s" % (
(value < 0 and '-' or ''),
'0' * (abs(value.adjusted()) - 1),
"".join([str(nint) for nint in value._int]))
(value < 0 and '-' or ''),
'0' * (abs(value.adjusted()) - 1),
"".join([str(nint) for nint in value.as_tuple()[1]]))
def _large_dec_to_string(self, value):
_int = value.as_tuple()[1]
if 'E' in str(value):
result = "%s%s%s" % (
(value < 0 and '-' or ''),
"".join([str(s) for s in value._int]),
"0" * (value.adjusted() - (len(value._int)-1)))
(value < 0 and '-' or ''),
"".join([str(s) for s in _int]),
"0" * (value.adjusted() - (len(_int) - 1)))
else:
if (len(value._int) - 1) > value.adjusted():
if (len(_int) - 1) > value.adjusted():
result = "%s%s.%s" % (
(value < 0 and '-' or ''),
"".join([str(s) for s in value._int][0:value.adjusted() + 1]),
"".join([str(s) for s in value._int][value.adjusted() + 1:]))
(value < 0 and '-' or ''),
"".join(
[str(s) for s in _int][0:value.adjusted() + 1]),
"".join(
[str(s) for s in _int][value.adjusted() + 1:]))
else:
result = "%s%s" % (
(value < 0 and '-' or ''),
"".join([str(s) for s in value._int][0:value.adjusted() + 1]))
(value < 0 and '-' or ''),
"".join(
[str(s) for s in _int][0:value.adjusted() + 1]))
return result
class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
    """NUMERIC type with the pyodbc Decimal-to-string workaround mixed in."""
    pass
class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
    """FLOAT type with the pyodbc Decimal-to-string workaround mixed in."""
    pass
class _VARBINARY_pyodbc(VARBINARY):
    """VARBINARY that sends pyodbc's ``BinaryNull`` marker for ``None``."""

    def bind_processor(self, dialect):
        # no DBAPI loaded yet -> nothing to wrap values with
        if dialect.dbapi is None:
            return None

        wrap_binary = dialect.dbapi.Binary

        def process(value):
            if value is None:
                # pyodbc-specific NULL marker for binary columns
                return dialect.dbapi.BinaryNull
            return wrap_binary(value)
        return process
class MSExecutionContext_pyodbc(MSExecutionContext):
    # True when "; select scope_identity()" was appended to the statement
    _embedded_scope_identity = False

    def pre_exec(self):
        """where appropriate, issue "select scope_identity()" in the same
        statement.

        Background on why "scope_identity()" is preferable to "@@identity":
        http://msdn.microsoft.com/en-us/library/ms190315.aspx

        Background on why we attempt to embed "scope_identity()" into the same
        statement as the INSERT:
        http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?

        """
        super(MSExecutionContext_pyodbc, self).pre_exec()

        # don't embed the scope_identity select into an
        # "INSERT .. DEFAULT VALUES"
        if self._select_lastrowid and \
                self.dialect.use_scope_identity and \
                len(self.parameters[0]):
            self._embedded_scope_identity = True

            self.statement += "; select scope_identity()"

    def post_exec(self):
        if self._embedded_scope_identity:
            # Fetch the last inserted id from the manipulated statement
            # We may have to skip over a number of result sets with
            # no data (due to triggers, etc.)
            while True:
                try:
                    # fetchall() ensures the cursor is consumed
                    # without closing it (FreeTDS particularly)
                    row = self.cursor.fetchall()[0]
                    break
                except self.dialect.dbapi.Error:
                    # no way around this - nextset() consumes the previous set
                    # so we need to just keep flipping
                    self.cursor.nextset()

            self._lastrowid = int(row[0])
        else:
            super(MSExecutionContext_pyodbc, self).post_exec()
@ -180,18 +250,43 @@ class MSDialect_pyodbc(PyODBCConnector, MSDialect):
execution_ctx_cls = MSExecutionContext_pyodbc
pyodbc_driver_name = 'SQL Server'
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.Numeric:_MSNumeric_pyodbc
sqltypes.Numeric: _MSNumeric_pyodbc,
sqltypes.Float: _MSFloat_pyodbc,
VARBINARY: _VARBINARY_pyodbc,
sqltypes.LargeBinary: _VARBINARY_pyodbc,
}
)
def __init__(self, description_encoding='latin-1', **params):
def __init__(self, description_encoding=None, **params):
if 'description_encoding' in params:
self.description_encoding = params.pop('description_encoding')
super(MSDialect_pyodbc, self).__init__(**params)
self.description_encoding = description_encoding
self.use_scope_identity = self.dbapi and hasattr(self.dbapi.Cursor, 'nextset')
self.use_scope_identity = self.use_scope_identity and \
self.dbapi and \
hasattr(self.dbapi.Cursor, 'nextset')
self._need_decimal_fix = self.dbapi and \
self._dbapi_version() < (2, 1, 8)
def _get_server_version_info(self, connection):
try:
raw = connection.scalar("SELECT SERVERPROPERTY('ProductVersion')")
except exc.DBAPIError:
# SQL Server docs indicate this function isn't present prior to
# 2008; additionally, unknown combinations of pyodbc aren't
# able to run this query.
return super(MSDialect_pyodbc, self).\
_get_server_version_info(connection)
else:
version = []
r = re.compile(r'[.\-]')
for n in r.split(raw):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
# entry point consumed by the SQLAlchemy dialect registry
dialect = MSDialect_pyodbc

View File

@ -1,26 +1,26 @@
"""Support for the Microsoft SQL Server database via the zxjdbc JDBC
connector.
JDBC Driver
-----------
Requires the jTDS driver, available from: http://jtds.sourceforge.net/
Connecting
----------
URLs are of the standard form of
``mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]``.
Additional arguments which may be specified either as query string
arguments on the URL, or as keyword arguments to
:func:`~sqlalchemy.create_engine()` will be passed as Connection
properties to the underlying JDBC driver.
# mssql/zxjdbc.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.mssql.base import MSDialect, MSExecutionContext
from sqlalchemy.engine import base
.. dialect:: mssql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: mssql+zxjdbc://user:pass@host:port/dbname\
[?key=value&key=value...]
:driverurl: http://jtds.sourceforge.net/
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
"""
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import MSDialect, MSExecutionContext
from ... import engine
class MSExecutionContext_zxjdbc(MSExecutionContext):
@ -40,15 +40,17 @@ class MSExecutionContext_zxjdbc(MSExecutionContext):
try:
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error, e:
except self.dialect.dbapi.Error:
self.cursor.nextset()
self._lastrowid = int(row[0])
if (self.isinsert or self.isupdate or self.isdelete) and self.compiled.returning:
self._result_proxy = base.FullyBufferedResultProxy(self)
if (self.isinsert or self.isupdate or self.isdelete) and \
self.compiled.returning:
self._result_proxy = engine.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
table = self.dialect.identifier_preparer.format_table(self.compiled.statement.table)
table = self.dialect.identifier_preparer.format_table(
self.compiled.statement.table)
self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
@ -59,6 +61,9 @@ class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
execution_ctx_cls = MSExecutionContext_zxjdbc
def _get_server_version_info(self, connection):
return tuple(int(x) for x in connection.connection.dbversion.split('.'))
return tuple(
int(x)
for x in connection.connection.dbversion.split('.')
)
# entry point consumed by the SQLAlchemy dialect registry
dialect = MSDialect_zxjdbc

View File

@ -1,17 +1,31 @@
# mysql/__init__.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

from . import base, mysqldb, oursql, \
    pyodbc, zxjdbc, mysqlconnector, pymysql,\
    gaerdbms, cymysql

# default dialect
base.dialect = mysqldb.dialect

from .base import \
    BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
    DECIMAL, DOUBLE, ENUM, \
    FLOAT, INTEGER, JSON, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
    MEDIUMINT, MEDIUMTEXT, NCHAR, \
    NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
    TINYBLOB, TINYINT, TINYTEXT,\
    VARBINARY, VARCHAR, YEAR, dialect

# public, star-importable names for the MySQL dialect package
__all__ = (
    'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
    'DECIMAL', 'DOUBLE', 'ENUM', 'FLOAT', 'INTEGER',
    'JSON', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT',
    'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
    'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
    'YEAR', 'dialect'
)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,87 @@
# mysql/cymysql.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+cymysql
:name: CyMySQL
:dbapi: cymysql
:connectstring: mysql+cymysql://<username>:<password>@<host>/<dbname>\
[?<options>]
:url: https://github.com/nakagami/CyMySQL
"""
import re
from .mysqldb import MySQLDialect_mysqldb
from .base import (BIT, MySQLDialect)
from ... import util
class _cymysqlBIT(BIT):
    def result_processor(self, dialect, coltype):
        """Convert MySQL's 64 bit, variable length binary string to a long."""
        def process(value):
            if value is None:
                return value
            acc = 0
            for byte in util.iterbytes(value):
                acc = (acc << 8) | byte
            return acc
        return process
class MySQLDialect_cymysql(MySQLDialect_mysqldb):
    """MySQL dialect for the CyMySQL driver."""

    driver = 'cymysql'

    description_encoding = None
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = False
    supports_unicode_statements = True

    colspecs = util.update_copy(
        MySQLDialect.colspecs,
        {
            BIT: _cymysqlBIT,
        }
    )

    @classmethod
    def dbapi(cls):
        return __import__('cymysql')

    def _get_server_version_info(self, connection):
        # split e.g. "5.7.17-log" into (5, 7, 17, 'log')
        version = []
        for token in re.split(r'[.\-]', connection.connection.server_version):
            try:
                version.append(int(token))
            except ValueError:
                version.append(token)
        return tuple(version)

    def _detect_charset(self, connection):
        return connection.connection.charset

    def _extract_error_code(self, exception):
        return exception.errno

    def is_disconnect(self, e, connection, cursor):
        if isinstance(e, self.dbapi.OperationalError):
            return self._extract_error_code(e) in \
                (2006, 2013, 2014, 2045, 2055)
        if isinstance(e, self.dbapi.InterfaceError):
            # if underlying connection is closed,
            # this is the error you get
            return True
        return False
# entry point consumed by the SQLAlchemy dialect registry
dialect = MySQLDialect_cymysql

View File

@ -0,0 +1,311 @@
# mysql/enumerated.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from .types import _StringType
from ... import exc, sql, util
from ... import types as sqltypes
class _EnumeratedValues(_StringType):
    """Common quoting/length logic shared by the MySQL ENUM and SET types."""

    def _init_values(self, values, kw):
        # Detect how the supplied literals are quoted, strip quoting if
        # present, and compute the storage length.  Returns
        # (possibly-stripped values, max value length).
        self.quoting = kw.pop('quoting', 'auto')

        if self.quoting == 'auto' and len(values):
            # What quoting character are we using?
            q = None
            for e in values:
                if len(e) == 0:
                    # an empty literal can never be quoted
                    self.quoting = 'unquoted'
                    break
                elif q is None:
                    q = e[0]

                if len(e) == 1 or e[0] != q or e[-1] != q:
                    self.quoting = 'unquoted'
                    break
            else:
                # every value was wrapped in the same quote character
                self.quoting = 'quoted'

        if self.quoting == 'quoted':
            util.warn_deprecated(
                'Manually quoting %s value literals is deprecated. Supply '
                'unquoted values and use the quoting= option in cases of '
                'ambiguity.' % self.__class__.__name__)

            values = self._strip_values(values)

        self._enumerated_values = values
        # "+ [0]" guards max() against an empty value list
        length = max([len(v) for v in values] + [0])
        return values, length

    @classmethod
    def _strip_values(cls, values):
        # Remove enclosing quote characters and collapse doubled quotes.
        strip_values = []

        for a in values:
            if a[0:1] == '"' or a[0:1] == "'":
                # strip enclosing quotes and unquote interior
                a = a[1:-1].replace(a[0] * 2, a[0])
            strip_values.append(a)
        return strip_values
class ENUM(sqltypes.Enum, _EnumeratedValues):
    """MySQL ENUM type."""

    __visit_name__ = 'ENUM'

    def __init__(self, *enums, **kw):
        """Construct an ENUM.

        E.g.::

          Column('myenum', ENUM("foo", "bar", "baz"))

        :param enums: The range of valid values for this ENUM.  Values will be
          quoted when generating the schema according to the quoting flag (see
          below).  This object may also be a PEP-435-compliant enumerated
          type.

          .. versionadded: 1.1 added support for PEP-435-compliant enumerated
             types.

        :param strict: This flag has no effect.

         .. versionchanged:: The MySQL ENUM type as well as the base Enum
            type now validates all Python data values.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence to 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence to 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        :param quoting: Defaults to 'auto': automatically determine enum value
          quoting.  If all enum values are surrounded by the same quoting
          character, then use 'quoted' mode.  Otherwise, use 'unquoted' mode.

          'quoted': values in enums are already quoted, they will be used
          directly when generating the schema - this usage is deprecated.

          'unquoted': values in enums are not quoted, they will be escaped and
          surrounded by single quotes when generating the schema.

          Previous versions of this type always required manually quoted
          values to be supplied; future versions will always quote the string
          literals for you.  This is a transitional option.

        """
        kw.pop('strict', None)
        validate_strings = kw.pop("validate_strings", False)
        sqltypes.Enum.__init__(
            self, validate_strings=validate_strings, *enums)
        # strip Enum-only constructor arguments before initializing the
        # MySQL string type, which does not accept them
        kw.pop('metadata', None)
        kw.pop('schema', None)
        kw.pop('name', None)
        kw.pop('quote', None)
        kw.pop('native_enum', None)
        kw.pop('inherit_schema', None)
        kw.pop('_create_events', None)
        _StringType.__init__(self, length=self.length, **kw)

    def _setup_for_values(self, values, objects, kw):
        # apply the MySQL quoting analysis before the generic Enum setup
        values, length = self._init_values(values, kw)
        return sqltypes.Enum._setup_for_values(self, values, objects, kw)

    def _object_value_for_elem(self, elem):
        # mysql sends back a blank string for any value that
        # was persisted that was not in the enums; that is, it does no
        # validation on the incoming data, it "truncates" it to be
        # the blank string.  Return it straight.
        if elem == "":
            return elem
        else:
            return super(ENUM, self)._object_value_for_elem(elem)

    def __repr__(self):
        return util.generic_repr(
            self, to_inspect=[ENUM, _StringType, sqltypes.Enum])

    def adapt(self, cls, **kw):
        return sqltypes.Enum.adapt(self, cls, **kw)
class SET(_EnumeratedValues):
    """MySQL SET type."""

    __visit_name__ = 'SET'

    def __init__(self, *values, **kw):
        """Construct a SET.

        E.g.::

          Column('myset', SET("foo", "bar", "baz"))

        The list of potential values is required in the case that this
        set will be used to generate DDL for a table, or if the
        :paramref:`.SET.retrieve_as_bitwise` flag is set to True.

        :param values: The range of valid values for this SET.

        :param convert_unicode: Same flag as that of
         :paramref:`.String.convert_unicode`.

        :param collation: same as that of :paramref:`.String.collation`

        :param charset: same as that of :paramref:`.VARCHAR.charset`.

        :param ascii: same as that of :paramref:`.VARCHAR.ascii`.

        :param unicode: same as that of :paramref:`.VARCHAR.unicode`.

        :param binary: same as that of :paramref:`.VARCHAR.binary`.

        :param quoting: Defaults to 'auto': automatically determine set value
          quoting.  If all values are surrounded by the same quoting
          character, then use 'quoted' mode.  Otherwise, use 'unquoted' mode.

          'quoted': values in enums are already quoted, they will be used
          directly when generating the schema - this usage is deprecated.

          'unquoted': values in enums are not quoted, they will be escaped and
          surrounded by single quotes when generating the schema.

          Previous versions of this type always required manually quoted
          values to be supplied; future versions will always quote the string
          literals for you.  This is a transitional option.

          .. versionadded:: 0.9.0

        :param retrieve_as_bitwise: if True, the data for the set type will be
          persisted and selected using an integer value, where a set is
          coerced into a bitwise mask for persistence.  MySQL allows this
          mode which has the advantage of being able to store values
          unambiguously, such as the blank string ``''``.  The datatype will
          appear as the expression ``col + 0`` in a SELECT statement, so that
          the value is coerced into an integer value in result sets.
          This flag is required if one wishes
          to persist a set that can store the blank string ``''`` as a value.

          .. warning::

            When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is
            essential that the list of set values is expressed in the
            **exact same order** as exists on the MySQL database.

          .. versionadded:: 1.0.0

        """
        self.retrieve_as_bitwise = kw.pop('retrieve_as_bitwise', False)
        values, length = self._init_values(values, kw)
        self.values = tuple(values)
        if not self.retrieve_as_bitwise and '' in values:
            raise exc.ArgumentError(
                "Can't use the blank value '' in a SET without "
                "setting retrieve_as_bitwise=True")
        if self.retrieve_as_bitwise:
            # bidirectional map: value -> bit and bit -> value
            self._bitmap = dict(
                (value, 2 ** idx)
                for idx, value in enumerate(self.values)
            )
            self._bitmap.update(
                (2 ** idx, value)
                for idx, value in enumerate(self.values)
            )
        kw.setdefault('length', length)
        super(SET, self).__init__(**kw)

    def column_expression(self, colexpr):
        if self.retrieve_as_bitwise:
            # "col + 0" coerces the SET column to its integer bitmask form
            return sql.type_coerce(
                sql.type_coerce(colexpr, sqltypes.Integer) + 0,
                self
            )
        else:
            return colexpr

    def result_processor(self, dialect, coltype):
        if self.retrieve_as_bitwise:
            def process(value):
                if value is not None:
                    value = int(value)

                    # expand the bitmask back into the set of string values
                    return set(
                        util.map_bits(self._bitmap.__getitem__, value)
                    )
                else:
                    return None
        else:
            super_convert = super(SET, self).result_processor(dialect, coltype)

            def process(value):
                if isinstance(value, util.string_types):
                    # MySQLdb returns a string, let's parse
                    if super_convert:
                        value = super_convert(value)
                    return set(re.findall(r'[^,]+', value))
                else:
                    # mysql-connector-python does a naive
                    # split(",") which throws in an empty string
                    if value is not None:
                        value.discard('')
                    return value
        return process

    def bind_processor(self, dialect):
        super_convert = super(SET, self).bind_processor(dialect)
        if self.retrieve_as_bitwise:
            def process(value):
                if value is None:
                    return None
                elif isinstance(value, util.int_types + util.string_types):
                    # already a bitmask or raw string - pass through
                    if super_convert:
                        return super_convert(value)
                    else:
                        return value
                else:
                    # fold an iterable of values into one integer bitmask
                    int_value = 0
                    for v in value:
                        int_value |= self._bitmap[v]
                    return int_value
        else:
            def process(value):
                # accept strings and int (actually bitflag) values directly
                if value is not None and not isinstance(
                        value, util.int_types + util.string_types):
                    value = ",".join(value)

                if super_convert:
                    return super_convert(value)
                else:
                    return value
        return process

    def adapt(self, impltype, **kw):
        # retrieve_as_bitwise must survive type adaptation
        kw['retrieve_as_bitwise'] = self.retrieve_as_bitwise
        return util.constructor_copy(
            self, impltype,
            *self.values,
            **kw
        )

View File

@ -0,0 +1,102 @@
# mysql/gaerdbms.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+gaerdbms
:name: Google Cloud SQL
:dbapi: rdbms
:connectstring: mysql+gaerdbms:///<dbname>?instance=<instancename>
:url: https://developers.google.com/appengine/docs/python/cloud-sql/\
developers-guide
This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with
minimal changes.
.. versionadded:: 0.7.8
.. deprecated:: 1.0 This dialect is **no longer necessary** for
Google Cloud SQL; the MySQLdb dialect can be used directly.
Cloud SQL now recommends creating connections via the
mysql dialect using the URL format
``mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>``
Pooling
-------
Google App Engine connections appear to be randomly recycled,
so the dialect does not pool connections. The :class:`.NullPool`
implementation is installed within the :class:`.Engine` by
default.
"""
import os
from .mysqldb import MySQLDialect_mysqldb
from ...pool import NullPool
import re
from sqlalchemy.util import warn_deprecated
def _is_dev_environment():
return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')
class MySQLDialect_gaerdbms(MySQLDialect_mysqldb):
    """MySQLdb-based dialect for Google App Engine's rdbms API (deprecated)."""

    @classmethod
    def dbapi(cls):
        # deprecated: connecting through MySQLdb directly is now preferred
        warn_deprecated(
            "Google Cloud SQL now recommends creating connections via the "
            "MySQLdb dialect directly, using the URL format "
            "mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/"
            "<projectid>:<instancename>"
        )

        # from django:
        # http://code.google.com/p/googleappengine/source/
        #     browse/trunk/python/google/storage/speckle/
        #     python/django/backend/base.py#118
        # see also [ticket:2649]
        # see also http://stackoverflow.com/q/14224679/34549
        from google.appengine.api import apiproxy_stub_map

        if _is_dev_environment():
            from google.appengine.api import rdbms_mysqldb
            return rdbms_mysqldb
        elif apiproxy_stub_map.apiproxy.GetStub('rdbms'):
            from google.storage.speckle.python.api import rdbms_apiproxy
            return rdbms_apiproxy
        else:
            from google.storage.speckle.python.api import rdbms_googleapi
            return rdbms_googleapi

    @classmethod
    def get_pool_class(cls, url):
        # Cloud SQL connections die at any moment
        return NullPool

    def create_connect_args(self, url):
        opts = url.translate_connect_args()
        if not _is_dev_environment():
            # 'dsn' and 'instance' are because we are skipping
            # the traditional google.api.rdbms wrapper
            opts['dsn'] = ''
            opts['instance'] = url.query['instance']
        return [], opts

    def _extract_error_code(self, exception):
        match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match(str(exception))
        # The rdbms api will wrap then re-raise some types of errors
        # making this regex return no matches.
        # precedence note: parses as
        # (match.group(1) or match.group(2)) if match else None
        code = match.group(1) or match.group(2) if match else None
        if code:
            return int(code)
# entry point consumed by the SQLAlchemy dialect registry
dialect = MySQLDialect_gaerdbms

View File

@ -0,0 +1,79 @@
# mysql/json.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
import json
from ...sql import elements
from ... import types as sqltypes
from ... import util
class JSON(sqltypes.JSON):
    """MySQL JSON type.

    MySQL supports JSON as of version 5.7.  Note that MariaDB does **not**
    support JSON at the time of this writing.

    The :class:`.mysql.JSON` type supports persistence of JSON values
    as well as the core index operations provided by :class:`.types.JSON`
    datatype, by adapting the operations to render the ``JSON_EXTRACT``
    function at the database level.

    .. versionadded:: 1.1

    """
    # behavior is inherited; this subclass exists so the MySQL dialect can
    # target it in colspecs
    pass
class _FormatTypeMixin(object):
    """Mixin that runs bound/literal values through ``_format_value``
    before the underlying string processor."""

    def _format_value(self, value):
        # subclasses render the value as a MySQL JSON path/index string
        raise NotImplementedError()

    def bind_processor(self, dialect):
        string_proc = self.string_bind_processor(dialect)

        def process(value):
            formatted = self._format_value(value)
            if string_proc:
                formatted = string_proc(formatted)
            return formatted

        return process

    def literal_processor(self, dialect):
        string_proc = self.string_literal_processor(dialect)

        def process(value):
            formatted = self._format_value(value)
            if string_proc:
                formatted = string_proc(formatted)
            return formatted

        return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
    def _format_value(self, value):
        """Render an int as an array index, anything else as a quoted key."""
        if isinstance(value, int):
            return "$[%s]" % value
        return '$."%s"' % value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
    def _format_value(self, value):
        """Render a sequence of path elements as one MySQL JSON path."""
        tokens = [
            "[%s]" % element if isinstance(element, int)
            else '."%s"' % element
            for element in value
        ]
        return "$%s" % "".join(tokens)

View File

@ -1,28 +1,34 @@
"""Support for the MySQL database via the MySQL Connector/Python adapter.
# mysql/mysqlconnector.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
MySQL Connector/Python is available at:
"""
.. dialect:: mysql+mysqlconnector
:name: MySQL Connector/Python
:dbapi: myconnpy
:connectstring: mysql+mysqlconnector://<user>:<password>@\
<host>[:<port>]/<dbname>
:url: http://dev.mysql.com/downloads/connector/python/
https://launchpad.net/myconnpy
Connecting
-----------
Unicode
-------
Connect string format::
mysql+mysqlconnector://<user>:<password>@<host>[:<port>]/<dbname>
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
"""
from .base import (MySQLDialect, MySQLExecutionContext,
MySQLCompiler, MySQLIdentifierPreparer,
BIT)
from ... import util
import re
from sqlalchemy.dialects.mysql.base import (MySQLDialect,
MySQLExecutionContext, MySQLCompiler, MySQLIdentifierPreparer,
BIT)
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import exc, log, schema, sql, types as sqltypes, util
from sqlalchemy import processors
class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext):
@ -31,17 +37,36 @@ class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext):
class MySQLCompiler_mysqlconnector(MySQLCompiler):
    """Compiler that doubles ``%`` signs only for Connector/Python
    versions that treat them as format characters."""

    def visit_mod_binary(self, binary, operator, **kw):
        if self.dialect._mysqlconnector_double_percents:
            return self.process(binary.left, **kw) + " %% " + \
                self.process(binary.right, **kw)
        else:
            return self.process(binary.left, **kw) + " % " + \
                self.process(binary.right, **kw)

    def post_process_text(self, text):
        if self.dialect._mysqlconnector_double_percents:
            return text.replace('%', '%%')
        else:
            return text

    def escape_literal_column(self, text):
        if self.dialect._mysqlconnector_double_percents:
            return text.replace('%', '%%')
        else:
            return text
class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer):

    def _escape_identifier(self, value):
        value = value.replace(self.escape_quote, self.escape_to_quote)
        # older Connector/Python on Python 2 requires doubled "%" signs
        if self.dialect._mysqlconnector_double_percents:
            return value.replace("%", "%%")
        else:
            return value
class _myconnpyBIT(BIT):
def result_processor(self, dialect, coltype):
@ -49,10 +74,12 @@ class _myconnpyBIT(BIT):
return None
class MySQLDialect_mysqlconnector(MySQLDialect):
    driver = 'mysqlconnector'

    # NOTE: supports_unicode_statements is provided as a memoized
    # property further down, keyed to the driver version
    supports_unicode_binds = True

    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True
@ -71,6 +98,10 @@ class MySQLDialect_mysqlconnector(MySQLDialect):
}
)
    @util.memoized_property
    def supports_unicode_statements(self):
        # Connector/Python < 2.0 on Python 2 cannot take unicode statements
        return util.py3k or self._mysqlconnector_version_info > (2, 0)
@classmethod
def dbapi(cls):
from mysql import connector
@ -78,48 +109,75 @@ class MySQLDialect_mysqlconnector(MySQLDialect):
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
opts.update(url.query)
util.coerce_kw_type(opts, 'allow_local_infile', bool)
util.coerce_kw_type(opts, 'autocommit', bool)
util.coerce_kw_type(opts, 'buffered', bool)
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'connection_timeout', int)
util.coerce_kw_type(opts, 'connect_timeout', int)
util.coerce_kw_type(opts, 'consume_results', bool)
util.coerce_kw_type(opts, 'force_ipv6', bool)
util.coerce_kw_type(opts, 'get_warnings', bool)
util.coerce_kw_type(opts, 'pool_reset_session', bool)
util.coerce_kw_type(opts, 'pool_size', int)
util.coerce_kw_type(opts, 'raise_on_warnings', bool)
opts['buffered'] = True
opts['raise_on_warnings'] = True
util.coerce_kw_type(opts, 'raw', bool)
util.coerce_kw_type(opts, 'ssl_verify_cert', bool)
util.coerce_kw_type(opts, 'use_pure', bool)
util.coerce_kw_type(opts, 'use_unicode', bool)
# unfortunately, MySQL/connector python refuses to release a
# cursor without reading fully, so non-buffered isn't an option
opts.setdefault('buffered', True)
# FOUND_ROWS must be set in ClientFlag to enable
# supports_sane_rowcount.
if self.dbapi is not None:
try:
from mysql.connector.constants import ClientFlag
client_flags = opts.get('client_flags', ClientFlag.get_default())
client_flags = opts.get(
'client_flags', ClientFlag.get_default())
client_flags |= ClientFlag.FOUND_ROWS
opts['client_flags'] = client_flags
except:
except Exception:
pass
return [[], opts]
    @util.memoized_property
    def _mysqlconnector_version_info(self):
        # parse "x.y[.z]" from the DBAPI __version__; None when unavailable
        if self.dbapi and hasattr(self.dbapi, '__version__'):
            m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
                         self.dbapi.__version__)
            if m:
                return tuple(
                    int(x)
                    for x in m.group(1, 2, 3)
                    if x is not None)
    @util.memoized_property
    def _mysqlconnector_double_percents(self):
        # Py2 + Connector/Python < 2.0 treats "%" as a format character,
        # so statements must double percent signs
        return not util.py3k and self._mysqlconnector_version_info < (2, 0)
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
from mysql.connector.constants import ClientFlag
dbapi_con.set_client_flag(ClientFlag.FOUND_ROWS)
version = dbapi_con.get_server_version()
return tuple(version)
def _detect_charset(self, connection):
return connection.connection.get_characterset_info()
return connection.connection.charset
def _extract_error_code(self, exception):
try:
return exception.orig.errno
except AttributeError:
return None
return exception.errno
def is_disconnect(self, e):
def is_disconnect(self, e, connection, cursor):
errnos = (2006, 2013, 2014, 2045, 2055, 2048)
exceptions = (self.dbapi.OperationalError,self.dbapi.InterfaceError)
exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError)
if isinstance(e, exceptions):
return e.errno in errnos
return e.errno in errnos or \
"MySQL Connection not available." in str(e)
else:
return False
@ -129,4 +187,17 @@ class MySQLDialect_mysqlconnector(MySQLDialect):
def _compat_fetchone(self, rp, charset=None):
return rp.fetchone()
_isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
                         'READ COMMITTED', 'REPEATABLE READ',
                         'AUTOCOMMIT'])

def _set_isolation_level(self, connection, level):
    """Apply an isolation level; AUTOCOMMIT toggles the driver flag."""
    autocommit = (level == 'AUTOCOMMIT')
    connection.autocommit = autocommit
    if not autocommit:
        super(MySQLDialect_mysqlconnector, self)._set_isolation_level(
            connection, level)
dialect = MySQLDialect_mysqlconnector

View File

@ -1,85 +1,87 @@
"""Support for the MySQL database via the MySQL-python adapter.
MySQL-Python is available at:
http://sourceforge.net/projects/mysql-python
At least version 1.2.1 or 1.2.2 should be used.
Connecting
-----------
Connect string format::
mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
Character Sets
--------------
Many MySQL server installations default to a ``latin1`` encoding for client
connections. All data sent through the connection will be converted into
``latin1``, even if you have ``utf8`` or another character set on your tables
and columns. With versions 4.1 and higher, you can change the connection
character set either through server configuration or by including the
``charset`` parameter in the URL used for ``create_engine``. The ``charset``
option is passed through to MySQL-Python and has the side-effect of also
enabling ``use_unicode`` in the driver by default. For regular encoded
strings, also pass ``use_unicode=0`` in the connection arguments::
# set client encoding to utf8; all strings come back as unicode
create_engine('mysql+mysqldb:///mydb?charset=utf8')
# set client encoding to utf8; all strings come back as utf8 str
create_engine('mysql+mysqldb:///mydb?charset=utf8&use_unicode=0')
Known Issues
-------------
MySQL-python at least as of version 1.2.2 has a serious memory leak related
to unicode conversion, a feature which is disabled via ``use_unicode=0``.
The recommended connection form with SQLAlchemy is::
engine = create_engine('mysql://scott:tiger@localhost/test?charset=utf8&use_unicode=0', pool_recycle=3600)
# mysql/mysqldb.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+mysqldb
:name: MySQL-Python
:dbapi: mysqldb
:connectstring: mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://sourceforge.net/projects/mysql-python
.. _mysqldb_unicode:
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
Py3K Support
------------
Currently, MySQLdb only runs on Python 2 and development has been stopped.
`mysqlclient`_ is fork of MySQLdb and provides Python 3 support as well
as some bugfixes.
.. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python
Using MySQLdb with Google Cloud SQL
-----------------------------------
Google Cloud SQL now recommends use of the MySQLdb dialect. Connect
using a URL like the following::
mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>
Server Side Cursors
-------------------
The mysqldb dialect supports server-side cursors. See :ref:`mysql_ss_cursors`.
"""
from .base import (MySQLDialect, MySQLExecutionContext,
MySQLCompiler, MySQLIdentifierPreparer)
from .base import TEXT
from ... import sql
from ... import util
import re
from sqlalchemy.dialects.mysql.base import (MySQLDialect, MySQLExecutionContext,
MySQLCompiler, MySQLIdentifierPreparer)
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import exc, log, schema, sql, types as sqltypes, util
from sqlalchemy import processors
class MySQLExecutionContext_mysqldb(MySQLExecutionContext):

    @property
    def rowcount(self):
        """Rowcount captured by do_executemany(), else the cursor's."""
        if hasattr(self, '_rowcount'):
            return self._rowcount
        return self.cursor.rowcount
class MySQLCompiler_mysqldb(MySQLCompiler):
    # MySQLdb uses the pyformat paramstyle, so a literal '%' must be
    # doubled to '%%' in emitted SQL.

    # NOTE(review): visit_mod and visit_mod_binary look like two
    # revisions of the same operator hook (old vs. new compiler API);
    # confirm which one the installed SQLAlchemy core dispatches to.
    def visit_mod(self, binary, **kw):
        return self.process(binary.left) + " %% " + self.process(binary.right)

    def visit_mod_binary(self, binary, operator, **kw):
        return self.process(binary.left, **kw) + " %% " + \
            self.process(binary.right, **kw)

    def post_process_text(self, text):
        # Escape bare '%' so the DBAPI won't treat it as a bind marker.
        return text.replace('%', '%%')
class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer):

    def _escape_identifier(self, value):
        """Escape quotes, then double '%' for the pyformat paramstyle."""
        escaped = value.replace(self.escape_quote, self.escape_to_quote)
        return escaped.replace("%", "%%")
class MySQLDialect_mysqldb(MySQLDialect):
    """MySQL dialect for the MySQL-python (MySQLdb) DBAPI."""

    driver = 'mysqldb'
    # A dead duplicate assignment (``supports_unicode_statements = False``
    # from an interleaved stale revision) and a diff-hunk marker were
    # removed; the effective value was already True.
    supports_unicode_statements = True
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True

    execution_ctx_cls = MySQLExecutionContext_mysqldb
    statement_compiler = MySQLCompiler_mysqldb
    preparer = MySQLIdentifierPreparer_mysqldb

    colspecs = util.update_copy(
        MySQLDialect.colspecs,
        {
        }
    )

    def __init__(self, server_side_cursors=False, **kwargs):
        super(MySQLDialect_mysqldb, self).__init__(**kwargs)
        self.server_side_cursors = server_side_cursors
@util.langhelpers.memoized_property
def supports_server_side_cursors(self):
    """True when ``MySQLdb.cursors.SSCursor`` can be imported."""
    try:
        cursors = __import__('MySQLdb.cursors').cursors
        self._sscursor = cursors.SSCursor
    except (ImportError, AttributeError):
        return False
    else:
        return True
@classmethod
def dbapi(cls):
    # Imported lazily so this module can load without the driver.
    import MySQLdb
    return MySQLdb
@ -105,6 +114,30 @@ class MySQLDialect_mysqldb(MySQLDialect):
if context is not None:
context._rowcount = rowcount
def _check_unicode_returns(self, connection):
    # Decide whether the DBAPI returns unicode, adding an extra probe
    # for a MySQLdb bug with the utf8_bin collation.
    #
    # work around issue fixed in
    # https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8
    # specific issue w/ the utf8_bin collation and unicode returns
    has_utf8_bin = self.server_version_info > (5, ) and \
        connection.scalar(
            "show collation where %s = 'utf8' and %s = 'utf8_bin'"
            % (
                self.identifier_preparer.quote("Charset"),
                self.identifier_preparer.quote("Collation")
            ))
    if has_utf8_bin:
        # Probe a collated literal so the buggy decode path is exercised.
        additional_tests = [
            sql.collate(sql.cast(
                sql.literal_column(
                    "'test collated returns'"),
                TEXT(charset='utf8')), "utf8_bin")
        ]
    else:
        additional_tests = []
    # Delegate the actual round-trip checks to the base dialect.
    return super(MySQLDialect_mysqldb, self)._check_unicode_returns(
        connection, additional_tests)
def create_connect_args(self, url):
    # Translate the SQLAlchemy URL into MySQLdb.connect() arguments.
    opts = url.translate_connect_args(database='db', username='user',
                                      password='passwd')
    # NOTE(review): the "@ -112,11 ..." line below is diff-hunk residue,
    # not code; original lines are missing at this point.
    @ -112,11 +145,12 @@ class MySQLDialect_mysqldb(MySQLDialect):
    util.coerce_kw_type(opts, 'compress', bool)
    util.coerce_kw_type(opts, 'connect_timeout', int)
    util.coerce_kw_type(opts, 'read_timeout', int)
    util.coerce_kw_type(opts, 'client_flag', int)
    util.coerce_kw_type(opts, 'local_infile', int)
    # Note: using either of the below will cause all strings to be returned
    # as Unicode, both in raw SQL operations and with column types like
    # String and MSString.
    # Note: using either of the below will cause all strings to be
    # returned as Unicode, both in raw SQL operations and with column
    # types like String and MSString.
    util.coerce_kw_type(opts, 'use_unicode', bool)
    util.coerce_kw_type(opts, 'charset', str)
    @ -124,7 +158,8 @@ class MySQLDialect_mysqldb(MySQLDialect):
    # query string.
    # Collect ssl_* query parameters into an 'ssl' dict for the driver.
    ssl = {}
    # NOTE(review): both the old inline-list loop and the newer
    # "keys" variable loop are present — interleaved diff revisions;
    # only one of the two should remain.
    for key in ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']:
    keys = ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']
    for key in keys:
        if key in opts:
            ssl[key[4:]] = opts[key]
            util.coerce_kw_type(ssl, key[4:], str)
    @ -137,17 +172,19 @@ class MySQLDialect_mysqldb(MySQLDialect):
    # FOUND_ROWS must be set in CLIENT_FLAGS for sane rowcount support.
    client_flag = opts.get('client_flag', 0)
    if self.dbapi is not None:
        try:
            # NOTE(review): old direct import and newer __import__-based
            # lookup are both present — interleaved diff revisions.
            from MySQLdb.constants import CLIENT as CLIENT_FLAGS
            CLIENT_FLAGS = __import__(
                self.dbapi.__name__ + '.constants.CLIENT'
            ).constants.CLIENT
            client_flag |= CLIENT_FLAGS.FOUND_ROWS
        except:
            pass
        except (AttributeError, ImportError):
            self.supports_sane_rowcount = False
    opts['client_flag'] = client_flag
    return [[], opts]
def _get_server_version_info(self, connection):
    # Parse "5.7.17-log"-style server info into a tuple of ints/strings.
    dbapi_con = connection.connection
    version = []
    r = re.compile('[.\-]')
    # NOTE(review): the assignment above is a stale duplicate; the
    # raw-string compile below is the newer revision of the pattern.
    r = re.compile(r'[.\-]')
    for n in r.split(dbapi_con.get_server_info()):
        try:
            version.append(int(n))
    # NOTE(review): the line below is diff-hunk residue; the ``except``
    # clause of the ``try`` above is missing from this view.
    @ -156,47 +193,36 @@ class MySQLDialect_mysqldb(MySQLDialect):
    return tuple(version)
def _extract_error_code(self, exception):
    # Return the MySQL error number from a wrapped DBAPI exception;
    # None when the exception carries no ``.orig``.
    try:
        return exception.orig.args[0]
    except AttributeError:
        return None
    # NOTE(review): unreachable stale line from an older revision.
    return exception.args[0]
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Note: MySQL-python 1.2.1c7 seems to ignore changes made
# on a connection via set_character_set()
if self.server_version_info < (4, 1, 0):
try:
return connection.connection.character_set_name()
except AttributeError:
# < 1.2.1 final MySQL-python drivers have no charset support.
# a query is needed.
pass
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
if 'character_set_results' in opts:
return opts['character_set_results']
try:
return connection.connection.character_set_name()
# note: the SQL here would be
# "SHOW VARIABLES LIKE 'character_set%%'"
cset_name = connection.connection.character_set_name
except AttributeError:
# Still no charset on < 1.2.1 final...
if 'character_set' in opts:
return opts['character_set']
else:
util.warn(
"Could not detect the connection character set with this "
"combination of MySQL server and MySQL-python. "
"MySQL-python >= 1.2.2 is recommended. Assuming latin1.")
return 'latin1'
util.warn(
"No 'character_set_name' can be detected with "
"this MySQL-Python version; "
"please upgrade to a recent version of MySQL-Python. "
"Assuming latin1.")
return 'latin1'
else:
return cset_name()
_isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
                         'READ COMMITTED', 'REPEATABLE READ',
                         'AUTOCOMMIT'])

def _set_isolation_level(self, connection, level):
    """Apply an isolation level; AUTOCOMMIT toggles the driver flag."""
    autocommit = (level == 'AUTOCOMMIT')
    connection.autocommit(autocommit)
    if not autocommit:
        super(MySQLDialect_mysqldb, self)._set_isolation_level(connection,
                                                               level)
dialect = MySQLDialect_mysqldb

View File

@ -1,46 +1,31 @@
"""Support for the MySQL database via the oursql adapter.
# mysql/oursql.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
OurSQL is available at:
"""
http://packages.python.org/oursql/
Connecting
-----------
.. dialect:: mysql+oursql
:name: OurSQL
:dbapi: oursql
:connectstring: mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://packages.python.org/oursql/
Connect string format::
Unicode
-------
mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
Character Sets
--------------
oursql defaults to using ``utf8`` as the connection charset, but other
encodings may be used instead. Like the MySQL-Python driver, unicode support
can be completely disabled::
# oursql sets the connection charset to utf8 automatically; all strings come
# back as utf8 str
create_engine('mysql+oursql:///mydb?use_unicode=0')
To not automatically use ``utf8`` and instead use whatever the connection
defaults to, there is a separate parameter::
# use the default connection charset; all strings come back as unicode
create_engine('mysql+oursql:///mydb?default_charset=1')
# use latin1 as the connection charset; all strings come back as unicode
create_engine('mysql+oursql:///mydb?charset=latin1')
"""
import re
from sqlalchemy.dialects.mysql.base import (BIT, MySQLDialect, MySQLExecutionContext,
MySQLCompiler, MySQLIdentifierPreparer)
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import exc, log, schema, sql, types as sqltypes, util
from sqlalchemy import processors
from .base import (BIT, MySQLDialect, MySQLExecutionContext)
from ... import types as sqltypes, util
class _oursqlBIT(BIT):
@ -55,18 +40,17 @@ class MySQLExecutionContext_oursql(MySQLExecutionContext):
@property
def plain_query(self):
    """True when this execution requested a non-parameterized query."""
    options = self.execution_options
    return options.get('_oursql_plain_query', False)
class MySQLDialect_oursql(MySQLDialect):
    driver = 'oursql'

    # NOTE(review): the commented "# Py2K"/"# Py3K" markers below are the
    # old source-preprocessor convention; the ``if util.py2k:`` block is
    # its newer runtime replacement.  Both revisions are interleaved here,
    # so the unconditional assignments apply even on Python 3, which does
    # not match the newer revision's intent — confirm and keep one form.
    # Py3K
    # description_encoding = None
    # Py2K
    supports_unicode_binds = True
    supports_unicode_statements = True
    # end Py2K
    if util.py2k:
        supports_unicode_binds = True
        supports_unicode_statements = True

    supports_native_decimal = True
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True
    execution_ctx_cls = MySQLExecutionContext_oursql
execution_ctx_cls = MySQLExecutionContext_oursql
@ -84,7 +68,8 @@ class MySQLDialect_oursql(MySQLDialect):
return __import__('oursql')
def do_execute(self, cursor, statement, parameters, context=None):
"""Provide an implementation of *cursor.execute(statement, parameters)*."""
"""Provide an implementation of
*cursor.execute(statement, parameters)*."""
if context and context.plain_query:
cursor.execute(statement, plain_query=True)
@ -95,103 +80,109 @@ class MySQLDialect_oursql(MySQLDialect):
connection.cursor().execute('BEGIN', plain_query=True)
def _xa_query(self, connection, query, xid):
    """Run an XA statement as a plain (non-parameterized) query.

    The span previously interleaved two revisions (an old Py2K
    comment-preprocessor form and the current ``util.py2k`` runtime
    check); only the current revision is kept.
    """
    if util.py2k:
        arg = connection.connection._escape_string(xid)
    else:
        charset = self._connection_charset
        arg = connection.connection._escape_string(
            xid.encode(charset)).decode(charset)
    # The xid is quoted here rather than in the statement templates.
    arg = "'%s'" % arg
    connection.execution_options(
        _oursql_plain_query=True).execute(query % arg)
# Because mysql is bad, these methods have to be
# Because mysql is bad, these methods have to be
# reimplemented to use _PlainQuery. Basically, some queries
# refuse to return any data if they're run through
# refuse to return any data if they're run through
# the parameterized query API, or refuse to be parameterized
# in the first place.
# Each method previously contained both the old ('XA ... "%s"') and the
# new ('XA ... %s') statement forms interleaved from two diff revisions,
# which would have issued every XA statement twice; only the current
# revision (xid quoting is done inside _xa_query) is kept.

def do_begin_twophase(self, connection, xid):
    """Begin an XA transaction."""
    self._xa_query(connection, 'XA BEGIN %s', xid)

def do_prepare_twophase(self, connection, xid):
    """End and prepare an XA transaction."""
    self._xa_query(connection, 'XA END %s', xid)
    self._xa_query(connection, 'XA PREPARE %s', xid)

def do_rollback_twophase(self, connection, xid, is_prepared=True,
                         recover=False):
    """Roll back an XA transaction, ending it first if not prepared."""
    if not is_prepared:
        self._xa_query(connection, 'XA END %s', xid)
    self._xa_query(connection, 'XA ROLLBACK %s', xid)

def do_commit_twophase(self, connection, xid, is_prepared=True,
                       recover=False):
    """Commit an XA transaction, preparing it first if needed."""
    if not is_prepared:
        self.do_prepare_twophase(connection, xid)
    self._xa_query(connection, 'XA COMMIT %s', xid)
# Q: why didn't we need all these "plain_query" overrides earlier ?
# am i on a newer/older version of OurSQL ?
# Because mysql is bad, these reflection entry points have to be
# reimplemented to use _PlainQuery: some queries refuse to return any
# data, or to be parameterized at all, through the parameterized query
# API.  The span previously interleaved the old multi-line-continuation
# call style with the current parenthesized style from two diff
# revisions; only the current revision of each method is kept.

def has_table(self, connection, table_name, schema=None):
    """Check table existence over a plain-query connection."""
    return MySQLDialect.has_table(
        self,
        connection.connect().execution_options(_oursql_plain_query=True),
        table_name,
        schema
    )

def get_table_options(self, connection, table_name, schema=None, **kw):
    """Reflect table options over a plain-query connection."""
    return MySQLDialect.get_table_options(
        self,
        connection.connect().execution_options(_oursql_plain_query=True),
        table_name,
        schema=schema,
        **kw
    )

def get_columns(self, connection, table_name, schema=None, **kw):
    """Reflect columns over a plain-query connection."""
    return MySQLDialect.get_columns(
        self,
        connection.connect().execution_options(_oursql_plain_query=True),
        table_name,
        schema=schema,
        **kw
    )

def get_view_names(self, connection, schema=None, **kw):
    """Reflect view names over a plain-query connection."""
    return MySQLDialect.get_view_names(
        self,
        connection.connect().execution_options(_oursql_plain_query=True),
        schema=schema,
        **kw
    )

def get_table_names(self, connection, schema=None, **kw):
    """Reflect table names over a plain-query connection."""
    return MySQLDialect.get_table_names(
        self,
        connection.connect().execution_options(_oursql_plain_query=True),
        schema
    )

def get_schema_names(self, connection, **kw):
    """Reflect schema names over a plain-query connection."""
    return MySQLDialect.get_schema_names(
        self,
        connection.connect().execution_options(_oursql_plain_query=True),
        **kw
    )

def initialize(self, connection):
    """Run base-dialect initialization over a plain-query connection."""
    return MySQLDialect.initialize(
        self,
        connection.execution_options(_oursql_plain_query=True)
    )

def _show_create_table(self, connection, table, charset=None,
                       full_name=None):
    """Run SHOW CREATE TABLE over a plain-query connection."""
    return MySQLDialect._show_create_table(
        self,
        connection.contextual_connect(close_with_result=True).
        execution_options(_oursql_plain_query=True),
        table, charset, full_name
    )
def is_disconnect(self, e, connection, cursor):
    """Return True if the exception indicates the connection is dead.

    A stale one-argument ``def`` header and duplicated body lines from
    an interleaved older revision were removed.
    """
    if isinstance(e, self.dbapi.ProgrammingError):
        return e.errno is None and 'cursor' not in e.args[1] \
            and e.args[1].endswith('closed')
    else:
        return e.errno in (2006, 2013, 2014, 2045, 2055)
@ -203,6 +194,7 @@ class MySQLDialect_oursql(MySQLDialect):
util.coerce_kw_type(opts, 'port', int)
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'autoping', bool)
util.coerce_kw_type(opts, 'raise_on_warnings', bool)
util.coerce_kw_type(opts, 'default_charset', bool)
if opts.pop('default_charset', False):
@ -216,12 +208,22 @@ class MySQLDialect_oursql(MySQLDialect):
# supports_sane_rowcount.
opts.setdefault('found_rows', True)
ssl = {}
for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
'ssl_capath', 'ssl_cipher']:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts['ssl'] = ssl
return [[], opts]
def _get_server_version_info(self, connection):
    # Parse the driver's server_info string into a version tuple.
    dbapi_con = connection.connection
    version = []
    r = re.compile('[.\-]')
    # NOTE(review): the assignment above is a stale duplicate; the
    # raw-string compile below is the newer revision of the pattern.
    r = re.compile(r'[.\-]')
    for n in r.split(dbapi_con.server_info):
        try:
            version.append(int(n))
    # NOTE(review): the line below is diff-hunk residue; the ``except``
    # clause of the ``try`` above is missing from this view.
    @ -230,14 +232,11 @@ class MySQLDialect_oursql(MySQLDialect):
    return tuple(version)
def _extract_error_code(self, exception):
    # Return the driver errno from a wrapped DBAPI exception;
    # None when the exception carries no ``.orig``.
    try:
        return exception.orig.errno
    except AttributeError:
        return None
    # NOTE(review): unreachable stale line from an older revision.
    return exception.errno
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
return connection.connection.charset
def _compat_fetchall(self, rp, charset=None):

View File

@ -0,0 +1,70 @@
# mysql/pymysql.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+pymysql
:name: PyMySQL
:dbapi: pymysql
:connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>\
[?<options>]
:url: http://www.pymysql.org/
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
MySQL-Python Compatibility
--------------------------
The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
and targets 100% compatibility. Most behavioral notes for MySQL-python apply
to the pymysql driver as well.
"""
from .mysqldb import MySQLDialect_mysqldb
from ...util import langhelpers, py3k
class MySQLDialect_pymysql(MySQLDialect_mysqldb):
    """MySQL dialect for the pure-Python PyMySQL DBAPI."""

    driver = 'pymysql'
    description_encoding = None

    # generally, these two values should be both True
    # or both False. PyMySQL unicode tests pass all the way back
    # to 0.4 either way. See [ticket:3337]
    supports_unicode_statements = True
    supports_unicode_binds = True

    def __init__(self, server_side_cursors=False, **kwargs):
        super(MySQLDialect_pymysql, self).__init__(**kwargs)
        self.server_side_cursors = server_side_cursors

    @langhelpers.memoized_property
    def supports_server_side_cursors(self):
        """True when ``pymysql.cursors.SSCursor`` can be imported."""
        try:
            cursors = __import__('pymysql.cursors').cursors
            self._sscursor = cursors.SSCursor
        except (ImportError, AttributeError):
            return False
        else:
            return True

    @classmethod
    def dbapi(cls):
        # Imported lazily so the module loads without the driver.
        return __import__('pymysql')

    if py3k:
        def _extract_error_code(self, exception):
            # PyMySQL may wrap the originating exception in args[0].
            wrapped = exception.args[0]
            if isinstance(wrapped, Exception):
                exception = wrapped
            return exception.args[0]
dialect = MySQLDialect_pymysql

View File

@ -1,32 +1,33 @@
"""Support for the MySQL database via the pyodbc adapter.
pyodbc is available at:
http://pypi.python.org/pypi/pyodbc/
Connecting
----------
Connect string::
mysql+pyodbc://<username>:<password>@<dsnname>
Limitations
-----------
The mysql-pyodbc dialect is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
of OurSQL, MySQLdb, or MySQL-connector/Python.
# mysql/pyodbc.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
from sqlalchemy.dialects.mysql.base import MySQLDialect, MySQLExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy.engine import base as engine_base
from sqlalchemy import util
.. dialect:: mysql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
.. note:: The PyODBC for MySQL dialect is not well supported, and
is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25).
Other dialects for MySQL are recommended.
"""
from .base import MySQLDialect, MySQLExecutionContext
from ...connectors.pyodbc import PyODBCConnector
from ... import util
import re
class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
def get_lastrowid(self):
@ -36,12 +37,13 @@ class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
cursor.close()
return lastrowid
class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
supports_unicode_statements = False
execution_ctx_cls = MySQLExecutionContext_pyodbc
pyodbc_driver_name = "MySQL"
def __init__(self, **kw):
# deal with http://code.google.com/p/pyodbc/issues/detail?id=25
kw.setdefault('convert_unicode', True)
@ -62,11 +64,12 @@ class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
if opts.get(key, None):
return opts[key]
util.warn("Could not detect the connection character set. Assuming latin1.")
util.warn("Could not detect the connection character set. "
"Assuming latin1.")
return 'latin1'
def _extract_error_code(self, exception):
m = re.compile(r"\((\d+)\)").search(str(exception.orig.args))
m = re.compile(r"\((\d+)\)").search(str(exception.args))
c = m.group(1)
if c:
return int(c)

View File

@ -0,0 +1,450 @@
# mysql/reflection.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from ... import log, util
from ... import types as sqltypes
from .enumerated import _EnumeratedValues, SET
from .types import DATETIME, TIME, TIMESTAMP
class ReflectedState(object):
    """Raw accumulator for the pieces of a SHOW CREATE TABLE statement."""

    def __init__(self):
        # column dicts, in definition order
        self.columns = []
        # table-level options, keyed '<dialect>_<option>'
        self.table_options = {}
        self.table_name = None
        # index/key specs
        self.keys = []
        # constraint specs
        self.constraints = []
@log.class_logger
class MySQLTableDefinitionParser(object):
"""Parses the results of a SHOW CREATE TABLE statement."""
def __init__(self, dialect, preparer):
    """Store the dialect/preparer and pre-compile the parsing regexes."""
    self.preparer = preparer
    self.dialect = dialect
    # Regexes depend on the preparer's quoting characters.
    self._prep_regexes()
def parse(self, show_create, charset):
    """Parse SHOW CREATE TABLE output into a :class:`.ReflectedState`.

    Dispatches each line by its leading characters: quoted lines are
    columns, ') ...' is the table-options line, 'CREATE ' carries the
    table name, and everything else is tried as a KEY/CONSTRAINT.
    """
    state = ReflectedState()
    state.charset = charset
    for line in re.split(r'\r?\n', show_create):
        if line.startswith(' ' + self.preparer.initial_quote):
            self._parse_column(line, state)
        # a regular table options line
        elif line.startswith(') '):
            self._parse_table_options(line, state)
        # an ANSI-mode table options line
        elif line == ')':
            pass
        elif line.startswith('CREATE '):
            self._parse_table_name(line, state)
        # Not present in real reflection, but may be if
        # loading from a file.
        elif not line:
            pass
        else:
            type_, spec = self._parse_constraints(line)
            if type_ is None:
                util.warn("Unknown schema content: %r" % line)
            elif type_ == 'key':
                state.keys.append(spec)
            elif type_ == 'constraint':
                state.constraints.append(spec)
            else:
                # e.g. 'partition' lines are recognized but ignored.
                pass
    return state
def _parse_constraints(self, line):
    """Parse a KEY or CONSTRAINT line.

    :param line: A line of SHOW CREATE TABLE output
    :return: a 2-tuple ``(kind, spec)`` where kind is 'key',
      'constraint', 'partition', or None when the line is unrecognized.
    """
    # KEY
    m = self._re_key.match(line)
    if m:
        spec = m.groupdict()
        # convert columns into name, length pairs
        spec['columns'] = self._parse_keyexprs(spec['columns'])
        return 'key', spec

    # CONSTRAINT
    m = self._re_constraint.match(line)
    if m:
        spec = m.groupdict()
        spec['table'] = \
            self.preparer.unformat_identifiers(spec['table'])
        # keep only the column names, dropping any length component
        spec['local'] = [c[0]
                         for c in self._parse_keyexprs(spec['local'])]
        spec['foreign'] = [c[0]
                          for c in self._parse_keyexprs(spec['foreign'])]
        return 'constraint', spec

    # PARTITION and SUBPARTITION
    m = self._re_partition.match(line)
    if m:
        # Punt!
        return 'partition', line

    # No match.
    return (None, line)
def _parse_table_name(self, line, state):
"""Extract the table name.
:param line: The first line of SHOW CREATE TABLE
"""
regex, cleanup = self._pr_name
m = regex.match(line)
if m:
state.table_name = cleanup(m.group('name'))
def _parse_table_options(self, line, state):
    """Build a dictionary of all reflected table-level options.

    :param line: The final line of SHOW CREATE TABLE output.
    """
    options = {}

    if not line or line == ')':
        pass
    else:
        rest_of_line = line[:]
        for regex, cleanup in self._pr_options:
            m = regex.search(rest_of_line)
            if not m:
                continue
            directive, value = m.group('directive'), m.group('val')
            if cleanup:
                value = cleanup(value)
            options[directive.lower()] = value
            # consume the matched option so later patterns can't re-match
            rest_of_line = regex.sub('', rest_of_line)

    # instance-specific values that shouldn't round-trip into DDL
    for nope in ('auto_increment', 'data directory', 'index directory'):
        options.pop(nope, None)

    # namespace the surviving options by dialect name, e.g. 'mysql_engine'
    for opt, val in options.items():
        state.table_options['%s_%s' % (self.dialect.name, opt)] = val
def _parse_column(self, line, state):
    """Extract column details.

    Falls back to a 'minimal support' variant if full parse fails.

    :param line: Any column-bearing line from SHOW CREATE TABLE
    """
    spec = None
    m = self._re_column.match(line)
    if m:
        spec = m.groupdict()
        spec['full'] = True
    else:
        # fall back to the loose pattern: name/type only
        m = self._re_column_loose.match(line)
        if m:
            spec = m.groupdict()
            spec['full'] = False
    if not spec:
        util.warn("Unknown column definition %r" % line)
        return
    if not spec['full']:
        util.warn("Incomplete reflection of column definition %r" % line)

    name, type_, args = spec['name'], spec['coltype'], spec['arg']

    try:
        col_type = self.dialect.ischema_names[type_]
    except KeyError:
        util.warn("Did not recognize type '%s' of column '%s'" %
                  (type_, name))
        col_type = sqltypes.NullType

    # Column type positional arguments eg. varchar(32)
    if args is None or args == '':
        type_args = []
    elif args[0] == "'" and args[-1] == "'":
        # quoted values, e.g. an ENUM/SET member list
        type_args = self._re_csv_str.findall(args)
    else:
        type_args = [int(v) for v in self._re_csv_int.findall(args)]

    # Column type keyword options
    type_kw = {}

    if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)):
        if type_args:
            # e.g. DATETIME(6) — fractional-seconds precision
            type_kw['fsp'] = type_args.pop(0)

    for kw in ('unsigned', 'zerofill'):
        if spec.get(kw, False):
            type_kw[kw] = True
    for kw in ('charset', 'collate'):
        if spec.get(kw, False):
            type_kw[kw] = spec[kw]
    if issubclass(col_type, _EnumeratedValues):
        type_args = _EnumeratedValues._strip_values(type_args)

        if issubclass(col_type, SET) and '' in type_args:
            # SET with a '' member can only round-trip bitwise
            type_kw['retrieve_as_bitwise'] = True

    type_instance = col_type(*type_args, **type_kw)

    col_kw = {}

    # NOT NULL
    col_kw['nullable'] = True
    # this can be "NULL" in the case of TIMESTAMP
    if spec.get('notnull', False) == 'NOT NULL':
        col_kw['nullable'] = False

    # AUTO_INCREMENT
    if spec.get('autoincr', False):
        col_kw['autoincrement'] = True
    elif issubclass(col_type, sqltypes.Integer):
        col_kw['autoincrement'] = False

    # DEFAULT
    default = spec.get('default', None)

    if default == 'NULL':
        # eliminates the need to deal with this later.
        default = None

    col_d = dict(name=name, type=type_instance, default=default)
    col_d.update(col_kw)
    state.columns.append(col_d)
def _describe_to_create(self, table_name, columns):
    """Re-format DESCRIBE output as a SHOW CREATE TABLE string.

    DESCRIBE is a much simpler reflection and is sufficient for
    reflecting views for runtime use.  This method formats DDL
    for columns only- keys are omitted.

    :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
      SHOW FULL COLUMNS FROM rows must be rearranged for use with
      this function.
    """
    buffer = []
    for row in columns:
        # fields 0,1,2,4,5 = name, type, nullable, default, extra
        (name, col_type, nullable, default, extra) = \
            [row[i] for i in (0, 1, 2, 4, 5)]
        line = [' ']
        line.append(self.preparer.quote_identifier(name))
        line.append(col_type)
        if not nullable:
            line.append('NOT NULL')
        if default:
            if 'auto_increment' in default:
                pass
            elif (col_type.startswith('timestamp') and
                  default.startswith('C')):
                # CURRENT_TIMESTAMP defaults are emitted unquoted
                line.append('DEFAULT')
                line.append(default)
            elif default == 'NULL':
                line.append('DEFAULT')
                line.append(default)
            else:
                # literal defaults are quoted, with '' escaping
                line.append('DEFAULT')
                line.append("'%s'" % default.replace("'", "''"))
        if extra:
            line.append(extra)
        buffer.append(' '.join(line))
    return ''.join([('CREATE TABLE %s (\n' %
                     self.preparer.quote_identifier(table_name)),
                    ',\n'.join(buffer),
                    '\n) '])
def _parse_keyexprs(self, identifiers):
"""Unpack '"col"(2),"col" ASC'-ish strings into components."""
return self._re_keyexprs.findall(identifiers)
def _prep_regexes(self):
"""Pre-compile regular expressions."""
self._re_columns = []
self._pr_options = []
_final = self.preparer.final_quote
quotes = dict(zip(('iq', 'fq', 'esc_fq'),
[re.escape(s) for s in
(self.preparer.initial_quote,
_final,
self.preparer._escape_identifier(_final))]))
self._pr_name = _pr_compile(
r'^CREATE (?:\w+ +)?TABLE +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
self.preparer._unescape_identifier)
# `col`,`col2`(32),`col3`(15) DESC
#
# Note: ASC and DESC aren't reflected, so we'll punt...
self._re_keyexprs = _re_compile(
r'(?:'
r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
# 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
# 123 or 123,456
self._re_csv_int = _re_compile(r'\d+')
# `colname` <type> [type opts]
# (NOT NULL | NULL)
# DEFAULT ('value' | CURRENT_TIMESTAMP...)
# COMMENT 'comment'
# COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
# STORAGE (DISK|MEMORY)
self._re_column = _re_compile(
r' '
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
r'(?: +(?P<unsigned>UNSIGNED))?'
r'(?: +(?P<zerofill>ZEROFILL))?'
r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
r'(?: +COLLATE +(?P<collate>[\w_]+))?'
r'(?: +(?P<notnull>(?:NOT )?NULL))?'
r'(?: +DEFAULT +(?P<default>'
r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
r'(?: +ON UPDATE \w+)?)'
r'))?'
r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
r'(?: +COMMENT +(P<comment>(?:\x27\x27|[^\x27])+))?'
r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
r'(?: +STORAGE +(?P<storage>\w+))?'
r'(?: +(?P<extra>.*))?'
r',?$'
% quotes
)
# Fallback, try to parse as little as possible
self._re_column_loose = _re_compile(
r' '
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
r'.*?(?P<notnull>(?:NOT )NULL)?'
% quotes
)
# (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
# (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
# KEY_BLOCK_SIZE size | WITH PARSER name
self._re_key = _re_compile(
r' '
r'(?:(?P<type>\S+) )?KEY'
r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
r'(?: +USING +(?P<using_pre>\S+))?'
r' +\((?P<columns>.+?)\)'
r'(?: +USING +(?P<using_post>\S+))?'
r'(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?'
r'(?: +WITH PARSER +(?P<parser>\S+))?'
r'(?: +COMMENT +(?P<comment>(\x27\x27|\x27([^\x27])*?\x27)+))?'
r',?$'
% quotes
)
# CONSTRAINT `name` FOREIGN KEY (`local_col`)
# REFERENCES `remote` (`remote_col`)
# MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
# ON DELETE CASCADE ON UPDATE RESTRICT
#
# unique constraints come back as KEYs
kw = quotes.copy()
kw['on'] = 'RESTRICT|CASCADE|SET NULL|NOACTION'
self._re_constraint = _re_compile(
r' '
r'CONSTRAINT +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'FOREIGN KEY +'
r'\((?P<local>[^\)]+?)\) REFERENCES +'
r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s'
r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
r'\((?P<foreign>[^\)]+?)\)'
r'(?: +(?P<match>MATCH \w+))?'
r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
% kw
)
# PARTITION
#
# punt!
self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
# Table-level options (COLLATE, ENGINE, etc.)
# Do the string options first, since they have quoted
# strings we need to get rid of.
for option in _options_of_type_string:
self._add_option_string(option)
for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
'AVG_ROW_LENGTH', 'CHARACTER SET',
'DEFAULT CHARSET', 'CHECKSUM',
'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
'KEY_BLOCK_SIZE'):
self._add_option_word(option)
self._add_option_regex('UNION', r'\([^\)]+\)')
self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
self._add_option_regex(
'RAID_TYPE',
r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
_optional_equals = r'(?:\s*(?:=\s*)|\s+)'
def _add_option_string(self, directive):
regex = (r'(?P<directive>%s)%s'
r"'(?P<val>(?:[^']|'')*?)'(?!')" %
(re.escape(directive), self._optional_equals))
self._pr_options.append(_pr_compile(
regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
))
def _add_option_word(self, directive):
regex = (r'(?P<directive>%s)%s'
r'(?P<val>\w+)' %
(re.escape(directive), self._optional_equals))
self._pr_options.append(_pr_compile(regex))
def _add_option_regex(self, directive, regex):
regex = (r'(?P<directive>%s)%s'
r'(?P<val>%s)' %
(re.escape(directive), self._optional_equals, regex))
self._pr_options.append(_pr_compile(regex))
# Table options whose values appear as quoted strings in SHOW CREATE TABLE
# output; these are parsed with _add_option_string rather than as bare words.
_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
                           'PASSWORD', 'CONNECTION')
def _pr_compile(regex, cleanup=None):
    """Prepare a 2-tuple of compiled regex and callable."""
    compiled = _re_compile(regex)
    return (compiled, cleanup)
def _re_compile(regex):
"""Compile a string to regex, I and UNICODE."""
return re.compile(regex, re.I | re.UNICODE)

View File

@ -0,0 +1,766 @@
# mysql/types.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import datetime
from ... import exc, util
from ... import types as sqltypes
class _NumericType(object):
"""Base for MySQL numeric types.
This is the base both for NUMERIC as well as INTEGER, hence
it's a mixin.
"""
def __init__(self, unsigned=False, zerofill=False, **kw):
self.unsigned = unsigned
self.zerofill = zerofill
super(_NumericType, self).__init__(**kw)
def __repr__(self):
return util.generic_repr(self,
to_inspect=[_NumericType, sqltypes.Numeric])
class _FloatType(_NumericType, sqltypes.Float):
    """Mixin for MySQL float types carrying an explicit scale."""

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        if isinstance(self, (REAL, DOUBLE)):
            # REAL and DOUBLE require precision and scale as a pair
            has_precision = precision is not None
            has_scale = scale is not None
            if has_precision != has_scale:
                raise exc.ArgumentError(
                    "You must specify both precision and scale or omit "
                    "both altogether.")
        super(_FloatType, self).__init__(
            precision=precision, asdecimal=asdecimal, **kw)
        self.scale = scale

    def __repr__(self):
        inspectable = [_FloatType, _NumericType, sqltypes.Float]
        return util.generic_repr(self, to_inspect=inspectable)
class _IntegerType(_NumericType, sqltypes.Integer):
    """Mixin for MySQL integer types carrying an optional display width."""

    def __init__(self, display_width=None, **kw):
        self.display_width = display_width
        super(_IntegerType, self).__init__(**kw)

    def __repr__(self):
        inspectable = [_IntegerType, _NumericType, sqltypes.Integer]
        return util.generic_repr(self, to_inspect=inspectable)
class _StringType(sqltypes.String):
    """Base for MySQL string types."""

    def __init__(self, charset=None, collation=None,
                 ascii=False, binary=False, unicode=False,
                 national=False, **kw):
        # record MySQL-specific character-set flags on the instance;
        # accept the legacy collate= spelling as an alias for collation=
        kw.setdefault('collation', kw.pop('collate', collation))
        self.charset = charset
        self.ascii = ascii
        self.unicode = unicode
        self.binary = binary
        self.national = national
        super(_StringType, self).__init__(**kw)

    def __repr__(self):
        return util.generic_repr(
            self, to_inspect=[_StringType, sqltypes.String])
class _MatchType(sqltypes.Float, sqltypes.MatchType):
    # Result type of the MATCH ... AGAINST construct, which yields a
    # floating-point relevance score on MySQL.
    def __init__(self, **kw):
        # TODO: float arguments?
        # NOTE(review): **kw is accepted but not forwarded to either base --
        # confirm this is intentional.
        sqltypes.Float.__init__(self)
        sqltypes.MatchType.__init__(self)
class NUMERIC(_NumericType, sqltypes.NUMERIC):
    """MySQL NUMERIC type."""

    __visit_name__ = 'NUMERIC'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a NUMERIC.

        :param precision: Total digits in this number.  If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(NUMERIC, self).__init__(precision=precision,
                                      scale=scale, asdecimal=asdecimal, **kw)
class DECIMAL(_NumericType, sqltypes.DECIMAL):
    """MySQL DECIMAL type."""

    __visit_name__ = 'DECIMAL'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DECIMAL.

        :param precision: Total digits in this number.  If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(DECIMAL, self).__init__(precision=precision, scale=scale,
                                      asdecimal=asdecimal, **kw)
class DOUBLE(_FloatType):
    """MySQL DOUBLE type."""

    __visit_name__ = 'DOUBLE'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DOUBLE.

        .. note::

            The :class:`.DOUBLE` type by default converts from float
            to Decimal, using a truncation that defaults to 10 digits.
            Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
            to change this scale, or ``asdecimal=False`` to return values
            directly as Python floating points.

        :param precision: Total digits in this number.  If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(DOUBLE, self).__init__(precision=precision, scale=scale,
                                     asdecimal=asdecimal, **kw)
class REAL(_FloatType, sqltypes.REAL):
    """MySQL REAL type."""

    __visit_name__ = 'REAL'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a REAL.

        .. note::

            The :class:`.REAL` type by default converts from float
            to Decimal, using a truncation that defaults to 10 digits.
            Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
            to change this scale, or ``asdecimal=False`` to return values
            directly as Python floating points.

        :param precision: Total digits in this number.  If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(REAL, self).__init__(precision=precision, scale=scale,
                                   asdecimal=asdecimal, **kw)
class FLOAT(_FloatType, sqltypes.FLOAT):
    """MySQL FLOAT type."""

    __visit_name__ = 'FLOAT'

    def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
        """Construct a FLOAT.

        :param precision: Total digits in this number.  If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(FLOAT, self).__init__(precision=precision, scale=scale,
                                    asdecimal=asdecimal, **kw)

    def bind_processor(self, dialect):
        # no conversion needed for outgoing float values
        return None
class INTEGER(_IntegerType, sqltypes.INTEGER):
    """MySQL INTEGER type."""

    __visit_name__ = 'INTEGER'

    def __init__(self, display_width=None, **kw):
        """Construct an INTEGER.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(INTEGER, self).__init__(display_width=display_width, **kw)
class BIGINT(_IntegerType, sqltypes.BIGINT):
    """MySQL BIGINTEGER type."""

    __visit_name__ = 'BIGINT'

    def __init__(self, display_width=None, **kw):
        """Construct a BIGINTEGER.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(BIGINT, self).__init__(display_width=display_width, **kw)
class MEDIUMINT(_IntegerType):
    """MySQL MEDIUMINTEGER type."""

    __visit_name__ = 'MEDIUMINT'

    def __init__(self, display_width=None, **kw):
        """Construct a MEDIUMINTEGER.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
class TINYINT(_IntegerType):
    """MySQL TINYINT type."""

    __visit_name__ = 'TINYINT'

    def __init__(self, display_width=None, **kw):
        """Construct a TINYINT.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(TINYINT, self).__init__(display_width=display_width, **kw)
class SMALLINT(_IntegerType, sqltypes.SMALLINT):
    """MySQL SMALLINTEGER type."""

    __visit_name__ = 'SMALLINT'

    def __init__(self, display_width=None, **kw):
        """Construct a SMALLINTEGER.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(SMALLINT, self).__init__(display_width=display_width, **kw)
class BIT(sqltypes.TypeEngine):
    """MySQL BIT type.

    This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater
    for MyISAM, MEMORY, InnoDB and BDB.  For older versions, use a
    MSTinyInteger() type.
    """

    __visit_name__ = 'BIT'

    def __init__(self, length=None):
        """Construct a BIT.

        :param length: Optional, number of bits.
        """
        self.length = length

    def result_processor(self, dialect, coltype):
        """Convert a MySQL's 64 bit, variable length binary string to a long.

        TODO: this is MySQL-db, pyodbc specific.  OurSQL and mysqlconnector
        already do this, so this logic should be moved to those dialects.
        """
        def process(value):
            if value is None:
                return None
            result = 0
            for byte in value:
                if not isinstance(byte, int):
                    # iterating a bytestring on Python 2 yields 1-char
                    # strings; convert to the integer byte value
                    byte = ord(byte)
                result = (result << 8) | byte
            return result
        return process
class TIME(sqltypes.TIME):
    """MySQL TIME type. """

    __visit_name__ = 'TIME'

    def __init__(self, timezone=False, fsp=None):
        """Construct a MySQL TIME type.

        :param timezone: not used by the MySQL dialect.

        :param fsp: fractional seconds precision value.
         MySQL 5.6 supports storage of fractional seconds;
         this parameter will be used when emitting DDL
         for the TIME type.

         .. note::

            DBAPI driver support for fractional seconds may
            be limited; current support includes
            MySQL Connector/Python.

        .. versionadded:: 0.8 The MySQL-specific TIME
           type as well as fractional seconds support.

        """
        super(TIME, self).__init__(timezone=timezone)
        self.fsp = fsp

    def result_processor(self, dialect, coltype):
        time = datetime.time

        def process(value):
            # the driver returns TIME values as timedelta; convert to
            # a datetime.time
            if value is None:
                return None
            total_minutes, secs = divmod(value.seconds, 60)
            hours, mins = divmod(total_minutes, 60)
            return time(hours, mins, secs,
                        microsecond=value.microseconds)
        return process
class TIMESTAMP(sqltypes.TIMESTAMP):
    """MySQL TIMESTAMP type.

    """

    __visit_name__ = 'TIMESTAMP'

    def __init__(self, timezone=False, fsp=None):
        """Construct a MySQL TIMESTAMP type.

        :param timezone: not used by the MySQL dialect.

        :param fsp: fractional seconds precision value.
         MySQL 5.6.4 supports storage of fractional seconds;
         this parameter will be used when emitting DDL
         for the TIMESTAMP type.

         .. note::

            DBAPI driver support for fractional seconds may
            be limited; current support includes
            MySQL Connector/Python.

        .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.TIMESTAMP`
           with fractional seconds support.

        """
        super(TIMESTAMP, self).__init__(timezone=timezone)
        # consumed by the DDL compiler when rendering TIMESTAMP(fsp)
        self.fsp = fsp
class DATETIME(sqltypes.DATETIME):
    """MySQL DATETIME type.

    """

    __visit_name__ = 'DATETIME'

    def __init__(self, timezone=False, fsp=None):
        """Construct a MySQL DATETIME type.

        :param timezone: not used by the MySQL dialect.

        :param fsp: fractional seconds precision value.
         MySQL 5.6.4 supports storage of fractional seconds;
         this parameter will be used when emitting DDL
         for the DATETIME type.

         .. note::

            DBAPI driver support for fractional seconds may
            be limited; current support includes
            MySQL Connector/Python.

        .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.DATETIME`
           with fractional seconds support.

        """
        super(DATETIME, self).__init__(timezone=timezone)
        # consumed by the DDL compiler when rendering DATETIME(fsp)
        self.fsp = fsp
class YEAR(sqltypes.TypeEngine):
    """MySQL YEAR type, for single byte storage of years 1901-2155."""

    __visit_name__ = 'YEAR'

    def __init__(self, display_width=None):
        # :param display_width: optional display width rendered in DDL,
        #   e.g. YEAR(4)
        self.display_width = display_width
class TEXT(_StringType, sqltypes.TEXT):
    """MySQL TEXT type, for text up to 2^16 characters."""

    __visit_name__ = 'TEXT'

    def __init__(self, length=None, **kw):
        """Construct a TEXT.

        :param length: Optional, if provided the server may optimize storage
          by substituting the smallest TEXT type sufficient to store
          ``length`` characters.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        """
        super(TEXT, self).__init__(length=length, **kw)
class TINYTEXT(_StringType):
    """MySQL TINYTEXT type, for text up to 2^8 characters."""

    __visit_name__ = 'TINYTEXT'

    def __init__(self, **kwargs):
        """Construct a TINYTEXT.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        """
        super(TINYTEXT, self).__init__(**kwargs)
class MEDIUMTEXT(_StringType):
    """MySQL MEDIUMTEXT type, for text up to 2^24 characters."""

    __visit_name__ = 'MEDIUMTEXT'

    def __init__(self, **kwargs):
        """Construct a MEDIUMTEXT.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        """
        super(MEDIUMTEXT, self).__init__(**kwargs)
class LONGTEXT(_StringType):
    """MySQL LONGTEXT type, for text up to 2^32 characters."""

    __visit_name__ = 'LONGTEXT'

    def __init__(self, **kwargs):
        """Construct a LONGTEXT.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        """
        super(LONGTEXT, self).__init__(**kwargs)
class VARCHAR(_StringType, sqltypes.VARCHAR):
    """MySQL VARCHAR type, for variable-length character data."""

    __visit_name__ = 'VARCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct a VARCHAR.

        :param length: Maximum data length, in characters.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        """
        super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
    """MySQL CHAR type, for fixed-length character data."""

    __visit_name__ = 'CHAR'

    def __init__(self, length=None, **kwargs):
        """Construct a CHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set.  This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation.  Must be
          compatible with the national character set.

        """
        super(CHAR, self).__init__(length=length, **kwargs)

    # BUGFIX(style): this is a classmethod, so its first argument is the
    # class, not an instance -- renamed the misleading ``self`` to ``cls``.
    @classmethod
    def _adapt_string_for_cast(cls, type_):
        # copy the given string type into a CHAR
        # for the purposes of rendering a CAST expression
        type_ = sqltypes.to_instance(type_)
        if isinstance(type_, sqltypes.CHAR):
            return type_
        elif isinstance(type_, _StringType):
            return CHAR(
                length=type_.length,
                charset=type_.charset,
                collation=type_.collation,
                ascii=type_.ascii,
                binary=type_.binary,
                unicode=type_.unicode,
                national=False  # not supported in CAST
            )
        else:
            return CHAR(length=type_.length)
class NVARCHAR(_StringType, sqltypes.NVARCHAR):
    """MySQL NVARCHAR type.

    For variable-length character data in the server's configured national
    character set.
    """

    __visit_name__ = 'NVARCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct an NVARCHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set.  This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation.  Must be
          compatible with the national character set.

        """
        # force the NATIONAL flag; this is what distinguishes NVARCHAR
        # from VARCHAR in the rendered DDL
        kwargs['national'] = True
        super(NVARCHAR, self).__init__(length=length, **kwargs)
class NCHAR(_StringType, sqltypes.NCHAR):
    """MySQL NCHAR type.

    For fixed-length character data in the server's configured national
    character set.
    """

    __visit_name__ = 'NCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct an NCHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set.  This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation.  Must be
          compatible with the national character set.

        """
        # force the NATIONAL flag; this is what distinguishes NCHAR
        # from CHAR in the rendered DDL
        kwargs['national'] = True
        super(NCHAR, self).__init__(length=length, **kwargs)
class TINYBLOB(sqltypes._Binary):
    """MySQL TINYBLOB type, for binary data up to 2^8 bytes."""

    __visit_name__ = 'TINYBLOB'
class MEDIUMBLOB(sqltypes._Binary):
    """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""

    __visit_name__ = 'MEDIUMBLOB'
class LONGBLOB(sqltypes._Binary):
    """MySQL LONGBLOB type, for binary data up to 2^32 bytes."""

    __visit_name__ = 'LONGBLOB'

View File

@ -1,17 +1,21 @@
"""Support for the MySQL database via Jython's zxjdbc JDBC connector.
# mysql/zxjdbc.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
JDBC Driver
-----------
"""
The official MySQL JDBC driver is at
http://dev.mysql.com/downloads/connector/j/.
.. dialect:: mysql+zxjdbc
:name: zxjdbc for Jython
:dbapi: zxjdbc
:connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/\
<database>
:driverurl: http://dev.mysql.com/downloads/connector/j/
Connecting
----------
Connect string format:
mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/<database>
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
Character Sets
--------------
@ -20,14 +24,15 @@ SQLAlchemy zxjdbc dialects pass unicode straight through to the
zxjdbc/JDBC layer. To allow multiple character sets to be sent from the
MySQL Connector/J JDBC driver, by default SQLAlchemy sets its
``characterEncoding`` connection property to ``UTF-8``. It may be
overriden via a ``create_engine`` URL parameter.
overridden via a ``create_engine`` URL parameter.
"""
import re
from sqlalchemy import types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.mysql.base import BIT, MySQLDialect, MySQLExecutionContext
from ... import types as sqltypes, util
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import BIT, MySQLDialect, MySQLExecutionContext
class _ZxJDBCBit(BIT):
def result_processor(self, dialect, coltype):
@ -37,7 +42,7 @@ class _ZxJDBCBit(BIT):
return value
if isinstance(value, bool):
return int(value)
v = 0L
v = 0
for i in value:
v = v << 8 | (i & 0xff)
value = v
@ -82,7 +87,8 @@ class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
if opts.get(key, None):
return opts[key]
util.warn("Could not detect the connection character set. Assuming latin1.")
util.warn("Could not detect the connection character set. "
"Assuming latin1.")
return 'latin1'
def _driver_kwargs(self):
@ -92,15 +98,15 @@ class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
def _extract_error_code(self, exception):
# e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
# [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.orig.args))
m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args))
c = m.group(1)
if c:
return int(c)
def _get_server_version_info(self,connection):
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile('[.\-]')
r = re.compile(r'[.\-]')
for n in r.split(dbapi_con.dbversion):
try:
version.append(int(n))

View File

@ -1,17 +1,24 @@
# oracle/__init__.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.oracle import base, cx_oracle, zxjdbc
base.dialect = cx_oracle.dialect
from sqlalchemy.dialects.oracle.base import \
VARCHAR, NVARCHAR, CHAR, DATE, DATETIME, NUMBER,\
VARCHAR, NVARCHAR, CHAR, DATE, NUMBER,\
BLOB, BFILE, CLOB, NCLOB, TIMESTAMP, RAW,\
FLOAT, DOUBLE_PRECISION, LONG, dialect, INTERVAL,\
VARCHAR2, NVARCHAR2
VARCHAR2, NVARCHAR2, ROWID, dialect
__all__ = (
'VARCHAR', 'NVARCHAR', 'CHAR', 'DATE', 'DATETIME', 'NUMBER',
'BLOB', 'BFILE', 'CLOB', 'NCLOB', 'TIMESTAMP', 'RAW',
'FLOAT', 'DOUBLE_PRECISION', 'LONG', 'dialect', 'INTERVAL',
'VARCHAR2', 'NVARCHAR2'
'VARCHAR', 'NVARCHAR', 'CHAR', 'DATE', 'NUMBER',
'BLOB', 'BFILE', 'CLOB', 'NCLOB', 'TIMESTAMP', 'RAW',
'FLOAT', 'DOUBLE_PRECISION', 'LONG', 'dialect', 'INTERVAL',
'VARCHAR2', 'NVARCHAR2', 'ROWID'
)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,10 +1,19 @@
"""Support for the Oracle database via the zxjdbc JDBC connector.
# oracle/zxjdbc.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
JDBC Driver
-----------
"""
.. dialect:: oracle+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: oracle+zxjdbc://user:pass@host/dbname
:driverurl: http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html
The official Oracle JDBC driver is at
http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html.
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
"""
import decimal
@ -12,12 +21,16 @@ import re
from sqlalchemy import sql, types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext
from sqlalchemy.engine import base, default
from sqlalchemy.dialects.oracle.base import (OracleCompiler,
OracleDialect,
OracleExecutionContext)
from sqlalchemy.engine import result as _result
from sqlalchemy.sql import expression
import collections
SQLException = zxJDBC = None
class _ZxJDBCDate(sqltypes.Date):
def result_processor(self, dialect, coltype):
@ -32,7 +45,7 @@ class _ZxJDBCDate(sqltypes.Date):
class _ZxJDBCNumeric(sqltypes.Numeric):
def result_processor(self, dialect, coltype):
#XXX: does the dialect return Decimal or not???
# XXX: does the dialect return Decimal or not???
# if it does (in all cases), we could use a None processor as well as
# the to_float generic processor
if self.asdecimal:
@ -53,10 +66,11 @@ class _ZxJDBCNumeric(sqltypes.Numeric):
class OracleCompiler_zxjdbc(OracleCompiler):
def returning_clause(self, stmt, returning_cols):
self.returning_cols = list(expression._select_iterables(returning_cols))
self.returning_cols = list(
expression._select_iterables(returning_cols))
# within_columns_clause=False so that labels (foo AS bar) don't render
columns = [self.process(c, within_columns_clause=False, result_map=self.result_map)
columns = [self.process(c, within_columns_clause=False)
for c in self.returning_cols]
if not hasattr(self, 'returning_parameters'):
@ -64,14 +78,17 @@ class OracleCompiler_zxjdbc(OracleCompiler):
binds = []
for i, col in enumerate(self.returning_cols):
dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi)
dbtype = col.type.dialect_impl(
self.dialect).get_dbapi_type(self.dialect.dbapi)
self.returning_parameters.append((i + 1, dbtype))
bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype))
bindparam = sql.bindparam(
"ret_%d" % i, value=ReturningParam(dbtype))
self.binds[bindparam.key] = bindparam
binds.append(self.bindparam_string(self._truncate_bindparam(bindparam)))
binds.append(
self.bindparam_string(self._truncate_bindparam(bindparam)))
return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
class OracleExecutionContext_zxjdbc(OracleExecutionContext):
@ -88,15 +105,19 @@ class OracleExecutionContext_zxjdbc(OracleExecutionContext):
try:
try:
rrs = self.statement.__statement__.getReturnResultSet()
rrs.next()
except SQLException, sqle:
msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode())
next(rrs)
except SQLException as sqle:
msg = '%s [SQLCode: %d]' % (
sqle.getMessage(), sqle.getErrorCode())
if sqle.getSQLState() is not None:
msg += ' [SQLState: %s]' % sqle.getSQLState()
raise zxJDBC.Error(msg)
else:
row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype)
for index, dbtype in self.compiled.returning_parameters)
row = tuple(
self.cursor.datahandler.getPyObject(
rrs, index, dbtype)
for index, dbtype in
self.compiled.returning_parameters)
return ReturningResultProxy(self, row)
finally:
if rrs is not None:
@ -106,15 +127,15 @@ class OracleExecutionContext_zxjdbc(OracleExecutionContext):
pass
self.statement.close()
return base.ResultProxy(self)
return _result.ResultProxy(self)
def create_cursor(self):
cursor = self._connection.connection.cursor()
cursor = self._dbapi_connection.cursor()
cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
return cursor
class ReturningResultProxy(base.FullyBufferedResultProxy):
class ReturningResultProxy(_result.FullyBufferedResultProxy):
"""ResultProxy backed by the RETURNING ResultSet results."""
@ -132,7 +153,7 @@ class ReturningResultProxy(base.FullyBufferedResultProxy):
return ret
def _buffer_rows(self):
return [self._returning_row]
return collections.deque([self._returning_row])
class ReturningParam(object):
@ -157,8 +178,8 @@ class ReturningParam(object):
def __repr__(self):
kls = self.__class__
return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__, id(self),
self.type)
return '<%s.%s object at 0x%x type=%s>' % (
kls.__module__, kls.__name__, id(self), self.type)
class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
@ -171,7 +192,7 @@ class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
colspecs = util.update_copy(
OracleDialect.colspecs,
{
sqltypes.Date : _ZxJDBCDate,
sqltypes.Date: _ZxJDBCDate,
sqltypes.Numeric: _ZxJDBCNumeric
}
)
@ -182,28 +203,33 @@ class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
from java.sql import SQLException
from com.ziclix.python.sql import zxJDBC
from com.ziclix.python.sql.handler import OracleDataHandler
class OracleReturningDataHandler(OracleDataHandler):
class OracleReturningDataHandler(OracleDataHandler):
"""zxJDBC DataHandler that specially handles ReturningParam."""
def setJDBCObject(self, statement, index, object, dbtype=None):
if type(object) is ReturningParam:
statement.registerReturnParameter(index, object.type)
elif dbtype is None:
OracleDataHandler.setJDBCObject(self, statement, index, object)
OracleDataHandler.setJDBCObject(
self, statement, index, object)
else:
OracleDataHandler.setJDBCObject(self, statement, index, object, dbtype)
OracleDataHandler.setJDBCObject(
self, statement, index, object, dbtype)
self.DataHandler = OracleReturningDataHandler
def initialize(self, connection):
super(OracleDialect_zxjdbc, self).initialize(connection)
self.implicit_returning = connection.connection.driverversion >= '10.2'
self.implicit_returning = \
connection.connection.driverversion >= '10.2'
def _create_jdbc_url(self, url):
return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database)
return 'jdbc:oracle:thin:@%s:%s:%s' % (
url.host, url.port or 1521, url.database)
def _get_server_version_info(self, connection):
version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
version = re.search(
r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
return tuple(int(x) for x in version.split('.'))
dialect = OracleDialect_zxjdbc

View File

@ -0,0 +1,314 @@
# postgresql/array.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .base import ischema_names
from ...sql import expression, operators
from ...sql.base import SchemaEventTarget
from ... import types as sqltypes
try:
    from uuid import UUID as _python_UUID
except ImportError:
    # uuid module unavailable on this platform; code that would wrap
    # values in UUID objects must fall back to plain strings.
    # NOTE(review): _python_UUID appears unused in this module's visible
    # code -- likely residue from the module this file was split from;
    # confirm before removing.
    _python_UUID = None
def Any(other, arrexpr, operator=operators.eq):
    """Legacy synonym for the :meth:`.ARRAY.Comparator.any` method.

    Retained for backwards-compatibility only.

    .. seealso::

        :func:`.expression.any_`

    """
    # Delegate directly to the comparator-level ``any()`` implementation.
    return arrexpr.any(other, operator)
def All(other, arrexpr, operator=operators.eq):
    """Legacy synonym for the :meth:`.ARRAY.Comparator.all` method.

    Retained for backwards-compatibility only.

    .. seealso::

        :func:`.expression.all_`

    """
    # Delegate directly to the comparator-level ``all()`` implementation.
    return arrexpr.all(other, operator)
class array(expression.Tuple):
    """A PostgreSQL ARRAY literal.

    This is used to produce ARRAY literals in SQL expressions, e.g.::

        from sqlalchemy.dialects.postgresql import array
        from sqlalchemy.dialects import postgresql
        from sqlalchemy import select, func

        stmt = select([
                    array([1,2]) + array([3,4,5])
                ])

        print stmt.compile(dialect=postgresql.dialect())

    Produces the SQL::

        SELECT ARRAY[%(param_1)s, %(param_2)s] ||
            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1

    An instance of :class:`.array` will always have the datatype
    :class:`.ARRAY`.  The "inner" type of the array is inferred from the
    values present, unless the ``type_`` keyword argument is passed::

        array(['foo', 'bar'], type_=CHAR)

    .. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.

    See also:

    :class:`.postgresql.ARRAY`

    """
    __visit_name__ = 'array'

    def __init__(self, clauses, **kw):
        # Build the underlying Tuple first, then wrap whatever element
        # type it inferred in the PostgreSQL ARRAY type.
        super(array, self).__init__(*clauses, **kw)
        self.type = ARRAY(self.type)

    def _bind_param(self, operator, obj, _assume_scalar=False, type_=None):
        if not (_assume_scalar or operator is operators.getitem):
            # A collection value: bind each element individually and
            # rebuild the result as a new array of scalar bind params.
            return array([
                self._bind_param(operator, elem, _assume_scalar=True,
                                 type_=type_)
                for elem in obj])
        # Scalar case; if getitem->slice were called, Indexable produces
        # a Slice object from that, so only plain ints arrive here.
        assert isinstance(obj, int)
        return expression.BindParameter(
            None, obj,
            _compared_to_operator=operator,
            type_=type_,
            _compared_to_type=self.type,
            unique=True)

    def self_group(self, against=None):
        # Parenthesize when combined with ANY/ALL or indexing operators.
        grouping_ops = (
            operators.any_op, operators.all_op, operators.getitem)
        if against in grouping_ops:
            return expression.Grouping(self)
        return self
# PostgreSQL-specific array operators, consumed by ARRAY.Comparator below.
CONTAINS = operators.custom_op("@>", precedence=5)
CONTAINED_BY = operators.custom_op("<@", precedence=5)
OVERLAP = operators.custom_op("&&", precedence=5)
class ARRAY(SchemaEventTarget, sqltypes.ARRAY):
    """PostgreSQL ARRAY type.

    .. versionchanged:: 1.1 The :class:`.postgresql.ARRAY` type is now
       a subclass of the core :class:`.types.ARRAY` type.

    The :class:`.postgresql.ARRAY` type is constructed in the same way
    as the core :class:`.types.ARRAY` type; a member type is required, and a
    number of dimensions is recommended if the type is to be used for more
    than one dimension::

        from sqlalchemy.dialects import postgresql

        mytable = Table("mytable", metadata,
                Column("data", postgresql.ARRAY(Integer, dimensions=2))
            )

    The :class:`.postgresql.ARRAY` type provides all operations defined on the
    core :class:`.types.ARRAY` type, including support for "dimensions",
    indexed access, and simple matching such as
    :meth:`.types.ARRAY.Comparator.any` and
    :meth:`.types.ARRAY.Comparator.all`.  :class:`.postgresql.ARRAY` class
    also provides PostgreSQL-specific methods for containment operations,
    including :meth:`.postgresql.ARRAY.Comparator.contains`
    :meth:`.postgresql.ARRAY.Comparator.contained_by`,
    and :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.::

        mytable.c.data.contains([1, 2])

    The :class:`.postgresql.ARRAY` type may not be supported on all
    PostgreSQL DBAPIs; it is currently known to work on psycopg2 only.

    Additionally, the :class:`.postgresql.ARRAY` type does not work directly
    in conjunction with the :class:`.ENUM` type.  For a workaround, see the
    special type at :ref:`postgresql_array_of_enum`.

    .. seealso::

        :class:`.types.ARRAY` - base array type

        :class:`.postgresql.array` - produces a literal array value.

    """

    class Comparator(sqltypes.ARRAY.Comparator):
        """Define comparison operations for :class:`.ARRAY`.

        Note that these operations are in addition to those provided
        by the base :class:`.types.ARRAY.Comparator` class, including
        :meth:`.types.ARRAY.Comparator.any` and
        :meth:`.types.ARRAY.Comparator.all`.

        """

        def contains(self, other, **kwargs):
            """Boolean expression.  Test if elements are a superset of the
            elements of the argument array expression.
            """
            # Renders the PG "@>" operator.
            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)

        def contained_by(self, other):
            """Boolean expression.  Test if elements are a proper subset of the
            elements of the argument array expression.
            """
            # Renders the PG "<@" operator.
            return self.operate(
                CONTAINED_BY, other, result_type=sqltypes.Boolean)

        def overlap(self, other):
            """Boolean expression.  Test if array has elements in common with
            an argument array expression.
            """
            # Renders the PG "&&" operator.
            return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)

    comparator_factory = Comparator

    def __init__(self, item_type, as_tuple=False, dimensions=None,
                 zero_indexes=False):
        """Construct an ARRAY.

        E.g.::

          Column('myarray', ARRAY(Integer))

        Arguments are:

        :param item_type: The data type of items of this array. Note that
          dimensionality is irrelevant here, so multi-dimensional arrays like
          ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
          ``ARRAY(ARRAY(Integer))`` or such.

        :param as_tuple=False: Specify whether return results
          should be converted to tuples from lists. DBAPIs such
          as psycopg2 return lists by default. When tuples are
          returned, the results are hashable.

        :param dimensions: if non-None, the ARRAY will assume a fixed
         number of dimensions.  This will cause the DDL emitted for this
         ARRAY to include the exact number of bracket clauses ``[]``,
         and will also optimize the performance of the type overall.
         Note that PG arrays are always implicitly "non-dimensioned",
         meaning they can store any number of dimensions no matter how
         they were declared.

        :param zero_indexes=False: when True, index values will be converted
         between Python zero-based and PostgreSQL one-based indexes, e.g.
         a value of one will be added to all index values before passing
         to the database.

         .. versionadded:: 0.9.5

        """
        if isinstance(item_type, ARRAY):
            raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
                             "handles multi-dimensional arrays of basetype")
        if isinstance(item_type, type):
            # Allow passing the type class itself, e.g. ARRAY(Integer).
            item_type = item_type()
        self.item_type = item_type
        self.as_tuple = as_tuple
        self.dimensions = dimensions
        self.zero_indexes = zero_indexes

    @property
    def hashable(self):
        # Only tuple-converted results are hashable (lists are not).
        return self.as_tuple

    @property
    def python_type(self):
        return list

    def compare_values(self, x, y):
        return x == y

    def _set_parent(self, column):
        """Support SchemaEventTarget"""
        # Propagate the schema event to the member type (e.g. ENUM),
        # if it participates in schema events.
        if isinstance(self.item_type, SchemaEventTarget):
            self.item_type._set_parent(column)

    def _set_parent_with_dispatch(self, parent):
        """Support SchemaEventTarget"""
        if isinstance(self.item_type, SchemaEventTarget):
            self.item_type._set_parent_with_dispatch(parent)

    def _proc_array(self, arr, itemproc, dim, collection):
        # Recursively apply ``itemproc`` to each scalar element of a
        # possibly-nested array value, rebuilding every level using
        # ``collection`` (list or tuple).  ``dim`` counts the remaining
        # declared dimensions; when None, nesting is detected by
        # inspecting the first element at each level.
        if dim is None:
            arr = list(arr)
        if dim == 1 or dim is None and (
                # this has to be (list, tuple), or at least
                # not hasattr('__iter__'), since Py3K strings
                # etc. have __iter__
                not arr or not isinstance(arr[0], (list, tuple))):
            # Innermost dimension: apply the element processor, if any.
            if itemproc:
                return collection(itemproc(x) for x in arr)
            else:
                return collection(arr)
        else:
            # Outer dimension: recurse one level deeper per element.
            return collection(
                self._proc_array(
                    x, itemproc,
                    dim - 1 if dim is not None else None,
                    collection)
                for x in arr
            )

    def bind_processor(self, dialect):
        # Processor for the member type, applied to each scalar element
        # on the way to the DBAPI; may be None if the member type needs
        # no conversion.
        item_proc = self.item_type.dialect_impl(dialect).\
            bind_processor(dialect)

        def process(value):
            if value is None:
                return value
            else:
                # Outbound values are always rebuilt as (nested) lists.
                return self._proc_array(
                    value,
                    item_proc,
                    self.dimensions,
                    list)
        return process

    def result_processor(self, dialect, coltype):
        item_proc = self.item_type.dialect_impl(dialect).\
            result_processor(dialect, coltype)

        def process(value):
            if value is None:
                return value
            else:
                # Inbound values are rebuilt as tuples when as_tuple is
                # set (hashable results), otherwise as lists.
                return self._proc_array(
                    value,
                    item_proc,
                    self.dimensions,
                    tuple if self.as_tuple else list)
        return process
# Map the PG internal type name '_array' to this type; presumably used
# by the dialect's reflection lookup in .base -- confirm against caller.
ischema_names['_array'] = ARRAY

View File

@ -0,0 +1,213 @@
# postgresql/on_conflict.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ...sql.elements import ClauseElement, _literal_as_binds
from ...sql.dml import Insert as StandardInsert
from ...sql.expression import alias
from ...sql import schema
from ...util.langhelpers import public_factory
from ...sql.base import _generative
from ... import util
from . import ext
__all__ = ('Insert', 'insert')
class Insert(StandardInsert):
    """PostgreSQL-specific implementation of INSERT.

    Adds methods for PG-specific syntaxes such as ON CONFLICT.

    .. versionadded:: 1.1
    """

    @util.memoized_property
    def excluded(self):
        """Provide the ``excluded`` namespace for an ON CONFLICT statement.

        PG's ON CONFLICT clause allows reference to the row that would
        be inserted, known as ``excluded``.  This attribute provides
        all columns in this row to be referenceable.

        .. seealso::

            :ref:`postgresql_insert_on_conflict` - example of how
            to use :attr:`.Insert.excluded`

        """
        # Expose the would-be-inserted row's columns under the name
        # "excluded", matching PG's special alias.
        excluded_alias = alias(self.table, name='excluded')
        return excluded_alias.columns

    @_generative
    def on_conflict_do_update(
            self,
            constraint=None, index_elements=None,
            index_where=None, set_=None, where=None):
        """
        Specifies a DO UPDATE SET action for ON CONFLICT clause.

        Either the ``constraint`` or ``index_elements`` argument is
        required, but only one of these can be specified.

        :param constraint:
         The name of a unique or exclusion constraint on the table,
         or the constraint object itself if it has a .name attribute.

        :param index_elements:
         A sequence consisting of string column names, :class:`.Column`
         objects, or other column expression objects that will be used
         to infer a target index.

        :param index_where:
         Additional WHERE criterion that can be used to infer a
         conditional target index.

        :param set_:
         Required argument. A dictionary or other mapping object
         with column names as keys and expressions or literals as values,
         specifying the ``SET`` actions to take.
         If the target :class:`.Column` specifies a ".key" attribute
         distinct from the column name, that key should be used.

         .. warning:: This dictionary does **not** take into account
            Python-specified default UPDATE values or generation functions,
            e.g. those specified using :paramref:`.Column.onupdate`.
            These values will not be exercised for an ON CONFLICT style of
            UPDATE, unless they are manually specified in the
            :paramref:`.Insert.on_conflict_do_update.set_` dictionary.

        :param where:
         Optional argument. If present, can be a literal SQL
         string or an acceptable expression for a ``WHERE`` clause
         that restricts the rows affected by ``DO UPDATE SET``. Rows
         not meeting the ``WHERE`` condition will not be updated
         (effectively a ``DO NOTHING`` for those rows).

        .. versionadded:: 1.1

        .. seealso::

            :ref:`postgresql_insert_on_conflict`

        """
        # The PG compiler renders this attribute after the VALUES clause.
        clause = OnConflictDoUpdate(
            constraint, index_elements, index_where, set_, where)
        self._post_values_clause = clause
        return self

    @_generative
    def on_conflict_do_nothing(
            self,
            constraint=None, index_elements=None, index_where=None):
        """
        Specifies a DO NOTHING action for ON CONFLICT clause.

        The ``constraint`` and ``index_elements`` arguments
        are optional, but only one of these can be specified.

        :param constraint:
         The name of a unique or exclusion constraint on the table,
         or the constraint object itself if it has a .name attribute.

        :param index_elements:
         A sequence consisting of string column names, :class:`.Column`
         objects, or other column expression objects that will be used
         to infer a target index.

        :param index_where:
         Additional WHERE criterion that can be used to infer a
         conditional target index.

        .. versionadded:: 1.1

        .. seealso::

            :ref:`postgresql_insert_on_conflict`

        """
        clause = OnConflictDoNothing(
            constraint, index_elements, index_where)
        self._post_values_clause = clause
        return self
# Public standalone factory for the PG-specific Insert construct,
# analogous to core ``sqlalchemy.insert()``.
insert = public_factory(Insert, '.dialects.postgresql.insert')
class OnConflictClause(ClauseElement):
    # Base element for ON CONFLICT clauses.  Resolves the user-supplied
    # conflict target into either a named constraint (constraint_target)
    # or a set of inferred index expressions (inferred_target_elements /
    # inferred_target_whereclause); exactly one form, or neither, is set.
    def __init__(
            self,
            constraint=None,
            index_elements=None,
            index_where=None):
        if constraint is not None:
            # Accept a schema object in place of a name: use its ``.name``
            # when set, otherwise keep the object itself so the type-based
            # branches below can extract columns/expressions from it.
            if not isinstance(constraint, util.string_types) and \
                    isinstance(constraint, (
                        schema.Index, schema.Constraint,
                        ext.ExcludeConstraint)):
                constraint = getattr(constraint, 'name') or constraint

        if constraint is not None:
            # Conflict targets are mutually exclusive by construction.
            if index_elements is not None:
                raise ValueError(
                    "'constraint' and 'index_elements' are mutually exclusive")

            if isinstance(constraint, util.string_types):
                # Named target: ON CONFLICT ON CONSTRAINT <name>.
                self.constraint_target = constraint
                self.inferred_target_elements = None
                self.inferred_target_whereclause = None
            elif isinstance(constraint, schema.Index):
                # Unnamed Index: infer target columns from its expressions
                # plus any partial-index WHERE clause.
                index_elements = constraint.expressions
                index_where = \
                    constraint.dialect_options['postgresql'].get("where")
            elif isinstance(constraint, ext.ExcludeConstraint):
                index_elements = constraint.columns
                index_where = constraint.where
            else:
                # Other unnamed Constraint objects (e.g. unique constraints).
                index_elements = constraint.columns
                index_where = \
                    constraint.dialect_options['postgresql'].get("where")

        if index_elements is not None:
            self.constraint_target = None
            self.inferred_target_elements = index_elements
            self.inferred_target_whereclause = index_where
        elif constraint is None:
            # No conflict target at all; both arguments are optional for
            # the DO NOTHING form (see Insert.on_conflict_do_nothing).
            self.constraint_target = self.inferred_target_elements = \
                self.inferred_target_whereclause = None
class OnConflictDoNothing(OnConflictClause):
__visit_name__ = 'on_conflict_do_nothing'