Browse Source

Packages update

pull/84/head
Ruud 13 years ago
parent
commit
02e01fb2d6
  1. 2
      couchpotato/core/providers/movie/themoviedb/main.py
  2. 2353
      libs/argparse.py
  3. 7
      libs/decorator.py
  4. 40
      libs/flask/app.py
  5. 44
      libs/flask/blueprints.py
  6. 6
      libs/flask/ctx.py
  7. 86
      libs/flask/helpers.py
  8. 22
      libs/flask/views.py
  9. 26
      libs/jinja2/filters.py
  10. 17
      libs/jinja2/runtime.py
  11. 19
      libs/jinja2/utils.py
  12. 2
      libs/migrate/__init__.py
  13. 2
      libs/migrate/changeset/__init__.py
  14. 108
      libs/migrate/changeset/ansisql.py
  15. 3
      libs/migrate/changeset/constraint.py
  16. 12
      libs/migrate/changeset/databases/firebird.py
  17. 43
      libs/migrate/changeset/databases/mysql.py
  18. 7
      libs/migrate/changeset/databases/oracle.py
  19. 12
      libs/migrate/changeset/databases/postgres.py
  20. 10
      libs/migrate/changeset/databases/sqlite.py
  21. 32
      libs/migrate/changeset/schema.py
  22. 17
      libs/migrate/versioning/api.py
  23. 183
      libs/migrate/versioning/genmodel.py
  24. 15
      libs/migrate/versioning/repository.py
  25. 21
      libs/migrate/versioning/schema.py
  26. 77
      libs/migrate/versioning/schemadiff.py
  27. 16
      libs/migrate/versioning/script/py.py
  28. 1
      libs/migrate/versioning/template.py
  29. 5
      libs/migrate/versioning/templates/manage.py_tmpl
  30. 4
      libs/migrate/versioning/templates/manage/default.py_tmpl
  31. 3
      libs/migrate/versioning/templates/manage/pylons.py_tmpl
  32. 5
      libs/migrate/versioning/templates/repository/default/migrate.cfg
  33. 5
      libs/migrate/versioning/templates/repository/pylons/migrate.cfg
  34. 6
      libs/migrate/versioning/templates/script/default.py_tmpl
  35. 6
      libs/migrate/versioning/templates/script/pylons.py_tmpl
  36. 39
      libs/migrate/versioning/version.py
  37. 1542
      libs/pytwitter/__init__.py
  38. 4
      libs/requests/__init__.py
  39. 3
      libs/requests/api.py
  40. 30
      libs/requests/async.py
  41. 40
      libs/requests/auth.py
  42. 102
      libs/requests/compat.py
  43. 6
      libs/requests/defaults.py
  44. 18
      libs/requests/hooks.py
  45. 218
      libs/requests/models.py
  46. 2
      libs/requests/packages/oreos/monkeys.py
  47. 4
      libs/requests/packages/urllib3/__init__.py
  48. 2
      libs/requests/packages/urllib3/_collections.py
  49. 124
      libs/requests/packages/urllib3/connectionpool.py
  50. 41
      libs/requests/packages/urllib3/exceptions.py
  51. 33
      libs/requests/packages/urllib3/filepost.py
  52. 47
      libs/requests/packages/urllib3/packages/mimetools_choose_boundary/__init__.py
  53. 372
      libs/requests/packages/urllib3/packages/six.py
  54. 28
      libs/requests/packages/urllib3/poolmanager.py
  55. 8
      libs/requests/packages/urllib3/request.py
  56. 83
      libs/requests/packages/urllib3/response.py
  57. 10
      libs/requests/sessions.py
  58. 2
      libs/requests/status_codes.py
  59. 4
      libs/requests/structures.py
  60. 61
      libs/requests/utils.py
  61. 438
      libs/simplejson/__init__.py
  62. 2602
      libs/simplejson/_speedups.c
  63. 421
      libs/simplejson/decoder.py
  64. 501
      libs/simplejson/encoder.py
  65. 119
      libs/simplejson/ordered_dict.py
  66. 77
      libs/simplejson/scanner.py
  67. 39
      libs/simplejson/tool.py
  68. 11
      libs/sqlalchemy/__init__.py
  69. 64
      libs/sqlalchemy/cextension/processors.c
  70. 6
      libs/sqlalchemy/cextension/resultproxy.c
  71. 2
      libs/sqlalchemy/connectors/__init__.py
  72. 11
      libs/sqlalchemy/connectors/mxodbc.py
  73. 150
      libs/sqlalchemy/connectors/mysqldb.py
  74. 44
      libs/sqlalchemy/connectors/pyodbc.py
  75. 4
      libs/sqlalchemy/connectors/zxJDBC.py
  76. 4
      libs/sqlalchemy/databases/__init__.py
  77. 3
      libs/sqlalchemy/dialects/__init__.py
  78. 9
      libs/sqlalchemy/dialects/access/base.py
  79. 18
      libs/sqlalchemy/dialects/drizzle/__init__.py
  80. 582
      libs/sqlalchemy/dialects/drizzle/base.py
  81. 69
      libs/sqlalchemy/dialects/drizzle/mysqldb.py
  82. 2
      libs/sqlalchemy/dialects/firebird/__init__.py
  83. 34
      libs/sqlalchemy/dialects/firebird/base.py
  84. 25
      libs/sqlalchemy/dialects/firebird/kinterbasdb.py
  85. 2
      libs/sqlalchemy/dialects/informix/__init__.py
  86. 125
      libs/sqlalchemy/dialects/informix/base.py
  87. 4
      libs/sqlalchemy/dialects/informix/informixdb.py
  88. 2
      libs/sqlalchemy/dialects/maxdb/__init__.py
  89. 77
      libs/sqlalchemy/dialects/maxdb/base.py
  90. 2
      libs/sqlalchemy/dialects/maxdb/sapdb.py
  91. 2
      libs/sqlalchemy/dialects/mssql/__init__.py
  92. 5
      libs/sqlalchemy/dialects/mssql/adodbapi.py
  93. 213
      libs/sqlalchemy/dialects/mssql/base.py
  94. 4
      libs/sqlalchemy/dialects/mssql/information_schema.py
  95. 8
      libs/sqlalchemy/dialects/mssql/mxodbc.py
  96. 6
      libs/sqlalchemy/dialects/mssql/pymssql.py
  97. 49
      libs/sqlalchemy/dialects/mssql/pyodbc.py
  98. 2
      libs/sqlalchemy/dialects/mssql/zxjdbc.py
  99. 4
      libs/sqlalchemy/dialects/mysql/__init__.py
  100. 317
      libs/sqlalchemy/dialects/mysql/base.py

2
couchpotato/core/providers/movie/themoviedb/main.py

@ -16,7 +16,7 @@ class TheMovieDb(MovieProvider):
addEvent('movie.info_by_tmdb', self.getInfoByTMDBId)
# Use base wrapper
tmdb.Config.api_key = self.conf('api_key')
tmdb.configure(self.conf('api_key'))
def byHash(self, file):
''' Find movie by hash '''

2353
libs/argparse.py

File diff suppressed because it is too large

7
libs/decorator.py

@ -28,7 +28,7 @@ Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
__version__ = '3.3.0'
__version__ = '3.3.2'
__all__ = ["decorator", "FunctionMaker", "partial"]
@ -127,6 +127,7 @@ class FunctionMaker(object):
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.func_defaults = getattr(self, 'defaults', ())
func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
callermodule = sys._getframe(3).f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
@ -193,7 +194,7 @@ def decorator(caller, func=None):
evaldict['_func_'] = func
return FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, undecorated=func)
evaldict, undecorated=func, __wrapped__=func)
else: # returns a decorator
if isinstance(caller, partial):
return partial(decorator, caller)
@ -205,5 +206,5 @@ def decorator(caller, func=None):
return FunctionMaker.create(
'%s(%s)' % (caller.__name__, first),
'return decorator(_call_, %s)' % first,
evaldict, undecorated=caller,
evaldict, undecorated=caller, __wrapped__=caller,
doc=caller.__doc__, module=caller.__module__)

40
libs/flask/app.py

@ -664,7 +664,7 @@ class Flask(_PackageBoundObject):
# existing views.
context.update(orig_ctx)
def run(self, host='127.0.0.1', port=5000, debug=None, **options):
def run(self, host=None, port=None, debug=None, **options):
"""Runs the application on a local development server. If the
:attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
@ -684,9 +684,10 @@ class Flask(_PackageBoundObject):
won't catch any exceptions because there won't be any to
catch.
:param host: the hostname to listen on. set this to ``'0.0.0.0'``
to have the server available externally as well.
:param port: the port of the webserver
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'``.
:param port: the port of the webserver. Defaults to ``5000``.
:param debug: if given, enable or disable debug mode.
See :attr:`debug`.
:param options: the options to be forwarded to the underlying
@ -695,6 +696,10 @@ class Flask(_PackageBoundObject):
information.
"""
from werkzeug.serving import run_simple
if host is None:
host = '127.0.0.1'
if port is None:
port = 5000
if debug is not None:
self.debug = bool(debug)
options.setdefault('use_reloader', self.debug)
@ -711,6 +716,17 @@ class Flask(_PackageBoundObject):
"""Creates a test client for this application. For information
about unit testing head over to :ref:`testing`.
Note that if you are testing for assertions or exceptions in your
application code, you must set ``app.testing = True`` in order for the
exceptions to propagate to the test client. Otherwise, the exception
will be handled by the application (not visible to the test client) and
the only indication of an AssertionError or other exception will be a
500 status code response to the test client. See the :attr:`testing`
attribute. For example::
app.testing = True
client = app.test_client()
The test client can be used in a `with` block to defer the closing down
of the context until the end of the `with` block. This is useful if
you want to access the context locals for testing::
@ -1018,11 +1034,21 @@ class Flask(_PackageBoundObject):
function name will be used.
"""
def decorator(f):
self.jinja_env.filters[name or f.__name__] = f
self.add_template_filter(f, name=name)
return f
return decorator
@setupmethod
def add_template_filter(self, f, name=None):
"""Register a custom template filter. Works exactly like the
:meth:`template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
self.jinja_env.filters[name or f.__name__] = f
@setupmethod
def before_request(self, f):
"""Registers a function to run before each request."""
self.before_request_funcs.setdefault(None, []).append(f)
@ -1105,7 +1131,7 @@ class Flask(_PackageBoundObject):
registered error handlers and fall back to returning the
exception as response.
.. versionadded: 0.3
.. versionadded:: 0.3
"""
handlers = self.error_handler_spec.get(request.blueprint)
if handlers and e.code in handlers:
@ -1174,7 +1200,7 @@ class Flask(_PackageBoundObject):
for a 500 internal server error is used. If no such handler
exists, a default 500 internal server error message is displayed.
.. versionadded: 0.3
.. versionadded:: 0.3
"""
exc_type, exc_value, tb = sys.exc_info()

44
libs/flask/blueprints.py

@ -59,7 +59,7 @@ class BlueprintSetupState(object):
self.url_defaults = dict(self.blueprint.url_values_defaults)
self.url_defaults.update(self.options.get('url_defaults', ()))
def add_url_rule(self, rule, endpoint = None, view_func = None, **options):
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""A helper method to register a rule (and optionally a view function)
to the application. The endpoint is automatically prefixed with the
blueprint's name.
@ -73,7 +73,7 @@ class BlueprintSetupState(object):
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
view_func, defaults = defaults, **options)
view_func, defaults=defaults, **options)
class Blueprint(_PackageBoundObject):
@ -89,9 +89,9 @@ class Blueprint(_PackageBoundObject):
warn_on_modifications = False
_got_registered_once = False
def __init__(self, name, import_name, static_folder = None,
static_url_path = None, template_folder = None,
url_prefix = None, subdomain = None, url_defaults = None):
def __init__(self, name, import_name, static_folder=None,
static_url_path=None, template_folder=None,
url_prefix=None, subdomain=None, url_defaults=None):
_PackageBoundObject.__init__(self, import_name, template_folder)
self.name = name
self.url_prefix = url_prefix
@ -128,14 +128,14 @@ class Blueprint(_PackageBoundObject):
func(state)
return self.record(update_wrapper(wrapper, func))
def make_setup_state(self, app, options, first_registration = False):
def make_setup_state(self, app, options, first_registration=False):
"""Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
object that is later passed to the register callback functions.
Subclasses can override this to return a subclass of the setup state.
"""
return BlueprintSetupState(self, app, options, first_registration)
def register(self, app, options, first_registration = False):
def register(self, app, options, first_registration=False):
"""Called by :meth:`Flask.register_blueprint` to register a blueprint
on the application. This can be overridden to customize the register
behavior. Keyword arguments from
@ -146,8 +146,8 @@ class Blueprint(_PackageBoundObject):
state = self.make_setup_state(app, options, first_registration)
if self.has_static_folder:
state.add_url_rule(self.static_url_path + '/<path:filename>',
view_func = self.send_static_file,
endpoint = 'static')
view_func=self.send_static_file,
endpoint='static')
for deferred in self.deferred_functions:
deferred(state)
@ -162,7 +162,7 @@ class Blueprint(_PackageBoundObject):
return f
return decorator
def add_url_rule(self, rule, endpoint = None, view_func = None, **options):
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for
the :func:`url_for` function is prefixed with the name of the blueprint.
"""
@ -185,6 +185,30 @@ class Blueprint(_PackageBoundObject):
return f
return decorator
def app_template_filter(self, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.template_filter` but for a blueprint.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_filter(f, name=name)
return f
return decorator
def add_app_template_filter(self, f, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.add_template_filter` but for a blueprint. Works exactly
like the :meth:`app_template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.filters[name or f.__name__] = f
self.record_once(register_template)
def before_request(self, f):
"""Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of

6
libs/flask/ctx.py

@ -21,9 +21,9 @@ class _RequestGlobals(object):
def has_request_context():
"""If you have code that wants to test if a request context is there or
not this function can be used. For instance if you want to take advantage
of request information is it's available but fail silently if the request
object is unavailable.
not this function can be used. For instance, you may want to take advantage
of request information if the request object is available, but fail
silently if it is unavailable.
::

86
libs/flask/helpers.py

@ -11,8 +11,10 @@
from __future__ import with_statement
import imp
import os
import sys
import pkgutil
import posixpath
import mimetypes
from time import time
@ -249,7 +251,7 @@ def flash(message, category='message'):
flashed message from the session and to display it to the user,
the template has to call :func:`get_flashed_messages`.
.. versionchanged: 0.3
.. versionchanged:: 0.3
`category` parameter added.
:param message: the message to be flashed.
@ -262,30 +264,40 @@ def flash(message, category='message'):
session.setdefault('_flashes', []).append((category, message))
def get_flashed_messages(with_categories=False):
def get_flashed_messages(with_categories=False, category_filter=[]):
"""Pulls all flashed messages from the session and returns them.
Further calls in the same request to the function will return
the same messages. By default just the messages are returned,
but when `with_categories` is set to `True`, the return value will
be a list of tuples in the form ``(category, message)`` instead.
Example usage:
Filter the flashed messages to one or more categories by providing those
categories in `category_filter`. This allows rendering categories in
separate html blocks. The `with_categories` and `category_filter`
arguments are distinct:
.. sourcecode:: html+jinja
* `with_categories` controls whether categories are returned with message
text (`True` gives a tuple, where `False` gives just the message text).
* `category_filter` filters the messages down to only those matching the
provided categories.
{% for category, msg in get_flashed_messages(with_categories=true) %}
<p class=flash-{{ category }}>{{ msg }}
{% endfor %}
See :ref:`message-flashing-pattern` for examples.
.. versionchanged:: 0.3
`with_categories` parameter added.
.. versionchanged:: 0.9
`category_filter` parameter added.
:param with_categories: set to `True` to also receive categories.
:param category_filter: whitelist of categories to limit return values
"""
flashes = _request_ctx_stack.top.flashes
if flashes is None:
_request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
if '_flashes' in session else []
if category_filter:
flashes = filter(lambda f: f[0] in category_filter, flashes)
if not with_categories:
return [x[1] for x in flashes]
return flashes
@ -492,14 +504,19 @@ def get_root_path(import_name):
Not to be confused with the package path returned by :func:`find_package`.
"""
__import__(import_name)
try:
directory = os.path.dirname(sys.modules[import_name].__file__)
return os.path.abspath(directory)
except AttributeError:
# this is necessary in case we are running from the interactive
# python shell. It will never be used for production code however
loader = pkgutil.get_loader(import_name)
if loader is None or import_name == '__main__':
# import name is not found, or interactive/main module
return os.getcwd()
# For .egg, zipimporter does not have get_filename until Python 2.7.
if hasattr(loader, 'get_filename'):
filepath = loader.get_filename(import_name)
else:
# Fall back to imports.
__import__(import_name)
filepath = sys.modules[import_name].__file__
# filepath is import_name.py for a module, or __init__.py for a package.
return os.path.dirname(os.path.abspath(filepath))
def find_package(import_name):
@ -510,25 +527,34 @@ def find_package(import_name):
import the module. The prefix is the path below which a UNIX like
folder structure exists (lib, share etc.).
"""
__import__(import_name)
root_mod = sys.modules[import_name.split('.')[0]]
package_path = getattr(root_mod, '__file__', None)
if package_path is None:
# support for the interactive python shell
root_mod_name = import_name.split('.')[0]
loader = pkgutil.get_loader(root_mod_name)
if loader is None or import_name == '__main__':
# import name is not found, or interactive/main module
package_path = os.getcwd()
else:
package_path = os.path.abspath(os.path.dirname(package_path))
if hasattr(root_mod, '__path__'):
package_path = os.path.dirname(package_path)
# leave the egg wrapper folder or the actual .egg on the filesystem
test_package_path = package_path
if os.path.basename(test_package_path).endswith('.egg'):
test_package_path = os.path.dirname(test_package_path)
site_parent, site_folder = os.path.split(test_package_path)
# For .egg, zipimporter does not have get_filename until Python 2.7.
if hasattr(loader, 'get_filename'):
filename = loader.get_filename(root_mod_name)
elif hasattr(loader, 'archive'):
# zipimporter's loader.archive points to the .egg or .zip
# archive filename is dropped in call to dirname below.
filename = loader.archive
else:
# At least one loader is missing both get_filename and archive:
# Google App Engine's HardenedModulesHook
#
# Fall back to imports.
__import__(import_name)
filename = sys.modules[import_name].__file__
package_path = os.path.abspath(os.path.dirname(filename))
# package_path ends with __init__.py for a package
if loader.is_package(root_mod_name):
package_path = os.path.dirname(package_path)
site_parent, site_folder = os.path.split(package_path)
py_prefix = os.path.abspath(sys.prefix)
if test_package_path.startswith(py_prefix):
if package_path.startswith(py_prefix):
return py_prefix, package_path
elif site_folder.lower() == 'site-packages':
parent, folder = os.path.split(site_parent)

22
libs/flask/views.py

@ -3,7 +3,7 @@
flask.views
~~~~~~~~~~~
This module provides class based views inspired by the ones in Django.
This module provides class-based views inspired by the ones in Django.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
@ -12,7 +12,7 @@ from .globals import request
http_method_funcs = frozenset(['get', 'post', 'head', 'options',
'delete', 'put', 'trace'])
'delete', 'put', 'trace', 'patch'])
class View(object):
@ -50,7 +50,7 @@ class View(object):
#: A for which methods this pluggable view can handle.
methods = None
#: The canonical way to decorate class based views is to decorate the
#: The canonical way to decorate class-based views is to decorate the
#: return value of as_view(). However since this moves parts of the
#: logic from the class declaration to the place where it's hooked
#: into the routing system.
@ -70,10 +70,10 @@ class View(object):
@classmethod
def as_view(cls, name, *class_args, **class_kwargs):
"""Converts the class into an actual view function that can be
used with the routing system. What it does internally is generating
a function on the fly that will instanciate the :class:`View`
on each request and call the :meth:`dispatch_request` method on it.
"""Converts the class into an actual view function that can be used
with the routing system. Internally this generates a function on the
fly which will instantiate the :class:`View` on each request and call
the :meth:`dispatch_request` method on it.
The arguments passed to :meth:`as_view` are forwarded to the
constructor of the class.
@ -89,8 +89,8 @@ class View(object):
view = decorator(view)
# we attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class based
# view this thing came from, secondly it's also used for instanciating
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
# the view class so you can actually replace it with something else
# for testing purposes and debugging.
view.view_class = cls
@ -120,7 +120,7 @@ class MethodViewType(type):
class MethodView(View):
"""Like a regular class based view but that dispatches requests to
"""Like a regular class-based view but that dispatches requests to
particular methods. For instance if you implement a method called
:meth:`get` it means you will response to ``'GET'`` requests and
the :meth:`dispatch_request` implementation will automatically
@ -146,5 +146,5 @@ class MethodView(View):
# retry with GET
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
assert meth is not None, 'Not implemented method %r' % request.method
assert meth is not None, 'Unimplemented method %r' % request.method
return meth(*args, **kwargs)

26
libs/jinja2/filters.py

@ -13,7 +13,8 @@ import math
from random import choice
from operator import itemgetter
from itertools import imap, groupby
from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode
from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
unicode_urlencode
from jinja2.runtime import Undefined
from jinja2.exceptions import FilterArgumentError
@ -70,6 +71,26 @@ def do_forceescape(value):
return escape(unicode(value))
def do_urlencode(value):
"""Escape strings for use in URLs (uses UTF-8 encoding). It accepts both
dictionaries and regular strings as well as pairwise iterables.
.. versionadded:: 2.7
"""
itemiter = None
if isinstance(value, dict):
itemiter = value.iteritems()
elif not isinstance(value, basestring):
try:
itemiter = iter(value)
except TypeError:
pass
if itemiter is None:
return unicode_urlencode(value)
return u'&'.join(unicode_urlencode(k) + '=' +
unicode_urlencode(v) for k, v in itemiter)
@evalcontextfilter
def do_replace(eval_ctx, s, old, new, count=None):
"""Return a copy of the value with all occurrences of a substring
@ -797,5 +818,6 @@ FILTERS = {
'round': do_round,
'groupby': do_groupby,
'safe': do_mark_safe,
'xmlattr': do_xmlattr
'xmlattr': do_xmlattr,
'urlencode': do_urlencode
}

17
libs/jinja2/runtime.py

@ -30,6 +30,8 @@ to_string = unicode
#: the identity function. Useful for certain things in the environment
identity = lambda x: x
_last_iteration = object()
def markup_join(seq):
"""Concatenation that escapes if necessary and converts to unicode."""
@ -270,6 +272,7 @@ class LoopContext(object):
def __init__(self, iterable, recurse=None):
self._iterator = iter(iterable)
self._recurse = recurse
self._after = self._safe_next()
self.index0 = -1
# try to get the length of the iterable early. This must be done
@ -288,7 +291,7 @@ class LoopContext(object):
return args[self.index0 % len(args)]
first = property(lambda x: x.index0 == 0)
last = property(lambda x: x.index0 + 1 == x.length)
last = property(lambda x: x._after is _last_iteration)
index = property(lambda x: x.index0 + 1)
revindex = property(lambda x: x.length - x.index0)
revindex0 = property(lambda x: x.length - x.index)
@ -299,6 +302,12 @@ class LoopContext(object):
def __iter__(self):
return LoopContextIterator(self)
def _safe_next(self):
try:
return next(self._iterator)
except StopIteration:
return _last_iteration
@internalcode
def loop(self, iterable):
if self._recurse is None:
@ -344,7 +353,11 @@ class LoopContextIterator(object):
def next(self):
ctx = self.context
ctx.index0 += 1
return next(ctx._iterator), ctx
if ctx._after is _last_iteration:
raise StopIteration()
next_elem = ctx._after
ctx._after = ctx._safe_next()
return next_elem, ctx
class Macro(object):

19
libs/jinja2/utils.py

@ -12,6 +12,10 @@ import re
import sys
import errno
try:
from urllib.parse import quote_from_bytes as url_quote
except ImportError:
from urllib import quote as url_quote
try:
from thread import allocate_lock
except ImportError:
from dummy_thread import allocate_lock
@ -349,6 +353,21 @@ def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
def unicode_urlencode(obj, charset='utf-8'):
"""URL escapes a single bytestring or unicode string with the
given charset if applicable to URL safe quoting under all rules
that need to be considered under all supported Python versions.
If non strings are provided they are converted to their unicode
representation first.
"""
if not isinstance(obj, basestring):
obj = unicode(obj)
if isinstance(obj, unicode):
obj = obj.encode(charset)
return unicode(url_quote(obj))
class LRUCache(object):
"""A simple LRU Cache implementation."""

2
libs/migrate/__init__.py

@ -7,3 +7,5 @@
from migrate.versioning import *
from migrate.changeset import *
__version__ = '0.7.2'

2
libs/migrate/changeset/__init__.py

@ -13,7 +13,7 @@ from sqlalchemy import __version__ as _sa_version
warnings.simplefilter('always', DeprecationWarning)
_sa_version = tuple(int(re.match("\d+", x).group(0)) for x in _sa_version.split("."))
SQLA_06 = _sa_version >= (0, 6)
SQLA_07 = _sa_version >= (0, 7)
del re
del _sa_version

108
libs/migrate/changeset/ansisql.py

@ -17,23 +17,19 @@ from sqlalchemy.schema import (ForeignKeyConstraint,
Index)
from migrate import exceptions
from migrate.changeset import constraint, SQLA_06
from migrate.changeset import constraint
if not SQLA_06:
from sqlalchemy.sql.compiler import SchemaGenerator, SchemaDropper
else:
from sqlalchemy.schema import AddConstraint, DropConstraint
from sqlalchemy.sql.compiler import DDLCompiler
SchemaGenerator = SchemaDropper = DDLCompiler
from sqlalchemy.schema import AddConstraint, DropConstraint
from sqlalchemy.sql.compiler import DDLCompiler
SchemaGenerator = SchemaDropper = DDLCompiler
class AlterTableVisitor(SchemaVisitor):
"""Common operations for ``ALTER TABLE`` statements."""
if SQLA_06:
# engine.Compiler looks for .statement
# when it spawns off a new compiler
statement = ClauseElement()
# engine.Compiler looks for .statement
# when it spawns off a new compiler
statement = ClauseElement()
def append(self, s):
"""Append content to the SchemaIterator's query buffer."""
@ -116,16 +112,15 @@ class ANSIColumnGenerator(AlterTableVisitor, SchemaGenerator):
# SA bounds FK constraints to table, add manually
for fk in column.foreign_keys:
self.add_foreignkey(fk.constraint)
# add primary key constraint if needed
if column.primary_key_name:
cons = constraint.PrimaryKeyConstraint(column,
name=column.primary_key_name)
cons.create()
if SQLA_06:
def add_foreignkey(self, fk):
self.connection.execute(AddConstraint(fk))
def add_foreignkey(self, fk):
self.connection.execute(AddConstraint(fk))
class ANSIColumnDropper(AlterTableVisitor, SchemaDropper):
"""Extends ANSI SQL dropper for column dropping (``ALTER TABLE
@ -232,10 +227,7 @@ class ANSISchemaChanger(AlterTableVisitor, SchemaGenerator):
def _visit_column_type(self, table, column, delta):
type_ = delta['type']
if SQLA_06:
type_text = str(type_.compile(dialect=self.dialect))
else:
type_text = type_.dialect_impl(self.dialect).get_col_spec()
type_text = str(type_.compile(dialect=self.dialect))
self.append("TYPE %s" % type_text)
def _visit_column_name(self, table, column, delta):
@ -279,75 +271,17 @@ class ANSIConstraintCommon(AlterTableVisitor):
def visit_migrate_unique_constraint(self, *p, **k):
self._visit_constraint(*p, **k)
if SQLA_06:
class ANSIConstraintGenerator(ANSIConstraintCommon, SchemaGenerator):
def _visit_constraint(self, constraint):
constraint.name = self.get_constraint_name(constraint)
self.append(self.process(AddConstraint(constraint)))
self.execute()
class ANSIConstraintDropper(ANSIConstraintCommon, SchemaDropper):
def _visit_constraint(self, constraint):
constraint.name = self.get_constraint_name(constraint)
self.append(self.process(DropConstraint(constraint, cascade=constraint.cascade)))
self.execute()
else:
class ANSIConstraintGenerator(ANSIConstraintCommon, SchemaGenerator):
class ANSIConstraintGenerator(ANSIConstraintCommon, SchemaGenerator):
def _visit_constraint(self, constraint):
constraint.name = self.get_constraint_name(constraint)
self.append(self.process(AddConstraint(constraint)))
self.execute()
def get_constraint_specification(self, cons, **kwargs):
"""Constaint SQL generators.
We cannot use SA visitors because they append comma.
"""
if isinstance(cons, PrimaryKeyConstraint):
if cons.name is not None:
self.append("CONSTRAINT %s " % self.preparer.format_constraint(cons))
self.append("PRIMARY KEY ")
self.append("(%s)" % ', '.join(self.preparer.quote(c.name, c.quote)
for c in cons))
self.define_constraint_deferrability(cons)
elif isinstance(cons, ForeignKeyConstraint):
self.define_foreign_key(cons)
elif isinstance(cons, CheckConstraint):
if cons.name is not None:
self.append("CONSTRAINT %s " %
self.preparer.format_constraint(cons))
self.append("CHECK (%s)" % cons.sqltext)
self.define_constraint_deferrability(cons)
elif isinstance(cons, UniqueConstraint):
if cons.name is not None:
self.append("CONSTRAINT %s " %
self.preparer.format_constraint(cons))
self.append("UNIQUE (%s)" % \
(', '.join(self.preparer.quote(c.name, c.quote) for c in cons)))
self.define_constraint_deferrability(cons)
else:
raise exceptions.InvalidConstraintError(cons)
def _visit_constraint(self, constraint):
table = self.start_alter_table(constraint)
constraint.name = self.get_constraint_name(constraint)
self.append("ADD ")
self.get_constraint_specification(constraint)
self.execute()
class ANSIConstraintDropper(ANSIConstraintCommon, SchemaDropper):
def _visit_constraint(self, constraint):
self.start_alter_table(constraint)
self.append("DROP CONSTRAINT ")
constraint.name = self.get_constraint_name(constraint)
self.append(self.preparer.format_constraint(constraint))
if constraint.cascade:
self.cascade_constraint(constraint)
self.execute()
def cascade_constraint(self, constraint):
self.append(" CASCADE")
class ANSIConstraintDropper(ANSIConstraintCommon, SchemaDropper):
    """DDL dropper built on SQLAlchemy's ``DropConstraint`` DDL element.

    Redefines ``ANSIConstraintDropper`` (declared just above in this file),
    replacing the hand-built ALTER TABLE path with SQLAlchemy's
    ``DropConstraint`` construct, forwarding the ``cascade`` flag.
    """

    def _visit_constraint(self, constraint):
        # Name the constraint first (auto-generated if unnamed), then let
        # SQLAlchemy render the DROP CONSTRAINT statement.
        constraint.name = self.get_constraint_name(constraint)
        self.append(self.process(DropConstraint(constraint, cascade=constraint.cascade)))
        self.execute()
class ANSIDialect(DefaultDialect):

3
libs/migrate/changeset/constraint.py

@ -4,7 +4,6 @@
from sqlalchemy import schema
from migrate.exceptions import *
from migrate.changeset import SQLA_06
class ConstraintChangeset(object):
"""Base class for Constraint classes."""
@ -165,8 +164,6 @@ class CheckConstraint(ConstraintChangeset, schema.CheckConstraint):
table = kwargs.pop('table', table)
schema.CheckConstraint.__init__(self, sqltext, *args, **kwargs)
if table is not None:
if not SQLA_06:
self.table = table
self._set_parent(table)
self.colnames = colnames

12
libs/migrate/changeset/databases/firebird.py

@ -4,13 +4,10 @@
from sqlalchemy.databases import firebird as sa_base
from sqlalchemy.schema import PrimaryKeyConstraint
from migrate import exceptions
from migrate.changeset import ansisql, SQLA_06
from migrate.changeset import ansisql
if SQLA_06:
FBSchemaGenerator = sa_base.FBDDLCompiler
else:
FBSchemaGenerator = sa_base.FBSchemaGenerator
FBSchemaGenerator = sa_base.FBDDLCompiler
class FBColumnGenerator(FBSchemaGenerator, ansisql.ANSIColumnGenerator):
"""Firebird column generator implementation."""
@ -41,10 +38,7 @@ class FBColumnDropper(ansisql.ANSIColumnDropper):
# is deleted!
continue
if SQLA_06:
should_drop = column.name in cons.columns
else:
should_drop = cons.contains_column(column) and cons.name
should_drop = column.name in cons.columns
if should_drop:
self.start_alter_table(column)
self.append("DROP CONSTRAINT ")

43
libs/migrate/changeset/databases/mysql.py

@ -6,13 +6,10 @@ from sqlalchemy.databases import mysql as sa_base
from sqlalchemy import types as sqltypes
from migrate import exceptions
from migrate.changeset import ansisql, SQLA_06
from migrate.changeset import ansisql
if not SQLA_06:
MySQLSchemaGenerator = sa_base.MySQLSchemaGenerator
else:
MySQLSchemaGenerator = sa_base.MySQLDDLCompiler
MySQLSchemaGenerator = sa_base.MySQLDDLCompiler
class MySQLColumnGenerator(MySQLSchemaGenerator, ansisql.ANSIColumnGenerator):
pass
@ -53,37 +50,11 @@ class MySQLSchemaChanger(MySQLSchemaGenerator, ansisql.ANSISchemaChanger):
class MySQLConstraintGenerator(ansisql.ANSIConstraintGenerator):
pass
if SQLA_06:
class MySQLConstraintDropper(MySQLSchemaGenerator, ansisql.ANSIConstraintDropper):
def visit_migrate_check_constraint(self, *p, **k):
raise exceptions.NotSupportedError("MySQL does not support CHECK"
" constraints, use triggers instead.")
else:
class MySQLConstraintDropper(ansisql.ANSIConstraintDropper):
def visit_migrate_primary_key_constraint(self, constraint):
self.start_alter_table(constraint)
self.append("DROP PRIMARY KEY")
self.execute()
def visit_migrate_foreign_key_constraint(self, constraint):
self.start_alter_table(constraint)
self.append("DROP FOREIGN KEY ")
constraint.name = self.get_constraint_name(constraint)
self.append(self.preparer.format_constraint(constraint))
self.execute()
def visit_migrate_check_constraint(self, *p, **k):
raise exceptions.NotSupportedError("MySQL does not support CHECK"
" constraints, use triggers instead.")
def visit_migrate_unique_constraint(self, constraint, *p, **k):
self.start_alter_table(constraint)
self.append('DROP INDEX ')
constraint.name = self.get_constraint_name(constraint)
self.append(self.preparer.format_constraint(constraint))
self.execute()
class MySQLConstraintDropper(MySQLSchemaGenerator, ansisql.ANSIConstraintDropper):
def visit_migrate_check_constraint(self, *p, **k):
raise exceptions.NotSupportedError("MySQL does not support CHECK"
" constraints, use triggers instead.")
class MySQLDialect(ansisql.ANSIDialect):

7
libs/migrate/changeset/databases/oracle.py

@ -5,13 +5,10 @@ import sqlalchemy as sa
from sqlalchemy.databases import oracle as sa_base
from migrate import exceptions
from migrate.changeset import ansisql, SQLA_06
from migrate.changeset import ansisql
if not SQLA_06:
OracleSchemaGenerator = sa_base.OracleSchemaGenerator
else:
OracleSchemaGenerator = sa_base.OracleDDLCompiler
OracleSchemaGenerator = sa_base.OracleDDLCompiler
class OracleColumnGenerator(OracleSchemaGenerator, ansisql.ANSIColumnGenerator):

12
libs/migrate/changeset/databases/postgres.py

@ -3,14 +3,10 @@
.. _`PostgreSQL`: http://www.postgresql.org/
"""
from migrate.changeset import ansisql, SQLA_06
if not SQLA_06:
from sqlalchemy.databases import postgres as sa_base
PGSchemaGenerator = sa_base.PGSchemaGenerator
else:
from sqlalchemy.databases import postgresql as sa_base
PGSchemaGenerator = sa_base.PGDDLCompiler
from migrate.changeset import ansisql
from sqlalchemy.databases import postgresql as sa_base
PGSchemaGenerator = sa_base.PGDDLCompiler
class PGColumnGenerator(PGSchemaGenerator, ansisql.ANSIColumnGenerator):

10
libs/migrate/changeset/databases/sqlite.py

@ -9,13 +9,11 @@ from copy import copy
from sqlalchemy.databases import sqlite as sa_base
from migrate import exceptions
from migrate.changeset import ansisql, SQLA_06
from migrate.changeset import ansisql
if not SQLA_06:
SQLiteSchemaGenerator = sa_base.SQLiteSchemaGenerator
else:
SQLiteSchemaGenerator = sa_base.SQLiteDDLCompiler
SQLiteSchemaGenerator = sa_base.SQLiteDDLCompiler
class SQLiteCommon(object):
@ -39,7 +37,7 @@ class SQLiteHelper(SQLiteCommon):
insertion_string = self._modify_table(table, column, delta)
table.create()
table.create(bind=self.connection)
self.append(insertion_string % {'table_name': table_name})
self.execute()
self.append('DROP TABLE migration_tmp')

32
libs/migrate/changeset/schema.py

@ -11,7 +11,7 @@ from sqlalchemy.schema import ForeignKeyConstraint
from sqlalchemy.schema import UniqueConstraint
from migrate.exceptions import *
from migrate.changeset import SQLA_06
from migrate.changeset import SQLA_07
from migrate.changeset.databases.visitor import (get_engine_visitor,
run_single_visitor)
@ -349,10 +349,7 @@ class ColumnDelta(DictMixin, sqlalchemy.schema.SchemaItem):
def process_column(self, column):
"""Processes default values for column"""
# XXX: this is a snippet from SA processing of positional parameters
if not SQLA_06 and column.args:
toinit = list(column.args)
else:
toinit = list()
toinit = list()
if column.server_default is not None:
if isinstance(column.server_default, sqlalchemy.FetchedValue):
@ -367,9 +364,6 @@ class ColumnDelta(DictMixin, sqlalchemy.schema.SchemaItem):
for_update=True))
if toinit:
column._init_items(*toinit)
if not SQLA_06:
column.args = []
def _get_table(self):
return getattr(self, '_table', None)
@ -469,14 +463,18 @@ class ChangesetTable(object):
self._set_parent(self.metadata)
def _meta_key(self):
"""Get the meta key for this table."""
return sqlalchemy.schema._get_table_key(self.name, self.schema)
def deregister(self):
"""Remove this table from its metadata"""
key = self._meta_key()
meta = self.metadata
if key in meta.tables:
del meta.tables[key]
if SQLA_07:
self.metadata._remove_table(self.name, self.schema)
else:
key = self._meta_key()
meta = self.metadata
if key in meta.tables:
del meta.tables[key]
class ChangesetColumn(object):
@ -555,7 +553,10 @@ populated with defaults
def add_to_table(self, table):
if table is not None and self.table is None:
self._set_parent(table)
if SQLA_07:
table.append_column(self)
else:
self._set_parent(table)
def _col_name_in_constraint(self,cons,name):
return False
@ -590,7 +591,10 @@ populated with defaults
table.constraints = table.constraints - to_drop
if table.c.contains_column(self):
table.c.remove(self)
if SQLA_07:
table._columns.remove(self)
else:
table.c.remove(self)
# TODO: this is fixed in 0.6
def copy_fixed(self, **kw):

17
libs/migrate/versioning/api.py

@ -110,19 +110,19 @@ def script(description, repository, **opts):
@catch_known_errors
def script_sql(database, repository, **opts):
"""%prog script_sql DATABASE REPOSITORY_PATH
def script_sql(database, description, repository, **opts):
"""%prog script_sql DATABASE DESCRIPTION REPOSITORY_PATH
Create empty change SQL scripts for given DATABASE, where DATABASE
is either specific ('postgres', 'mysql', 'oracle', 'sqlite', etc.)
is either specific ('postgresql', 'mysql', 'oracle', 'sqlite', etc.)
or generic ('default').
For instance, manage.py script_sql postgres creates:
repository/versions/001_postgres_upgrade.sql and
repository/versions/001_postgres_postgres.sql
For instance, manage.py script_sql postgresql description creates:
repository/versions/001_description_postgresql_upgrade.sql and
repository/versions/001_description_postgresql_downgrade.sql
"""
repo = Repository(repository)
repo.create_script_sql(database, **opts)
repo.create_script_sql(database, description, **opts)
def version(repository, **opts):
@ -212,14 +212,15 @@ def test(url, repository, **opts):
"""
engine = opts.pop('engine')
repos = Repository(repository)
script = repos.version(None).script()
# Upgrade
log.info("Upgrading...")
script = repos.version(None).script(engine.name, 'upgrade')
script.run(engine, 1)
log.info("done")
log.info("Downgrading...")
script = repos.version(None).script(engine.name, 'downgrade')
script.run(engine, -1)
log.info("done")
log.info("Success")

183
libs/migrate/versioning/genmodel.py

@ -1,9 +1,9 @@
"""
Code to generate a Python model from a database or differences
between a model and database.
Code to generate a Python model from a database or differences
between a model and database.
Some of this is borrowed heavily from the AutoCode project at:
http://code.google.com/p/sqlautocode/
Some of this is borrowed heavily from the AutoCode project at:
http://code.google.com/p/sqlautocode/
"""
import sys
@ -34,6 +34,13 @@ Base = declarative.declarative_base()
class ModelGenerator(object):
"""Various transformations from an A, B diff.
In the implementation, A tends to be called the model and B
the database (although this is not true of all diffs).
The diff is directionless, but transformations apply the diff
in a particular direction, described in the method name.
"""
def __init__(self, diff, engine, declarative=False):
self.diff = diff
@ -59,7 +66,7 @@ class ModelGenerator(object):
pass
else:
kwarg.append('default')
ks = ', '.join('%s=%r' % (k, getattr(col, k)) for k in kwarg)
args = ['%s=%r' % (k, getattr(col, k)) for k in kwarg]
# crs: not sure if this is good idea, but it gets rid of extra
# u''
@ -73,43 +80,38 @@ class ModelGenerator(object):
type_ = cls()
break
data = {
'name': name,
'type': type_,
'constraints': ', '.join([repr(cn) for cn in col.constraints]),
'args': ks and ks or ''}
type_repr = repr(type_)
if type_repr.endswith('()'):
type_repr = type_repr[:-2]
if data['constraints']:
if data['args']:
data['args'] = ',' + data['args']
constraints = [repr(cn) for cn in col.constraints]
if data['constraints'] or data['args']:
data['maybeComma'] = ','
else:
data['maybeComma'] = ''
data = {
'name': name,
'commonStuff': ', '.join([type_repr] + constraints + args),
}
commonStuff = """ %(maybeComma)s %(constraints)s %(args)s)""" % data
commonStuff = commonStuff.strip()
data['commonStuff'] = commonStuff
if self.declarative:
return """%(name)s = Column(%(type)r%(commonStuff)s""" % data
return """%(name)s = Column(%(commonStuff)s)""" % data
else:
return """Column(%(name)r, %(type)r%(commonStuff)s""" % data
return """Column(%(name)r, %(commonStuff)s)""" % data
def getTableDefn(self, table):
def _getTableDefn(self, table, metaName='meta'):
out = []
tableName = table.name
if self.declarative:
out.append("class %(table)s(Base):" % {'table': tableName})
out.append(" __tablename__ = '%(table)s'" % {'table': tableName})
out.append(" __tablename__ = '%(table)s'\n" %
{'table': tableName})
for col in table.columns:
out.append(" %s" % self.column_repr(col))
out.append(" %s" % self.column_repr(col))
out.append('\n')
else:
out.append("%(table)s = Table('%(table)s', meta," % \
{'table': tableName})
out.append("%(table)s = Table('%(table)s', %(meta)s," %
{'table': tableName, 'meta': metaName})
for col in table.columns:
out.append(" %s," % self.column_repr(col))
out.append(")")
out.append(" %s," % self.column_repr(col))
out.append(")\n")
return out
def _get_tables(self,missingA=False,missingB=False,modified=False):
@ -122,9 +124,15 @@ class ModelGenerator(object):
if bool_:
for name in names:
yield metadata.tables.get(name)
def toPython(self):
"""Assume database is current and model is empty."""
def genBDefinition(self):
"""Generates the source code for a definition of B.
Assumes a diff where A is empty.
Was: toPython. Assume database (B) is current and model (A) is empty.
"""
out = []
if self.declarative:
out.append(DECLARATIVE_HEADER)
@ -132,67 +140,89 @@ class ModelGenerator(object):
out.append(HEADER)
out.append("")
for table in self._get_tables(missingA=True):
out.extend(self.getTableDefn(table))
out.append("")
out.extend(self._getTableDefn(table))
return '\n'.join(out)
def toUpgradeDowngradePython(self, indent=' '):
''' Assume model is most current and database is out-of-date. '''
def genB2AMigration(self, indent=' '):
'''Generate a migration from B to A.
Was: toUpgradeDowngradePython
Assume model (A) is most current and database (B) is out-of-date.
'''
decls = ['from migrate.changeset import schema',
'meta = MetaData()']
for table in self._get_tables(
missingA=True,missingB=True,modified=True
):
decls.extend(self.getTableDefn(table))
upgradeCommands, downgradeCommands = [], []
for tableName in self.diff.tables_missing_from_A:
upgradeCommands.append("%(table)s.drop()" % {'table': tableName})
downgradeCommands.append("%(table)s.create()" % \
{'table': tableName})
for tableName in self.diff.tables_missing_from_B:
upgradeCommands.append("%(table)s.create()" % {'table': tableName})
downgradeCommands.append("%(table)s.drop()" % {'table': tableName})
for tableName in self.diff.tables_different:
dbTable = self.diff.metadataB.tables[tableName]
missingInDatabase, missingInModel, diffDecl = \
self.diff.colDiffs[tableName]
for col in missingInDatabase:
upgradeCommands.append('%s.columns[%r].create()' % (
modelTable, col.name))
downgradeCommands.append('%s.columns[%r].drop()' % (
modelTable, col.name))
for col in missingInModel:
upgradeCommands.append('%s.columns[%r].drop()' % (
modelTable, col.name))
downgradeCommands.append('%s.columns[%r].create()' % (
modelTable, col.name))
for modelCol, databaseCol, modelDecl, databaseDecl in diffDecl:
'pre_meta = MetaData()',
'post_meta = MetaData()',
]
upgradeCommands = ['pre_meta.bind = migrate_engine',
'post_meta.bind = migrate_engine']
downgradeCommands = list(upgradeCommands)
for tn in self.diff.tables_missing_from_A:
pre_table = self.diff.metadataB.tables[tn]
decls.extend(self._getTableDefn(pre_table, metaName='pre_meta'))
upgradeCommands.append(
"pre_meta.tables[%(table)r].drop()" % {'table': tn})
downgradeCommands.append(
"pre_meta.tables[%(table)r].create()" % {'table': tn})
for tn in self.diff.tables_missing_from_B:
post_table = self.diff.metadataA.tables[tn]
decls.extend(self._getTableDefn(post_table, metaName='post_meta'))
upgradeCommands.append(
"post_meta.tables[%(table)r].create()" % {'table': tn})
downgradeCommands.append(
"post_meta.tables[%(table)r].drop()" % {'table': tn})
for (tn, td) in self.diff.tables_different.iteritems():
if td.columns_missing_from_A or td.columns_different:
pre_table = self.diff.metadataB.tables[tn]
decls.extend(self._getTableDefn(
pre_table, metaName='pre_meta'))
if td.columns_missing_from_B or td.columns_different:
post_table = self.diff.metadataA.tables[tn]
decls.extend(self._getTableDefn(
post_table, metaName='post_meta'))
for col in td.columns_missing_from_A:
upgradeCommands.append(
'pre_meta.tables[%r].columns[%r].drop()' % (tn, col))
downgradeCommands.append(
'pre_meta.tables[%r].columns[%r].create()' % (tn, col))
for col in td.columns_missing_from_B:
upgradeCommands.append(
'post_meta.tables[%r].columns[%r].create()' % (tn, col))
downgradeCommands.append(
'post_meta.tables[%r].columns[%r].drop()' % (tn, col))
for modelCol, databaseCol, modelDecl, databaseDecl in td.columns_different:
upgradeCommands.append(
'assert False, "Can\'t alter columns: %s:%s=>%s"' % (
modelTable, modelCol.name, databaseCol.name))
tn, modelCol.name, databaseCol.name))
downgradeCommands.append(
'assert False, "Can\'t alter columns: %s:%s=>%s"' % (
modelTable, modelCol.name, databaseCol.name))
pre_command = ' meta.bind = migrate_engine'
tn, modelCol.name, databaseCol.name))
return (
'\n'.join(decls),
'\n'.join([pre_command] + ['%s%s' % (indent, line) for line in upgradeCommands]),
'\n'.join([pre_command] + ['%s%s' % (indent, line) for line in downgradeCommands]))
'\n'.join('%s%s' % (indent, line) for line in upgradeCommands),
'\n'.join('%s%s' % (indent, line) for line in downgradeCommands))
def _db_can_handle_this_change(self,td):
"""Check if the database can handle going from B to A."""
if (td.columns_missing_from_B
and not td.columns_missing_from_A
and not td.columns_different):
# Even sqlite can handle this.
# Even sqlite can handle column additions.
return True
else:
return not self.engine.url.drivername.startswith('sqlite')
def applyModel(self):
"""Apply model to current database."""
def runB2A(self):
"""Goes from B to A.
Was: applyModel. Apply model (A) to current database (B).
"""
meta = sqlalchemy.MetaData(self.engine)
@ -208,9 +238,9 @@ class ModelGenerator(object):
dbTable = self.diff.metadataB.tables[tableName]
td = self.diff.tables_different[tableName]
if self._db_can_handle_this_change(td):
for col in td.columns_missing_from_B:
modelTable.columns[col].create()
for col in td.columns_missing_from_A:
@ -252,3 +282,4 @@ class ModelGenerator(object):
except:
trans.rollback()
raise

15
libs/migrate/versioning/repository.py

@ -115,6 +115,7 @@ class Repository(pathed.Pathed):
options.setdefault('version_table', 'migrate_version')
options.setdefault('repository_id', name)
options.setdefault('required_dbs', [])
options.setdefault('use_timestamp_numbering', False)
tmpl = open(os.path.join(tmpl_dir, cls._config)).read()
ret = TempitaTemplate(tmpl).substitute(options)
@ -152,11 +153,14 @@ class Repository(pathed.Pathed):
def create_script(self, description, **k):
"""API to :meth:`migrate.versioning.version.Collection.create_new_python_version`"""
k['use_timestamp_numbering'] = self.use_timestamp_numbering
self.versions.create_new_python_version(description, **k)
def create_script_sql(self, database, **k):
def create_script_sql(self, database, description, **k):
"""API to :meth:`migrate.versioning.version.Collection.create_new_sql_version`"""
self.versions.create_new_sql_version(database, **k)
k['use_timestamp_numbering'] = self.use_timestamp_numbering
self.versions.create_new_sql_version(database, description, **k)
@property
def latest(self):
@ -173,6 +177,13 @@ class Repository(pathed.Pathed):
"""Returns repository id specified in config"""
return self.config.get('db_settings', 'repository_id')
@property
def use_timestamp_numbering(self):
"""Returns use_timestamp_numbering specified in config"""
if self.config.has_option('db_settings', 'use_timestamp_numbering'):
return self.config.getboolean('db_settings', 'use_timestamp_numbering')
return False
def version(self, *p, **k):
"""API to :attr:`migrate.versioning.version.Collection.version`"""
return self.versions.version(*p, **k)

21
libs/migrate/versioning/schema.py

@ -11,6 +11,7 @@ from sqlalchemy import exceptions as sa_exceptions
from sqlalchemy.sql import bindparam
from migrate import exceptions
from migrate.changeset import SQLA_07
from migrate.versioning import genmodel, schemadiff
from migrate.versioning.repository import Repository
from migrate.versioning.util import load_model
@ -57,14 +58,20 @@ class ControlledSchema(object):
"""
Remove version control from a database.
"""
try:
self.table.drop()
except (sa_exceptions.SQLError):
raise exceptions.DatabaseNotControlledError(str(self.table))
if SQLA_07:
try:
self.table.drop()
except sa_exceptions.DatabaseError:
raise exceptions.DatabaseNotControlledError(str(self.table))
else:
try:
self.table.drop()
except (sa_exceptions.SQLError):
raise exceptions.DatabaseNotControlledError(str(self.table))
def changeset(self, version=None):
"""API to Changeset creation.
Uses self.version for start version and engine.name
to get database name.
"""
@ -110,7 +117,7 @@ class ControlledSchema(object):
diff = schemadiff.getDiffOfModelAgainstDatabase(
model, self.engine, excludeTables=[self.repository.version_table]
)
genmodel.ModelGenerator(diff,self.engine).applyModel()
genmodel.ModelGenerator(diff,self.engine).runB2A()
self.update_repository_table(self.version, int(self.repository.latest))
@ -210,4 +217,4 @@ class ControlledSchema(object):
diff = schemadiff.getDiffOfModelAgainstDatabase(
MetaData(), engine, excludeTables=[repository.version_table]
)
return genmodel.ModelGenerator(diff, engine, declarative).toPython()
return genmodel.ModelGenerator(diff, engine, declarative).genBDefinition()

77
libs/migrate/versioning/schemadiff.py

@ -5,7 +5,6 @@
import logging
import sqlalchemy
from migrate.changeset import SQLA_06
from sqlalchemy.types import Float
log = logging.getLogger(__name__)
@ -17,8 +16,16 @@ def getDiffOfModelAgainstDatabase(metadata, engine, excludeTables=None):
:return: object which will evaluate to :keyword:`True` if there \
are differences else :keyword:`False`.
"""
return SchemaDiff(metadata,
sqlalchemy.MetaData(engine, reflect=True),
db_metadata = sqlalchemy.MetaData(engine, reflect=True)
# sqlite will include a dynamically generated 'sqlite_sequence' table if
# there are autoincrement sequences in the database; this should not be
# compared.
if engine.dialect.name == 'sqlite':
if 'sqlite_sequence' in db_metadata.tables:
db_metadata.remove(db_metadata.tables['sqlite_sequence'])
return SchemaDiff(metadata, db_metadata,
labelA='model',
labelB='database',
excludeTables=excludeTables)
@ -39,11 +46,11 @@ class ColDiff(object):
Container for differences in one :class:`~sqlalchemy.schema.Column`
between two :class:`~sqlalchemy.schema.Table` instances, ``A``
and ``B``.
.. attribute:: col_A
The :class:`~sqlalchemy.schema.Column` object for A.
.. attribute:: col_B
The :class:`~sqlalchemy.schema.Column` object for B.
@ -51,15 +58,15 @@ class ColDiff(object):
.. attribute:: type_A
The most generic type of the :class:`~sqlalchemy.schema.Column`
object in A.
object in A.
.. attribute:: type_B
The most generic type of the :class:`~sqlalchemy.schema.Column`
object in A.
object in A.
"""
diff = False
def __init__(self,col_A,col_B):
@ -87,10 +94,10 @@ class ColDiff(object):
if not (A is None or B is None) and A!=B:
self.diff=True
return
def __nonzero__(self):
return self.diff
class TableDiff(object):
"""
Container for differences in one :class:`~sqlalchemy.schema.Table`
@ -101,12 +108,12 @@ class TableDiff(object):
A sequence of column names that were found in B but weren't in
A.
.. attribute:: columns_missing_from_B
A sequence of column names that were found in A but weren't in
B.
.. attribute:: columns_different
A dictionary containing information about columns that were
@ -126,7 +133,7 @@ class TableDiff(object):
self.columns_missing_from_B or
self.columns_different
)
class SchemaDiff(object):
"""
Compute the difference between two :class:`~sqlalchemy.schema.MetaData`
@ -139,34 +146,34 @@ class SchemaDiff(object):
The length of a :class:`SchemaDiff` will give the number of
changes found, enabling it to be used much like a boolean in
expressions.
:param metadataA:
First :class:`~sqlalchemy.schema.MetaData` to compare.
:param metadataB:
Second :class:`~sqlalchemy.schema.MetaData` to compare.
:param labelA:
The label to use in messages about the first
:class:`~sqlalchemy.schema.MetaData`.
:param labelB:
:class:`~sqlalchemy.schema.MetaData`.
:param labelB:
The label to use in messages about the second
:class:`~sqlalchemy.schema.MetaData`.
:class:`~sqlalchemy.schema.MetaData`.
:param excludeTables:
A sequence of table names to exclude.
.. attribute:: tables_missing_from_A
A sequence of table names that were found in B but weren't in
A.
.. attribute:: tables_missing_from_B
A sequence of table names that were found in A but weren't in
B.
.. attribute:: tables_different
A dictionary containing information about tables that were found
@ -195,26 +202,26 @@ class SchemaDiff(object):
self.tables_missing_from_B = sorted(
A_table_names - B_table_names - excludeTables
)
self.tables_different = {}
for table_name in A_table_names.intersection(B_table_names):
td = TableDiff()
A_table = metadataA.tables[table_name]
B_table = metadataB.tables[table_name]
A_column_names = set(A_table.columns.keys())
B_column_names = set(B_table.columns.keys())
td.columns_missing_from_A = sorted(
B_column_names - A_column_names
)
td.columns_missing_from_B = sorted(
A_column_names - B_column_names
)
td.columns_different = {}
for col_name in A_column_names.intersection(B_column_names):
@ -226,7 +233,7 @@ class SchemaDiff(object):
if cd:
td.columns_different[col_name]=cd
# XXX - index and constraint differences should
# be checked for here
@ -237,7 +244,7 @@ class SchemaDiff(object):
''' Summarize differences. '''
out = []
column_template =' %%%is: %%r' % self.label_width
for names,label in (
(self.tables_missing_from_A,self.labelA),
(self.tables_missing_from_B,self.labelB),
@ -248,7 +255,7 @@ class SchemaDiff(object):
label,', '.join(sorted(names))
)
)
for name,td in sorted(self.tables_different.items()):
out.append(
' table with differences: %s' % name
@ -267,7 +274,7 @@ class SchemaDiff(object):
out.append(' column with differences: %s' % name)
out.append(column_template % (self.labelA,cd.col_A))
out.append(column_template % (self.labelB,cd.col_B))
if out:
out.insert(0, 'Schema diffs:')
return '\n'.join(out)

16
libs/migrate/versioning/script/py.py

@ -25,7 +25,7 @@ class PythonScript(base.BaseScript):
@classmethod
def create(cls, path, **opts):
"""Create an empty migration script at specified path
:returns: :class:`PythonScript instance <migrate.versioning.script.py.PythonScript>`"""
cls.require_notfound(path)
@ -38,7 +38,7 @@ class PythonScript(base.BaseScript):
def make_update_script_for_model(cls, engine, oldmodel,
model, repository, **opts):
"""Create a migration script based on difference between two SA models.
:param repository: path to migrate repository
:param oldmodel: dotted.module.name:SAClass or SAClass object
:param model: dotted.module.name:SAClass or SAClass object
@ -50,7 +50,7 @@ class PythonScript(base.BaseScript):
:returns: Upgrade / Downgrade script
:rtype: string
"""
if isinstance(repository, basestring):
# oh dear, an import cycle!
from migrate.versioning.repository import Repository
@ -61,12 +61,12 @@ class PythonScript(base.BaseScript):
# Compute differences.
diff = schemadiff.getDiffOfModelAgainstModel(
oldmodel,
model,
oldmodel,
excludeTables=[repository.version_table])
# TODO: diff can be False (there is no difference?)
decls, upgradeCommands, downgradeCommands = \
genmodel.ModelGenerator(diff,engine).toUpgradeDowngradePython()
genmodel.ModelGenerator(diff,engine).genB2AMigration()
# Store differences into file.
src = Template(opts.pop('templates_path', None)).get_script(opts.pop('templates_theme', None))
@ -86,7 +86,7 @@ class PythonScript(base.BaseScript):
@classmethod
def verify_module(cls, path):
"""Ensure path is a valid script
:param path: Script location
:type path: string
:raises: :exc:`InvalidScriptError <migrate.exceptions.InvalidScriptError>`
@ -101,7 +101,7 @@ class PythonScript(base.BaseScript):
return module
def preview_sql(self, url, step, **args):
"""Mocks SQLAlchemy Engine to store all executed calls in a string
"""Mocks SQLAlchemy Engine to store all executed calls in a string
and runs :meth:`PythonScript.run <migrate.versioning.script.py.PythonScript.run>`
:returns: SQL file
@ -119,7 +119,7 @@ class PythonScript(base.BaseScript):
return go(url, step, **args)
def run(self, engine, step):
"""Core method of Script file.
"""Core method of Script file.
Exectues :func:`update` or :func:`downgrade` functions
:param engine: SQLAlchemy Engine

1
libs/migrate/versioning/template.py

@ -38,7 +38,6 @@ class Template(pathed.Pathed):
if `path` is not provided.
"""
pkg = 'migrate.versioning.templates'
_manage = 'manage.py_tmpl'
def __new__(cls, path=None):
if path is None:

5
libs/migrate/versioning/templates/manage.py_tmpl

@ -1,5 +0,0 @@
#!/usr/bin/env python
from migrate.versioning.shell import main
if __name__ == '__main__':
main(%(defaults)s)

4
libs/migrate/versioning/templates/manage/default.py_tmpl

@ -7,4 +7,6 @@ del _vars['__template_name__']
_vars.pop('repository_name', None)
defaults = ", ".join(["%s='%s'" % var for var in _vars.iteritems()])
}}
main({{ defaults }})
if __name__ == '__main__':
main({{ defaults }})

3
libs/migrate/versioning/templates/manage/pylons.py_tmpl

@ -26,4 +26,5 @@ conf_dict = ConfigLoader(conf_path).parser._sections['app:main']
# migrate supports passing url as an existing Engine instance (since 0.6.0)
# usage: migrate -c path/to/config.ini COMMANDS
main(url=engine_from_config(conf_dict), repository=migrations.__path__[0],{{ defaults }})
if __name__ == '__main__':
main(url=engine_from_config(conf_dict), repository=migrations.__path__[0],{{ defaults }})

5
libs/migrate/versioning/templates/repository/default/migrate.cfg

@ -18,3 +18,8 @@ version_table={{ locals().pop('version_table') }}
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs={{ locals().pop('required_dbs') }}
# When creating new change scripts, Migrate will stamp the new script with
# a version number. By default this is latest_version + 1. You can set this
# to 'true' to tell Migrate to use the UTC timestamp instead.
use_timestamp_numbering={{ locals().pop('use_timestamp_numbering') }}

5
libs/migrate/versioning/templates/repository/pylons/migrate.cfg

@ -18,3 +18,8 @@ version_table={{ locals().pop('version_table') }}
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs={{ locals().pop('required_dbs') }}
# When creating new change scripts, Migrate will stamp the new script with
# a version number. By default this is latest_version + 1. You can set this
# to 'true' to tell Migrate to use the UTC timestamp instead.
use_timestamp_numbering={{ locals().pop('use_timestamp_numbering') }}

6
libs/migrate/versioning/templates/script/default.py_tmpl

@ -1,11 +1,13 @@
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind migrate_engine
# to your metadata
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pass
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pass

6
libs/migrate/versioning/templates/script/pylons.py_tmpl

@ -1,11 +1,13 @@
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind migrate_engine
# to your metadata
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pass
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pass

39
libs/migrate/versioning/version.py

@ -8,6 +8,7 @@ import logging
from migrate import exceptions
from migrate.versioning import pathed, script
from datetime import datetime
log = logging.getLogger(__name__)
@ -88,9 +89,15 @@ class Collection(pathed.Pathed):
""":returns: Latest version in Collection"""
return max([VerNum(0)] + self.versions.keys())
def _next_ver_num(self, use_timestamp_numbering):
if use_timestamp_numbering == True:
return VerNum(int(datetime.utcnow().strftime('%Y%m%d%H%M%S')))
else:
return self.latest + 1
def create_new_python_version(self, description, **k):
"""Create Python files for new version"""
ver = self.latest + 1
ver = self._next_ver_num(k.pop('use_timestamp_numbering', False))
extra = str_to_filename(description)
if extra:
@ -105,14 +112,22 @@ class Collection(pathed.Pathed):
script.PythonScript.create(filepath, **k)
self.versions[ver] = Version(ver, self.path, [filename])
def create_new_sql_version(self, database, **k):
def create_new_sql_version(self, database, description, **k):
"""Create SQL files for new version"""
ver = self.latest + 1
ver = self._next_ver_num(k.pop('use_timestamp_numbering', False))
self.versions[ver] = Version(ver, self.path, [])
extra = str_to_filename(description)
if extra:
if extra == '_':
extra = ''
elif not extra.startswith('_'):
extra = '_%s' % extra
# Create new files.
for op in ('upgrade', 'downgrade'):
filename = '%03d_%s_%s.sql' % (ver, database, op)
filename = '%03d%s_%s_%s.sql' % (ver, extra, database, op)
filepath = self._version_path(filename)
script.SqlScript.create(filepath, **k)
self.versions[ver].add_script(filepath)
@ -176,18 +191,26 @@ class Version(object):
elif path.endswith(Extensions.sql):
self._add_script_sql(path)
SQL_FILENAME = re.compile(r'^(\d+)_([^_]+)_([^_]+).sql')
SQL_FILENAME = re.compile(r'^.*\.sql')
def _add_script_sql(self, path):
basename = os.path.basename(path)
match = self.SQL_FILENAME.match(basename)
if match:
version, dbms, op = match.group(1), match.group(2), match.group(3)
basename = basename.replace('.sql', '')
parts = basename.split('_')
if len(parts) < 3:
raise exceptions.ScriptError(
"Invalid SQL script name %s " % basename + \
"(needs to be ###_description_database_operation.sql)")
version = parts[0]
op = parts[-1]
dbms = parts[-2]
else:
raise exceptions.ScriptError(
"Invalid SQL script name %s " % basename + \
"(needs to be ###_database_operation.sql)")
"(needs to be ###_description_database_operation.sql)")
# File the script into a dictionary
self.sql.setdefault(dbms, {})[op] = script.SqlScript(path)

1542
libs/pytwitter/__init__.py

File diff suppressed because it is too large

4
libs/requests/__init__.py

@ -15,8 +15,8 @@ requests
"""
__title__ = 'requests'
__version__ = '0.9.1'
__build__ = 0x000901
__version__ = '0.10.1'
__build__ = 0x001001
__author__ = 'Kenneth Reitz'
__license__ = 'ISC'
__copyright__ = 'Copyright 2012 Kenneth Reitz'

3
libs/requests/api.py

@ -32,9 +32,10 @@ def request(method, url, **kwargs):
:param session: (optional) A :class:`Session` object to be used for the request.
:param config: (optional) A configuration dictionary.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param prefetch: (optional) if ``True``, the response content will be immediately downloaded.
"""
s = kwargs.get('session') or sessions.session()
s = kwargs.pop('session') if 'session' in kwargs else sessions.session()
return s.request(method=method, url=url, **kwargs)

30
libs/requests/async.py

@ -46,15 +46,15 @@ def patched(f):
return wrapped
def send(r, pools=None):
"""Sends a given Request object."""
def send(r, pool=None):
"""Sends the request object using the specified pool. If a pool isn't
specified this method blocks. Pools are useful because you can specify size
and can hence limit concurrency."""
if pools:
r._pools = pools
if pool != None:
return pool.spawn(r.send)
r.send()
return r.response
return gevent.spawn(r.send)
# Patched requests.api functions.
@ -78,19 +78,11 @@ def map(requests, prefetch=True, size=None):
requests = list(requests)
if size:
pool = Pool(size)
pool.map(send, requests)
pool.join()
else:
jobs = [gevent.spawn(send, r) for r in requests]
gevent.joinall(jobs)
pool = Pool(size) if size else None
jobs = [send(r, pool) for r in requests]
gevent.joinall(jobs)
if prefetch:
[r.response.content for r in requests]
return [r.response for r in requests]
return [r.response for r in requests]

40
libs/requests/auth.py

@ -7,19 +7,21 @@ requests.auth
This module contains the authentication handlers for Requests.
"""
from __future__ import unicode_literals
import time
import hashlib
from base64 import b64encode
from urlparse import urlparse
from .compat import urlparse, str, bytes
from .utils import randombytes, parse_dict_header
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
return 'Basic %s' % b64encode('%s:%s' % (username, password))
return 'Basic ' + b64encode(("%s:%s" % (username, password)).encode('utf-8')).strip().decode('utf-8')
class AuthBase(object):
@ -32,8 +34,8 @@ class AuthBase(object):
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = str(username)
self.password = str(password)
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
@ -74,9 +76,17 @@ class HTTPDigestAuth(AuthBase):
algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if algorithm == 'MD5':
H = lambda x: hashlib.md5(x).hexdigest()
def h(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
H = h
elif algorithm == 'SHA':
H = lambda x: hashlib.sha1(x).hexdigest()
def h(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
H = h
# XXX MD5-sess
KD = lambda s, d: H("%s:%s" % (s, d))
@ -86,7 +96,9 @@ class HTTPDigestAuth(AuthBase):
# XXX not implemented yet
entdig = None
p_parsed = urlparse(r.request.url)
path = p_parsed.path + p_parsed.query
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (r.request.method, path)
@ -99,10 +111,12 @@ class HTTPDigestAuth(AuthBase):
last_nonce = nonce
ncvalue = '%08x' % nonce_count
cnonce = (hashlib.sha1("%s:%s:%s:%s" % (
nonce_count, nonce, time.ctime(), randombytes(8)))
.hexdigest()[:16]
)
s = str(nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += randombytes(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
respdig = KD(H(A1), noncebit)
elif qop is None:
@ -132,5 +146,5 @@ class HTTPDigestAuth(AuthBase):
return r
def __call__(self, r):
r.hooks['response'] = self.handle_401
r.register_hook('response', self.handle_401)
return r

102
libs/requests/compat.py

@ -0,0 +1,102 @@
# -*- coding: utf-8 -*-
"""
pythoncompat
"""
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('solar==' in str(sys.platform).lower()) # Complete guess.
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, urlencode
from urlparse import urlparse, urlunparse, urljoin, urlsplit
from urllib2 import parse_http_list
import cookielib
from .packages.oreos.monkeys import SimpleCookie
from StringIO import StringIO
str = unicode
bytes = str
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote
from urllib.request import parse_http_list
from http import cookiejar as cookielib
from http.cookies import SimpleCookie
from io import StringIO
str = str
bytes = bytes

6
libs/requests/defaults.py

@ -10,16 +10,13 @@ Configurations:
:base_headers: Default HTTP headers.
:verbose: Stream to write request logging to.
:timeout: Seconds until request timeout.
:max_redirects: Maximum number of redirects allowed within a request.
:decode_unicode: Decode unicode responses automatically?
:max_redirects: Maximum number of redirects allowed within a request.s
:keep_alive: Reuse HTTP Connections?
:max_retries: The number of times a request should be retried in the event of a connection failure.
:danger_mode: If true, Requests will raise errors immediately.
:safe_mode: If true, Requests will catch all errors.
:pool_maxsize: The maximium size of an HTTP connection pool.
:pool_connections: The number of active HTTP connection pools to use.
"""
from . import __version__
@ -35,7 +32,6 @@ defaults['base_headers'] = {
defaults['verbose'] = None
defaults['max_redirects'] = 30
defaults['decode_unicode'] = True
defaults['pool_connections'] = 10
defaults['pool_maxsize'] = 10
defaults['max_retries'] = 0

18
libs/requests/hooks.py

@ -22,7 +22,10 @@ Available hooks:
"""
import warnings
import traceback
HOOKS = ('args', 'pre_request', 'post_request', 'response')
def dispatch_hook(key, hooks, hook_data):
@ -31,10 +34,15 @@ def dispatch_hook(key, hooks, hook_data):
hooks = hooks or dict()
if key in hooks:
try:
return hooks.get(key).__call__(hook_data) or hook_data
hooks = hooks.get(key)
if hasattr(hooks, '__call__'):
hooks = [hooks]
except Exception, why:
warnings.warn(str(why))
for hook in hooks:
try:
hook_data = hook(hook_data) or hook_data
except Exception:
traceback.print_exc()
return hook_data

218
libs/requests/models.py

@ -8,15 +8,12 @@ This module contains the primary objects that power Requests.
"""
import os
import urllib
from urlparse import urlparse, urlunparse, urljoin, urlsplit
from datetime import datetime
from .hooks import dispatch_hook
from .hooks import dispatch_hook, HOOKS
from .structures import CaseInsensitiveDict
from .status_codes import codes
from .packages import oreos
from .auth import HTTPBasicAuth, HTTPProxyAuth
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.exceptions import MaxRetryError
@ -29,8 +26,15 @@ from .exceptions import (
URLRequired, SSLError)
from .utils import (
get_encoding_from_headers, stream_decode_response_unicode,
stream_decompress, guess_filename, requote_path)
stream_decompress, guess_filename, requote_path, dict_from_string)
from .compat import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, str, bytes, SimpleCookie, is_py3, is_py2
# Import chardet if it is available.
try:
import chardet
except ImportError:
pass
REDIRECT_STATI = (codes.moved, codes.found, codes.other, codes.temporary_moved)
@ -57,13 +61,19 @@ class Request(object):
hooks=None,
config=None,
_poolmanager=None,
verify=None):
verify=None,
session=None):
#: Float describes the timeout of the request.
# (Use socket.setdefaulttimeout() as fallback)
self.timeout = timeout
#: Request URL.
# if isinstance(url, str):
# url = url.encode('utf-8')
# print(dir(url))
self.url = url
#: Dictionary of HTTP Headers to attach to the :class:`Request <Request>`.
@ -82,7 +92,6 @@ class Request(object):
#: Dictionary or byte of querystring data to attach to the
#: :class:`Request <Request>`.
self.params = None
self.params = dict(params or [])
#: True if :class:`Request <Request>` is part of a redirect chain (disables history
#: and HTTPError storage).
@ -114,10 +123,18 @@ class Request(object):
self.sent = False
#: Event-handling hooks.
self.hooks = hooks
self.hooks = {}
for event in HOOKS:
self.hooks[event] = []
hooks = hooks or {}
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
#: Session.
self.session = None
self.session = session
#: SSL Verification.
self.verify = verify
@ -128,7 +145,7 @@ class Request(object):
headers = CaseInsensitiveDict()
# Add configured base headers.
for (k, v) in self.config.get('base_headers', {}).items():
for (k, v) in list(self.config.get('base_headers', {}).items()):
if k not in headers:
headers[k] = v
@ -144,7 +161,7 @@ class Request(object):
return '<Request [%s]>' % (self.method)
def _build_response(self, resp, is_error=False):
def _build_response(self, resp):
"""Build internal :class:`Response <Response>` object
from given response.
"""
@ -173,7 +190,7 @@ class Request(object):
# Add new cookies from the server.
if 'set-cookie' in response.headers:
cookie_header = response.headers['set-cookie']
cookies = oreos.dict_from_string(cookie_header)
cookies = dict_from_string(cookie_header)
# Save cookies in Response.
response.cookies = cookies
@ -183,10 +200,6 @@ class Request(object):
# Save original response for later.
response.raw = resp
if is_error:
response.error = resp
response.url = self.full_url
return response
@ -247,7 +260,8 @@ class Request(object):
timeout=self.timeout,
_poolmanager=self._poolmanager,
proxies = self.proxies,
verify = self.verify
verify = self.verify,
session = self.session
)
request.send()
@ -274,16 +288,17 @@ class Request(object):
returns it twice.
"""
if hasattr(data, '__iter__'):
if hasattr(data, '__iter__') and not isinstance(data, str):
data = dict(data)
if hasattr(data, 'items'):
result = []
for k, vs in data.items():
for k, vs in list(data.items()):
for v in isinstance(vs, list) and vs or [vs]:
result.append((k.encode('utf-8') if isinstance(k, unicode) else k,
v.encode('utf-8') if isinstance(v, unicode) else v))
return result, urllib.urlencode(result, doseq=True)
result.append((k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return result, urlencode(result, doseq=True)
else:
return data, data
@ -294,20 +309,27 @@ class Request(object):
if not self.url:
raise URLRequired()
url = self.url
# Support for unicode domain names and paths.
scheme, netloc, path, params, query, fragment = urlparse(self.url)
scheme, netloc, path, params, query, fragment = urlparse(url)
if not scheme:
raise ValueError()
raise ValueError("Invalid URL %r: No schema supplied" % url)
netloc = netloc.encode('idna')
netloc = netloc.encode('idna').decode('utf-8')
if isinstance(path, unicode):
path = path.encode('utf-8')
if is_py2:
if isinstance(path, str):
path = path.encode('utf-8')
path = requote_path(path)
path = requote_path(path)
url = str(urlunparse([ scheme, netloc, path, params, query, fragment ]))
# print([ scheme, netloc, path, params, query, fragment ])
# print('---------------------')
url = (urlunparse([ scheme, netloc, path, params, query, fragment ]))
if self._enc_params:
if urlparse(url).query:
@ -332,6 +354,10 @@ class Request(object):
path = p.path
if not path:
path = '/'
# if is_py3:
path = quote(path.encode('utf-8'))
url.append(path)
query = p.query
@ -339,9 +365,16 @@ class Request(object):
url.append('?')
url.append(query)
# print(url)
return ''.join(url)
def register_hook(self, event, hook):
"""Properly register a hook."""
return self.hooks[event].append(hook)
def send(self, anyway=False, prefetch=False):
"""Sends the request. Returns True of successful, false if not.
@ -369,14 +402,14 @@ class Request(object):
# Multi-part file uploads.
if self.files:
if not isinstance(self.data, basestring):
if not isinstance(self.data, str):
try:
fields = self.data.copy()
except AttributeError:
fields = dict(self.data)
for (k, v) in self.files.items():
for (k, v) in list(self.files.items()):
# support for explicit filename
if isinstance(v, (tuple, list)):
fn, fp = v
@ -393,7 +426,7 @@ class Request(object):
if self.data:
body = self._enc_data
if isinstance(self.data, basestring):
if isinstance(self.data, str):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
@ -454,6 +487,9 @@ class Request(object):
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if not self.sent or anyway:
@ -463,8 +499,8 @@ class Request(object):
if 'cookie' not in self.headers:
# Simple cookie with our dict.
c = oreos.monkeys.SimpleCookie()
for (k, v) in self.cookies.items():
c = SimpleCookie()
for (k, v) in list(self.cookies.items()):
c[k] = v
# Turn it into a header.
@ -493,16 +529,16 @@ class Request(object):
)
self.sent = True
except MaxRetryError, e:
except MaxRetryError as e:
raise ConnectionError(e)
except (_SSLError, _HTTPError), e:
except (_SSLError, _HTTPError) as e:
if self.verify and isinstance(e, _SSLError):
raise SSLError(e)
raise Timeout('Request timed out.')
except RequestException, e:
except RequestException as e:
if self.config.get('safe_mode', False):
# In safe mode, catch the exception and attach it to
# a blank urllib3.HTTPResponse object.
@ -524,7 +560,7 @@ class Request(object):
if prefetch:
# Save the response.
self.response.content
if self.config.get('danger_mode'):
self.response.raise_for_status()
@ -581,6 +617,10 @@ class Response(object):
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
@ -594,7 +634,7 @@ class Response(object):
return True
def iter_content(self, chunk_size=10 * 1024, decode_unicode=None):
def iter_content(self, chunk_size=10 * 1024, decode_unicode=False):
"""Iterates over the response data. This avoids reading the content
at once into memory for large responses. The chunk size is the number
of bytes it should read into memory. This is not necessarily the
@ -613,16 +653,41 @@ class Response(object):
yield chunk
self._content_consumed = True
gen = generate()
def generate_chunked():
resp = self.raw._original_response
fp = resp.fp
if resp.chunk_left is not None:
pending_bytes = resp.chunk_left
while pending_bytes:
chunk = fp.read(min(chunk_size, pending_bytes))
pending_bytes-=len(chunk)
yield chunk
fp.read(2) # throw away crlf
while 1:
#XXX correct line size? (httplib has 64kb, seems insane)
pending_bytes = fp.readline(40).strip()
pending_bytes = int(pending_bytes, 16)
if pending_bytes == 0:
break
while pending_bytes:
chunk = fp.read(min(chunk_size, pending_bytes))
pending_bytes-=len(chunk)
yield chunk
fp.read(2) # throw away crlf
self._content_consumed = True
fp.close()
if getattr(getattr(self.raw, '_original_response', None), 'chunked', False):
gen = generate_chunked()
else:
gen = generate()
if 'gzip' in self.headers.get('content-encoding', ''):
gen = stream_decompress(gen, mode='gzip')
elif 'deflate' in self.headers.get('content-encoding', ''):
gen = stream_decompress(gen, mode='deflate')
if decode_unicode is None:
decode_unicode = self.config.get('decode_unicode')
if decode_unicode:
gen = stream_decode_response_unicode(gen, self)
@ -635,15 +700,29 @@ class Response(object):
responses.
"""
#TODO: why rstrip by default
pending = None
for chunk in self.iter_content(chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
lines = chunk.splitlines(True)
for line in lines[:-1]:
yield line.rstrip()
# Save the last part of the chunk for next iteration, to keep full line together
pending = lines[-1]
# lines may be empty for the last chunk of a chunked response
if lines:
pending = lines[-1]
#if pending is a complete line, give it baack
if pending[-1] == '\n':
yield pending.rstrip()
pending = None
else:
pending = None
# Yield the last line
if pending is not None:
@ -652,9 +731,7 @@ class Response(object):
@property
def content(self):
"""Content of the response, in bytes or unicode
(if available).
"""
"""Content of the response, in bytes."""
if self._content is None:
# Read the contents.
@ -667,26 +744,45 @@ class Response(object):
except AttributeError:
self._content = None
content = self._content
self._content_consumed = True
return self._content
# Decode unicode content.
if self.config.get('decode_unicode'):
# Try charset from content-type
@property
def text(self):
"""Content of the response, in unicode.
if self.encoding:
try:
content = unicode(content, self.encoding)
except UnicodeError:
pass
if Response.encoding is None and chardet module is available, encoding
will be guessed.
"""
# Try charset from content-type
content = None
encoding = self.encoding
# Fallback to auto-detected encoding if chardet is available.
if self.encoding is None:
try:
detected = chardet.detect(self.content) or {}
encoding = detected.get('encoding')
# Trust that chardet isn't available or something went terribly wrong.
except Exception:
pass
# Fall back:
# Decode unicode from given encoding.
try:
content = str(self.content, encoding)
except (UnicodeError, TypeError):
pass
# Try to fall back:
if not content:
try:
content = unicode(content, self.encoding, errors='replace')
except TypeError:
content = str(content, encoding, errors='replace')
except (UnicodeError, TypeError):
pass
self._content_consumed = True
return content

2
libs/requests/packages/oreos/monkeys.py

@ -318,7 +318,7 @@ _Translator = {
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
_idmap = ''.join(chr(x) for x in xrange(256))
_idmap = ''.join(chr(x) for x in range(256))
def _quote(str, LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):

4
libs/requests/packages/urllib3/__init__.py

@ -1,5 +1,5 @@
# urllib3/__init__.py
# Copyright 2008-2011 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -10,7 +10,7 @@ urllib3 - Thread-safe connection pooling and re-using.
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.0.2'
__version__ = '1.1'
from .connectionpool import (

2
libs/requests/packages/urllib3/_collections.py

@ -1,5 +1,5 @@
# urllib3/_collections.py
# Copyright 2008-2011 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

124
libs/requests/packages/urllib3/connectionpool.py

@ -1,5 +1,5 @@
# urllib3/connectionpool.py
# Copyright 2008-2011 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -7,15 +7,27 @@
import logging
import socket
from httplib import HTTPConnection, HTTPSConnection, HTTPException
from Queue import Queue, Empty, Full
from select import select
from socket import error as SocketError, timeout as SocketTimeout
from .packages.ssl_match_hostname import match_hostname, CertificateError
try:
from select import poll, POLLIN
except ImportError: # Doesn't exist on OSX and other platforms
from select import select
poll = False
try: # Python 3
from http.client import HTTPConnection, HTTPSConnection, HTTPException
from http.client import HTTP_PORT, HTTPS_PORT
except ImportError:
from httplib import HTTPConnection, HTTPSConnection, HTTPException
from httplib import HTTP_PORT, HTTPS_PORT
try: # Python 3
from queue import Queue, Empty, Full
except ImportError:
from Queue import Queue, Empty, Full
try: # Compiled with SSL?
import ssl
BaseSSLError = ssl.SSLError
except ImportError:
@ -23,21 +35,29 @@ except ImportError:
BaseSSLError = None
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .request import RequestMethods
from .response import HTTPResponse
from .exceptions import (
SSLError,
from .exceptions import (SSLError,
MaxRetryError,
TimeoutError,
HostChangedError,
EmptyPoolError,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .packages import six
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
port_by_scheme = {
'http': HTTP_PORT,
'https': HTTPS_PORT,
}
## Connection objects (extension of httplib)
@ -81,7 +101,16 @@ class ConnectionPool(object):
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
pass
scheme = None
def __init__(self, host, port=None):
self.host = host
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
class HTTPConnectionPool(ConnectionPool, RequestMethods):
@ -169,14 +198,14 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
conn = self.pool.get(block=self.block, timeout=timeout)
# If this is a persistent connection, check if it got disconnected
if conn and conn.sock and select([conn.sock], [], [], 0.0)[0]:
# Either data is buffered (bad), or the connection is dropped.
if conn and conn.sock and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
except Empty:
if self.block:
raise EmptyPoolError("Pool reached maximum size and no more "
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
@ -229,11 +258,17 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
conncetion pool.
connection pool.
"""
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if self.port and not port:
# Use explicit default port for comparison when none is given.
port = port_by_scheme.get(scheme)
return (url.startswith('/') or
get_host(url) == (self.scheme, self.host, self.port))
(scheme, host, port) == (self.scheme, self.host, self.port))
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True, timeout=_Default,
@ -306,7 +341,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
headers = self.headers
if retries < 0:
raise MaxRetryError("Max retries exceeded for url: %s" % url)
raise MaxRetryError(self, url)
if timeout is _Default:
timeout = self.timeout
@ -320,8 +355,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
if self.port:
host = "%s:%d" % (host, self.port)
raise HostChangedError("Connection pool with host '%s' tried to "
"open a foreign host: %s" % (host, url))
raise HostChangedError(self, url, retries - 1)
conn = None
@ -352,27 +386,29 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except (Empty), e:
except Empty as e:
# Timed out by queue
raise TimeoutError("Request timed out. (pool_timeout=%s)" %
raise TimeoutError(self, "Request timed out. (pool_timeout=%s)" %
pool_timeout)
except (SocketTimeout), e:
except SocketTimeout as e:
# Timed out by socket
raise TimeoutError("Request timed out. (timeout=%s)" %
raise TimeoutError(self, "Request timed out. (timeout=%s)" %
timeout)
except (BaseSSLError), e:
except BaseSSLError as e:
# SSL certificate error
raise SSLError(e)
except (CertificateError), e:
except CertificateError as e:
# Name mismatch
raise SSLError(e)
except (HTTPException, SocketError), e:
except (HTTPException, SocketError) as e:
# Connection broken, discard. It will be replaced next _get_conn().
conn = None
# This is necessary so we can access e below
err = e
finally:
if conn and release_conn:
@ -381,19 +417,16 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
if not conn:
log.warn("Retrying (%d attempts remain) after connection "
"broken by '%r': %s" % (retries, e, url))
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries - 1,
redirect, assert_same_host) # Try again
# Handle redirection
if (redirect and
response.status in [301, 302, 303, 307] and
'location' in response.headers): # Redirect, retry
log.info("Redirecting %s -> %s" %
(url, response.headers.get('location')))
return self.urlopen(method, response.headers.get('location'), body,
headers, retries - 1, redirect,
assert_same_host)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries - 1, redirect, assert_same_host)
return response
@ -550,3 +583,22 @@ def connection_from_url(url, **kw):
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
def is_connection_dropped(conn):
"""
Returns True if the connection is dropped and should be closed.
:param conn:
``HTTPConnection`` object.
"""
if not poll:
return select([conn.sock], [], [], 0.0)[0]
# This version is better on platforms that support it.
p = poll()
p.register(conn.sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == conn.sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True

41
libs/requests/packages/urllib3/exceptions.py

@ -1,35 +1,54 @@
# urllib3/exceptions.py
# Copyright 2008-2011 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
## Exceptions
## Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class SSLError(Exception):
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class MaxRetryError(HTTPError):
"Raised when the maximum number of retries is exceeded."
pass
## Leaf Exceptions
class MaxRetryError(PoolError):
"Raised when the maximum number of retries is exceeded."
def __init__(self, pool, url):
PoolError.__init__(self, pool,
"Max retries exceeded with url: %s" % url)
class TimeoutError(HTTPError):
"Raised when a socket timeout occurs."
pass
self.url = url
class HostChangedError(HTTPError):
class HostChangedError(PoolError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
PoolError.__init__(self, pool,
"Tried to open a foreign host with url: %s" % url)
self.url = url
self.retries = retries
class TimeoutError(PoolError):
"Raised when a socket timeout occurs."
pass
class EmptyPoolError(HTTPError):
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass

33
libs/requests/packages/urllib3/filepost.py

@ -1,18 +1,21 @@
# urllib3/filepost.py
# Copyright 2008-2011 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import codecs
import mimetools
import mimetypes
try:
from cStringIO import StringIO
from mimetools import choose_boundary
except ImportError:
from StringIO import StringIO # pylint: disable-msg=W0404
from .packages.mimetools_choose_boundary import choose_boundary
from io import BytesIO
from .packages import six
from .packages.six import b
writer = codecs.lookup('utf-8')[3]
@ -35,37 +38,37 @@ def encode_multipart_formdata(fields, boundary=None):
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
body = StringIO()
body = BytesIO()
if boundary is None:
boundary = mimetools.choose_boundary()
boundary = choose_boundary()
for fieldname, value in fields.iteritems():
body.write('--%s\r\n' % (boundary))
for fieldname, value in six.iteritems(fields):
body.write(b('--%s\r\n' % (boundary)))
if isinstance(value, tuple):
filename, data = value
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write('Content-Type: %s\r\n\r\n' %
(get_content_type(filename)))
body.write(b('Content-Type: %s\r\n\r\n' %
(get_content_type(filename))))
else:
data = value
writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
% (fieldname))
body.write('Content-Type: text/plain\r\n\r\n')
body.write(b'Content-Type: text/plain\r\n\r\n')
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, unicode):
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write('\r\n')
body.write(b'\r\n')
body.write('--%s--\r\n' % (boundary))
body.write(b('--%s--\r\n' % (boundary)))
content_type = 'multipart/form-data; boundary=%s' % boundary
content_type = b('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type

47
libs/requests/packages/urllib3/packages/mimetools_choose_boundary/__init__.py

@ -0,0 +1,47 @@
"""The function mimetools.choose_boundary() from Python 2.7, which seems to
have disappeared in Python 3 (although email.generator._make_boundary() might
work as a replacement?).
Tweaked to use lock from threading rather than thread.
"""
import os
from threading import Lock
_counter_lock = Lock()
_counter = 0
def _get_next_counter():
global _counter
with _counter_lock:
_counter += 1
return _counter
_prefix = None


def choose_boundary():
    """Return a string usable as a multipart boundary.

    The string chosen is unique within a single program run, and
    incorporates the user id (if available), process id (if available),
    and current time. So it's very unlikely the returned string appears
    in message text, but there's no guarantee.

    The boundary contains dots so you have to quote it in the header."""
    global _prefix
    import time
    if _prefix is None:
        # Compute the static host/uid/pid part only once per process.
        import socket
        try:
            host_part = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            host_part = '127.0.0.1'
        try:
            uid_part = repr(os.getuid())
        except AttributeError:
            # os.getuid is unavailable on some platforms (e.g. Windows).
            uid_part = '1'
        try:
            pid_part = repr(os.getpid())
        except AttributeError:
            pid_part = '1'
        _prefix = '.'.join((host_part, uid_part, pid_part))
    return "%s.%.3f.%d" % (_prefix, time.time(), _get_next_counter())

372
libs/requests/packages/urllib3/packages/six.py

@ -0,0 +1,372 @@
"""Utilities for writing code that runs on Python 2 and 3"""
#Copyright (c) 2010-2011 Benjamin Peterson
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.1.0"
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3

if PY3:
    # On Python 3 the 2.x type aliases collapse onto the builtins.
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
    class X(object):
        def __len__(self):
            return 1 << 31
    try:
        # Overflows on 32-bit builds where Py_ssize_t is 32 bits wide.
        len(X())
    except OverflowError:
        # 32-bit
        MAXSIZE = int((1 << 31) - 1)
    else:
        # 64-bit
        MAXSIZE = int((1 << 63) - 1)
    del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its target on first access, stores the
    result on the instance, and then removes itself from the class."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)
        # This is a bit ugly, but it avoids running this again.
        delattr(tp, self.name)
        return result


class MovedModule(_LazyDescr):
    """Lazy reference to a module that was renamed between Python 2 and 3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                # The Python 3 module name defaults to the attribute name.
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)


class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that moved between modules
    (and possibly changed name) between Python 2 and 3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Fall back: new attr name -> old attr name -> the move's name.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects"""


# Each entry maps one six.moves name to its Python 2 and Python 3 homes.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),

    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("winreg", "_winreg"),
]
# Install each lazy descriptor on the class, then expose an instance of the
# class as the importable pseudo-module ``six.moves``.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
del attr

moves = sys.modules["six.moves"] = _MovedItems("moves")
def add_move(move):
    """Add an item to six.moves."""
    # Registering on the class makes the lazy descriptor visible through
    # the ``moves`` pseudo-module instance.
    setattr(_MovedItems, move.name, move)


def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # The descriptor may already have resolved itself into a plain
        # attribute on the module instance; fall back to deleting that.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# Names of function/method introspection attributes and dict-iteration
# methods differ between Python 2 and 3; pick the right spelling once.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_code = "__code__"
    _func_defaults = "__defaults__"

    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_code = "func_code"
    _func_defaults = "func_defaults"

    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"


if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; the function is used directly.
        return unbound

    advance_iterator = next

    def callable(obj):
        # builtins.callable was removed in early Python 3; emulate it.
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def advance_iterator(it):
        return it.next()

    callable = callable


_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")


# Accessor helpers built from the version-specific attribute names above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
    """Return an iterator over the keys of a dictionary."""
    keys_method = getattr(d, _iterkeys)
    return keys_method()


def itervalues(d):
    """Return an iterator over the values of a dictionary."""
    values_method = getattr(d, _itervalues)
    return values_method()


def iteritems(d):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    items_method = getattr(d, _iteritems)
    return items_method()
if PY3:
    def b(s):
        # Byte literal helper: latin-1 maps code points 0-255 one-to-one.
        return s.encode("latin-1")
    def u(s):
        return s
    if sys.version_info[1] <= 1:
        # bytes((i,)) works on 3.0/3.1 where int.to_bytes is slower to reach.
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    def u(s):
        return unicode(s, "unicode_escape")
    int2byte = chr
    import StringIO
    # On Python 2 a plain StringIO serves for both text and bytes.
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
    import builtins
    exec_ = getattr(builtins, "exec")

    def reraise(tp, value, tb=None):
        # Re-raise *value*, attaching *tb* if it differs from the current one.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    print_ = getattr(builtins, "print")
    del builtins

else:
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")

    # The three-argument ``raise`` statement is a syntax error on Python 3,
    # so it must be compiled lazily via exec_ on Python 2 only.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")

    def print_(*args, **kwargs):
        """The new-style print function."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            # Promote the whole line to unicode if any argument is unicode.
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)

_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
    """Create a base class with a metaclass.

    Returns a new class named ``NewBase`` whose metaclass is *meta* and
    whose only parent is *base*; subclassing the result gives the subclass
    that metaclass on both Python 2 and 3.
    """
    namespace = {}
    return meta("NewBase", (base,), namespace)

28
libs/requests/packages/urllib3/poolmanager.py

@ -1,32 +1,27 @@
# urllib3/poolmanager.py
# Copyright 2008-2011 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
from ._collections import RecentlyUsedContainer
from .connectionpool import (
HTTPConnectionPool, HTTPSConnectionPool,
get_host, connection_from_url,
)
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import get_host, connection_from_url, port_by_scheme
from .exceptions import HostChangedError
from .request import RequestMethods
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
from .request import RequestMethods
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
port_by_scheme = {
'http': 80,
'https': 443,
}
log = logging.getLogger(__name__)
class PoolManager(RequestMethods):
@ -105,7 +100,12 @@ class PoolManager(RequestMethods):
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
conn = self.connection_from_url(url)
return conn.urlopen(method, url, assert_same_host=False, **kw)
try:
return conn.urlopen(method, url, **kw)
except HostChangedError as e:
kw['retries'] = e.retries # Persist retries countdown
return self.urlopen(method, e.url, **kw)
class ProxyManager(RequestMethods):

8
libs/requests/packages/urllib3/request.py

@ -1,11 +1,13 @@
# urllib3/request.py
# Copyright 2008-2011 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from urllib import urlencode
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata

83
libs/requests/packages/urllib3/response.py

@ -1,5 +1,5 @@
# urllib3/response.py
# Copyright 2008-2011 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -8,21 +8,22 @@ import gzip
import logging
import zlib
from io import BytesIO
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO # pylint: disable-msg=W0404
from .exceptions import HTTPError
from .exceptions import HTTPError
try:
basestring = basestring
except NameError: # Python 3
basestring = (str, bytes)
log = logging.getLogger(__name__)
def decode_gzip(data):
gzipper = gzip.GzipFile(fileobj=StringIO(data))
gzipper = gzip.GzipFile(fileobj=BytesIO(data))
return gzipper.read()
@ -71,7 +72,7 @@ class HTTPResponse(object):
self.strict = strict
self._decode_content = decode_content
self._body = None
self._body = body if body and isinstance(body, basestring) else None
self._fp = None
self._original_response = original_response
@ -81,9 +82,22 @@ class HTTPResponse(object):
if hasattr(body, 'read'):
self._fp = body
if preload_content:
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in [301, 302, 303, 307]:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
@ -98,10 +112,9 @@ class HTTPResponse(object):
return self._body
if self._fp:
return self.read(decode_content=self._decode_content,
cache_content=True)
return self.read(cache_content=True)
def read(self, amt=None, decode_content=True, cache_content=False):
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
@ -124,22 +137,22 @@ class HTTPResponse(object):
"""
content_encoding = self.headers.get('content-encoding')
decoder = self.CONTENT_DECODERS.get(content_encoding)
if decode_content is None:
decode_content = self._decode_content
data = self._fp and self._fp.read(amt)
if self._fp is None:
return
try:
if amt:
return data
if not decode_content or not decoder:
if cache_content:
self._body = data
return data
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
else:
return self._fp.read(amt)
try:
data = decoder(data)
if decode_content and decoder:
data = decoder(data)
except IOError:
raise HTTPError("Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding)
@ -150,12 +163,11 @@ class HTTPResponse(object):
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
@staticmethod
def from_httplib(r, **response_kw):
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
@ -164,14 +176,17 @@ class HTTPResponse(object):
with ``original_response=r``.
"""
return HTTPResponse(body=r,
headers=dict(r.getheaders()),
status=r.status,
version=r.version,
reason=r.reason,
strict=r.strict,
original_response=r,
**response_kw)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
# In Python 3, the header keys are returned capitalised
headers=dict((k.lower(), v) for k,v in r.getheaders()),
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):

10
libs/requests/sessions.py

@ -25,7 +25,7 @@ def merge_kwargs(local_kwarg, default_kwarg):
if default_kwarg is None:
return local_kwarg
if isinstance(local_kwarg, basestring):
if isinstance(local_kwarg, str):
return local_kwarg
if local_kwarg is None:
@ -40,7 +40,7 @@ def merge_kwargs(local_kwarg, default_kwarg):
kwargs.update(local_kwarg)
# Remove keys that are set to None.
for (k,v) in local_kwarg.items():
for (k,v) in list(local_kwarg.items()):
if v is None:
del kwargs[k]
@ -76,7 +76,7 @@ class Session(object):
self.config = config or {}
self.verify = verify
for (k, v) in defaults.items():
for (k, v) in list(defaults.items()):
self.config.setdefault(k, v)
self.poolmanager = PoolManager(
@ -150,12 +150,12 @@ class Session(object):
verify = self.verify
# use session's hooks as defaults
for key, cb in self.hooks.iteritems():
for key, cb in list(self.hooks.items()):
hooks.setdefault(key, cb)
# Expand header values.
if headers:
for k, v in headers.items() or {}:
for k, v in list(headers.items()) or {}:
headers[k] = header_expand(v)
args = dict(

2
libs/requests/status_codes.py

@ -79,7 +79,7 @@ _codes = {
codes = LookupDict(name='status_codes')
for (code, titles) in _codes.items():
for (code, titles) in list(_codes.items()):
for title in titles:
setattr(codes, title, code)
if not title.startswith('\\'):

4
libs/requests/structures.py

@ -18,7 +18,7 @@ class CaseInsensitiveDict(dict):
@property
def lower_keys(self):
if not hasattr(self, '_lower_keys') or not self._lower_keys:
self._lower_keys = dict((k.lower(), k) for k in self.iterkeys())
self._lower_keys = dict((k.lower(), k) for k in list(self.keys()))
return self._lower_keys
def _clear_lower_keys(self):
@ -63,4 +63,4 @@ class LookupDict(dict):
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
return self.__dict__.get(key, default)

61
libs/requests/utils.py

@ -11,16 +11,28 @@ that are also useful for external consumption.
import cgi
import codecs
import cookielib
import os
import random
import re
import zlib
import urllib
from urllib2 import parse_http_list as _parse_list_header
from .compat import parse_http_list as _parse_list_header
from .compat import quote, unquote, cookielib, SimpleCookie, is_py2
def dict_from_string(s):
"""Returns a MultiDict with Cookies."""
cookies = dict()
c = SimpleCookie()
c.load(s)
for k,v in list(c.items()):
cookies.update({k: v.value})
return cookies
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
@ -132,16 +144,16 @@ def header_expand(headers):
collector = []
if isinstance(headers, dict):
headers = headers.items()
headers = list(headers.items())
elif isinstance(headers, basestring):
elif isinstance(headers, str):
return headers
for i, (value, params) in enumerate(headers):
_params = []
for (p_k, p_v) in params.items():
for (p_k, p_v) in list(params.items()):
_params.append('%s=%s' % (p_k, p_v))
@ -166,17 +178,11 @@ def header_expand(headers):
def randombytes(n):
"""Return n random bytes."""
# Use /dev/urandom if it is available. Fall back to random module
# if not. It might be worthwhile to extend this function to use
# other platform-specific mechanisms for getting random bytes.
if os.path.exists("/dev/urandom"):
f = open("/dev/urandom")
s = f.read(n)
f.close()
return s
else:
if is_py2:
L = [chr(random.randrange(0, 256)) for i in range(n)]
return "".join(L)
else:
L = [chr(random.randrange(0, 256)).encode('utf-8') for i in range(n)]
return b"".join(L)
def dict_from_cookiejar(cj):
@ -187,9 +193,9 @@ def dict_from_cookiejar(cj):
cookie_dict = {}
for _, cookies in cj._cookies.items():
for _, cookies in cookies.items():
for cookie in cookies.values():
for _, cookies in list(cj._cookies.items()):
for _, cookies in list(cookies.items()):
for cookie in list(cookies.values()):
# print cookie
cookie_dict[cookie.name] = cookie.value
@ -221,7 +227,7 @@ def add_dict_to_cookiejar(cj, cookie_dict):
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
for k, v in cookie_dict.items():
for k, v in list(cookie_dict.items()):
cookie = cookielib.Cookie(
version=0,
@ -276,6 +282,9 @@ def get_encoding_from_headers(headers):
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def unicode_from_html(content):
"""Attempts to decode an HTML string into unicode.
@ -287,7 +296,7 @@ def unicode_from_html(content):
for encoding in encodings:
try:
return unicode(content, encoding)
return str(content, encoding)
except (UnicodeError, TypeError):
pass
@ -334,13 +343,13 @@ def get_unicode_from_response(r):
if encoding:
try:
return unicode(r.content, encoding)
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return unicode(r.content, encoding, errors='replace')
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
@ -393,6 +402,6 @@ def requote_path(path):
This function passes the given path through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
parts = path.split("/")
parts = (urllib.quote(urllib.unquote(part), safe="") for part in parts)
return "/".join(parts)
parts = path.split(b"/")
parts = (quote(unquote(part), safe=b"") for part in parts)
return b"/".join(parts)

438
libs/simplejson/__init__.py

@ -1,438 +0,0 @@
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(o) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.1.3'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
# Module-level encoder shared by dump()/dumps() when they are called with
# all-default arguments; avoids constructing a JSONEncoder on every call.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
    use_decimal=False,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    The keyword arguments have the same meaning as in :func:`dumps`:
    ``skipkeys`` drops non-basic dict keys instead of raising ``TypeError``;
    ``ensure_ascii`` escapes non-ASCII output; ``check_circular`` guards
    against circular containers; ``allow_nan`` permits ``NaN``/``Infinity``;
    ``indent`` (string, or int for backwards compatibility) enables pretty
    printing; ``separators`` overrides the default ``(', ', ': ')`` pair;
    ``encoding`` is the charset for ``str`` inputs; ``default(obj)`` handles
    otherwise-unserializable objects; ``use_decimal`` serializes
    ``decimal.Decimal`` natively.  Pass a ``JSONEncoder`` subclass via
    ``cls`` to customize encoding.
    """
    # Fast path: with every option at its default, reuse the shared
    # module-level encoder instead of building a new one per call.
    use_cached_encoder = (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw)
    if use_cached_encoder:
        iterable = _default_encoder.iterencode(obj)
    else:
        encoder_cls = JSONEncoder if cls is None else cls
        iterable = encoder_cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan,
            indent=indent, separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    ``skipkeys`` drops non-basic dict keys instead of raising ``TypeError``;
    ``ensure_ascii`` escapes the result to an ASCII ``str`` (when false, a
    ``unicode`` may be returned); ``check_circular`` guards against circular
    containers; ``allow_nan`` permits ``NaN``/``Infinity``/``-Infinity``;
    ``indent`` (string, or int for backwards compatibility) enables pretty
    printing; ``separators`` overrides the default ``(', ', ': ')`` pair —
    ``(',', ':')`` is the most compact form; ``encoding`` is the charset for
    ``str`` inputs; ``default(obj)`` handles otherwise-unserializable
    objects; ``use_decimal`` serializes ``decimal.Decimal`` natively.  Pass
    a ``JSONEncoder`` subclass via ``cls`` to customize encoding.
    """
    # Fast path: with every option at its default, the shared module-level
    # encoder can be reused instead of constructing a new one.
    use_cached_encoder = (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw)
    if use_cached_encoder:
        return _default_encoder.encode(obj)
    encoder_cls = JSONEncoder if cls is None else cls
    return encoder_cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal, **kw).encode(obj)
# Shared decoder used by load()/loads() when no custom options are given.
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
                               object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize the JSON document read from *fp* (a ``.read()``-supporting
    file-like object) to a Python object.

    This is a convenience wrapper: the whole file is read into memory and
    handed to :func:`loads` together with every keyword argument, so both
    functions accept exactly the same options.

    *encoding* is the encoding used for :class:`str` input (``'utf-8'`` by
    default); it has no effect on :class:`unicode` input. Only encodings
    that are a superset of ASCII work.

    *object_hook* is called with every decoded JSON object (a ``dict``)
    and its return value is used in its place. *object_pairs_hook* is
    called instead with an ordered list of ``(key, value)`` pairs and
    takes priority over *object_hook*; useful e.g. for
    :func:`collections.OrderedDict`.

    *parse_float*, *parse_int* and *parse_constant* override the handling
    of JSON floats, ints and the ``'NaN'``/``'Infinity'``/``'-Infinity'``
    literals respectively (defaults are ``float``, ``int``, and a lookup
    of the corresponding float constants).

    If *use_decimal* is true (default: ``False``) then it implies
    ``parse_float=decimal.Decimal`` for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.
    """
    document = fp.read()
    return loads(document,
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant,
        object_pairs_hook=object_pairs_hook,
        use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    *encoding* is the encoding used for :class:`str` input (``'utf-8'`` by
    default); it has no effect on :class:`unicode` input. Only encodings
    that are a superset of ASCII work.

    *object_hook* is called with every decoded JSON object (a ``dict``)
    and its return value is used in its place. *object_pairs_hook* is
    called instead with an ordered list of ``(key, value)`` pairs and
    takes priority over *object_hook*; useful e.g. for
    :func:`collections.OrderedDict`.

    *parse_float*, *parse_int* and *parse_constant* override the handling
    of JSON floats, ints and the ``'-Infinity'``/``'Infinity'``/``'NaN'``
    literals respectively (defaults are ``float``, ``int``, and a lookup
    of the corresponding float constants).

    If *use_decimal* is true (default: ``False``) then it implies
    ``parse_float=decimal.Decimal`` for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.
    """
    # Fast path: with all-default options, reuse the cached module decoder.
    using_defaults = (cls is None and encoding is None
        and object_hook is None and parse_int is None
        and parse_float is None and parse_constant is None
        and object_pairs_hook is None and not use_decimal and not kw)
    if using_defaults:
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Forward only the hooks the caller actually supplied.
    for option, value in (
            ('object_hook', object_hook),
            ('object_pairs_hook', object_pairs_hook),
            ('parse_float', parse_float),
            ('parse_int', parse_int),
            ('parse_constant', parse_constant)):
        if value is not None:
            kw[option] = value
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
    """Globally switch simplejson between its C ``_speedups`` extension and
    the pure-Python implementations, then rebuild the cached default
    decoder and encoder so they pick up the change."""
    global _default_decoder, _default_encoder
    import simplejson.decoder as dec
    import simplejson.encoder as enc
    import simplejson.scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if not enabled:
        # Force the pure-Python code paths everywhere.
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    else:
        # Prefer the C implementations where the extension provides them.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (
            enc.c_encode_basestring_ascii or enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    dec.make_scanner = scan.make_scanner
    # Recreate the module-level defaults so cached fast paths honor the
    # newly-selected implementations.
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )

2602
libs/simplejson/_speedups.c

File diff suppressed because it is too large

421
libs/simplejson/decoder.py

@ -1,421 +0,0 @@
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
def _import_c_scanstring():
try:
from simplejson._speedups import scanstring
return scanstring
except ImportError:
return None
# C scanstring implementation, or None when _speedups is unavailable.
c_scanstring = _import_c_scanstring()
__all__ = ['JSONDecoder']
# Flags shared by every regular expression compiled in this module.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
    """Return ``(nan, inf, -inf)`` built from raw IEEE-754 bit patterns.

    NOTE(review): Python 2 only -- ``str.decode('hex')`` does not exist
    on Python 3.
    """
    # Big-endian byte patterns for a quiet NaN followed by +Infinity.
    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
    # The struct module in Python 2.4 would get frexp() out of range here
    # when an endian is specified in the format string. Fixed in Python 2.5+
    if sys.byteorder != 'big':
        # Byte-swap each 8-byte double so the native-endian unpack below
        # still sees the intended big-endian bit patterns.
        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
    nan, inf = struct.unpack('dd', _BYTES)
    return nan, inf, -inf
# Module-wide float constants used when parsing NaN/Infinity literals.
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
    """``ValueError`` subclass that carries structured position details.

    Attributes:
        msg: the unformatted error message
        doc: the JSON document being parsed
        pos: start index in ``doc`` where parsing failed
        end: end index in ``doc`` where parsing failed (may be ``None``)
        lineno, colno: line and column corresponding to ``pos``
        endlineno, endcolno: line and column corresponding to ``end``
            (both ``None`` when ``end`` is ``None``)
    """

    def __init__(self, msg, doc, pos, end=None):
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        self.lineno, self.colno = linecol(doc, pos)
        if end is None:
            self.endlineno = self.endcolno = None
        else:
            self.endlineno, self.endcolno = linecol(doc, end)
def linecol(doc, pos):
    """Map index *pos* in *doc* to a (1-based line, column) pair.

    On the first line the column equals *pos*; on later lines it is the
    offset from the preceding newline.
    """
    lineno = doc.count('\n', 0, pos) + 1
    colno = pos if lineno == 1 else pos - doc.rindex('\n', 0, pos)
    return lineno, colno
def errmsg(msg, doc, pos, end=None):
    """Format a parse-error message with line/column information.

    Note that this function is also called from the _speedups extension.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    template = '%s: line %d column %d - line %d column %d (char %d - %d)'
    return template % (msg, lineno, colno, endlineno, endcolno, pos, end)
# Float values for literals that are outside the JSON spec but accepted.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}
# Matches a run of plain characters followed by '"', '\' or a control char.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Lookup table for the single-character JSON escape sequences.
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    # Index of the opening quote, kept for error reporting.
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                #msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: keep the raw control character.
                _append(terminator)
            continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise JSONDecodeError(msg, s, end)
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise JSONDecodeError(msg, s, end)
                uni2 = int(esc2, 16)
                # Combine the lead/trail surrogates into one code point.
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use the C speedup for string scanning if available.
scanstring = c_scanstring or py_scanstring
# Insignificant whitespace as defined by the JSON specification.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
        object_pairs_hook, memo=None,
        _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object from ``s`` starting just after its opening
    ``'{'`` and return ``(object, end_index)``.  ``memo`` interns repeated
    key strings across the document.  (Python 2 tuple-parameter syntax.)"""
    # Backwards compatibility
    if memo is None:
        memo = {}
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end)
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        # Reuse a previously-seen equal key object to save memory.
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise JSONDecodeError("Expecting : delimiter", s, end)
        end += 1
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        pairs.append((key, value))
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end - 1)
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end - 1)
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array from ``s`` starting just after its opening
    ``'['`` and return ``(list, end_index)``.  (Python 2 tuple-parameter
    syntax.)"""
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end)
        # Skip whitespace between the comma and the next value.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default). It has no effect when decoding :class:`unicode` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.

        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`. This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`. This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.

        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded. By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).

        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded. By default, this is equivalent to
        ``int(num_str)``. This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).

        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
        can be used to raise an exception if invalid JSON numbers are
        encountered.

        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        # Fall back to the builtin parsers when no overrides are given.
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # memo interns repeated object keys across one document.
        self.memo = {}
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        # Anything but trailing whitespace after the document is an error.
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj

    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
        return obj, end

501
libs/simplejson/encoder.py

@ -1,501 +0,0 @@
"""Implementation of JSONEncoder
"""
import re
from decimal import Decimal
def _import_speedups():
try:
from simplejson import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
# Bind the C speedups (or (None, None)) once at module import time.
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
from simplejson.decoder import PosInf
# Characters that must be escaped inside a JSON string.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# Characters escaped when producing ASCII-only output.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects non-ASCII bytes in a str that need UTF-8 decoding first.
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Fixed escape sequences for the JSON quoting/control characters.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
# Remaining control characters get generic \uXXXX escapes.
for i in range(0x20):
    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# repr() yields a round-trippable representation for floats here.
FLOAT_REPR = repr
def encode_basestring(s):
    """Return a (possibly non-ASCII) JSON string literal for *s*."""
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    replace = lambda match: ESCAPE_DCT[match.group(0)]
    return u'"' + ESCAPE.sub(replace, s) + u'"'
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON string literal for *s*.

    Characters above the BMP are emitted as a UTF-16 surrogate pair of
    ``\\uXXXX`` escapes.
    """
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')

    def escape(match):
        ch = match.group(0)
        try:
            return ESCAPE_DCT[ch]
        except KeyError:
            codepoint = ord(ch)
            if codepoint < 0x10000:
                return '\\u%04x' % (codepoint,)
            # Encode as a UTF-16 surrogate pair.
            codepoint -= 0x10000
            lead = 0xd800 | ((codepoint >> 10) & 0x3ff)
            trail = 0xdc00 | (codepoint & 0x3ff)
            return '\\u%04x\\u%04x' % (lead, trail)

    return '"' + str(ESCAPE_ASCII.sub(escape, s)) + '"'
# Prefer the C implementation of ASCII escaping when available.
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Class-level defaults; overridden per-instance via *separators*.
    item_separator = ', '
    key_separator = ': '

    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None,
            use_decimal=False):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None. If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped. If
        ensure_ascii is false, the output will be unicode object.

        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such. This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a string, then JSON array elements and object members
        will be pretty-printed with a newline followed by that string repeated
        for each level of nesting. ``None`` (the default) selects the most compact
        representation without any newlines. For backwards compatibility with
        versions of simplejson earlier than 2.1.0, an integer is also accepted
        and is converted to a string with that many spaces.

        If specified, separators should be a (item_separator, key_separator)
        tuple. The default is (', ', ': '). To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized. It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.

        If use_decimal is true (not the default), ``decimal.Decimal`` will
        be supported directly by the encoder. For the inverse, decode JSON
        with ``parse_float=decimal.Decimal``.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.use_decimal = use_decimal
        # Backwards compatibility: an int indent means that many spaces.
        if isinstance(indent, (int, long)):
            indent = ' ' * indent
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            self.default = default
        self.encoding = encoding

    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)

        """
        raise TypeError(repr(o) + " is not JSON serializable")

    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> from simplejson import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'

        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed. The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)

        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the encoder so str input is decoded with self.encoding.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)

        def floatstr(o, allow_nan=self.allow_nan,
                _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on
            # the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)

            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))

            return text

        key_memo = {}
        # Only the compact one-shot path can use the C encoder.
        if (_one_shot and c_make_encoder is not None
                and self.indent is None):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan, key_memo, self.use_decimal)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot, self.use_decimal)
        try:
            return _iterencode(o, 0)
        finally:
            key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
    """A :class:`JSONEncoder` whose output is safe to embed in HTML.

    Inside ``<script>`` tags the characters ``&``, ``<`` and ``>`` cannot
    be escaped with HTML entities (e.g. ``&amp;``), so they are emitted as
    ``\\uXXXX`` escapes instead.
    """

    def encode(self, o):
        # Bypass JSONEncoder.encode()'s fast paths so that every chunk
        # passes through the escaping iterencode() below.
        chunks = list(self.iterencode(o, True))
        if self.ensure_ascii:
            return ''.join(chunks)
        return u''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        for chunk in super(JSONEncoderForHTML, self).iterencode(o, _one_shot):
            yield (chunk.replace('&', '\\u0026')
                        .replace('<', '\\u003c')
                        .replace('>', '\\u003e'))
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        _use_decimal,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        Decimal=Decimal,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    # Build and return the pure-Python streaming encoder generator; used
    # whenever the C _speedups encoder is unavailable or unsuitable.
    # `markers` (or None) tracks object ids for circular-reference checks.

    def _iterencode_list(lst, _current_indent_level):
        # Yield the JSON chunks for a list/tuple.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # After the first element, buf carries only the separator.
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield buf + str(value)
            else:
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield ']'
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(dct, _current_indent_level):
        # Yield the JSON chunks for a dict.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield str(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level dispatch on the value's type.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        elif _use_decimal and isinstance(o, Decimal):
            yield str(o)
        else:
            # Unknown type: let _default() convert it, then re-encode.
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]

    return _iterencode

119
libs/simplejson/ordered_dict.py

@ -1,119 +0,0 @@
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
    # Python >= 2.5 ships ``all`` as a builtin; probe for it by name.
    all
except NameError:
    def all(seq):
        """Fallback ``all`` for Python 2.4: true iff every element is truthy."""
        for elem in seq:
            if not elem:
                return False
        return True
class OrderedDict(dict, DictMixin):
    """Dictionary that remembers insertion order.

    Ordering is kept in a circular doubly linked list anchored by the
    sentinel ``self.__end`` plus a key -> node map ``self.__map``; the
    inherited dict stores the actual values.
    """

    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Probe for existing linked-list state (re-__init__ on a live
            # instance must not discard current ordering).
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)

    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        if key not in self:
            # Splice a fresh node in just before the sentinel (list tail).
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the node from the doubly linked list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        # Walk forward (insertion order) from the sentinel.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        # Walk backward from the sentinel.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def popitem(self, last=True):
        """Remove and return a ``(key, value)`` pair; LIFO when *last* is true."""
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        """Pickle support: temporarily strip the linked-list attributes so
        only items plus any extra instance state get serialized."""
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    # DictMixin derives these from the primitives defined above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Order-sensitive comparison against another OrderedDict; fall back
        # to plain dict equality for everything else.
        if isinstance(other, OrderedDict):
            return len(self) == len(other) and \
                   all(p == q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

77
libs/simplejson/scanner.py

@ -1,77 +0,0 @@
"""JSON token scanner
"""
import re
def _import_c_make_scanner():
try:
from simplejson._speedups import make_scanner
return make_scanner
except ImportError:
return None
c_make_scanner = _import_c_make_scanner()
__all__ = ['make_scanner']

# Matches a JSON number: integer part, optional fraction, optional exponent.
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
    """Build a pure-Python ``scan_once(string, idx)`` for *context*.

    All decoder callbacks and options are hoisted into locals up front so
    the recursive closure avoids repeated attribute lookups on the hot path.
    """
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    object_pairs_hook = context.object_pairs_hook
    memo = context.memo

    def _scan_once(string, idx):
        # Returns (value, end_index); raises StopIteration at end of input
        # or on an unrecognized token.
        try:
            nextchar = string[idx]
        except IndexError:
            raise StopIteration
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict,
                _scan_once, object_hook, object_pairs_hook, memo)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        # Numbers are tried before the remaining constants; NaN/Infinity
        # start with letters the regex cannot match, '-Infinity' fails the
        # regex because no digit follows the sign.
        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise StopIteration

    def scan_once(string, idx):
        # Public entry point: clear the per-scan key memo once the
        # top-level value has been consumed.
        try:
            return _scan_once(string, idx)
        finally:
            memo.clear()

    return scan_once

# Prefer the C implementation when the extension imported successfully.
make_scanner = c_make_scanner or py_make_scanner

39
libs/simplejson/tool.py

@ -1,39 +0,0 @@
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import simplejson as json
def main():
    """Command-line entry point: validate JSON read from ``infile`` (or
    stdin) and pretty-print it to ``outfile`` (or stdout).

    Exits via SystemExit with a usage message when given more than two
    arguments, or with the parser's message when the input is invalid.
    """
    if len(sys.argv) == 1:
        infile = sys.stdin
        outfile = sys.stdout
    elif len(sys.argv) == 2:
        infile = open(sys.argv[1], 'rb')
        outfile = sys.stdout
    elif len(sys.argv) == 3:
        infile = open(sys.argv[1], 'rb')
        outfile = open(sys.argv[2], 'wb')
    else:
        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
    # BUG FIX: previously the opened file handles were never closed (leaked
    # in particular when json.load raised).  Also use the Py2.6+/Py3
    # compatible ``except ... as`` form instead of ``except ValueError, e``.
    try:
        try:
            obj = json.load(infile,
                            object_pairs_hook=json.OrderedDict,
                            use_decimal=True)
        except ValueError as e:
            raise SystemExit(e)
        json.dump(obj, outfile, sort_keys=True, indent=' ', use_decimal=True)
        outfile.write('\n')
    finally:
        # Close only files we opened ourselves; leave std streams alone.
        if infile is not sys.stdin:
            infile.close()
        if outfile is not sys.stdout:
            outfile.close()

if __name__ == '__main__':
    main()

11
libs/sqlalchemy/__init__.py

@ -1,5 +1,5 @@
# sqlalchemy/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -8,7 +8,6 @@ import inspect
import sys
import sqlalchemy.exc as exceptions
sys.modules['sqlalchemy.exceptions'] = exceptions
from sqlalchemy.sql import (
alias,
@ -39,6 +38,7 @@ from sqlalchemy.sql import (
or_,
outerjoin,
outparam,
over,
select,
subquery,
text,
@ -75,6 +75,7 @@ from sqlalchemy.types import (
NUMERIC,
Numeric,
PickleType,
REAL,
SMALLINT,
SmallInteger,
String,
@ -83,6 +84,7 @@ from sqlalchemy.types import (
TIMESTAMP,
Text,
Time,
TypeDecorator,
Unicode,
UnicodeText,
VARCHAR,
@ -115,6 +117,9 @@ from sqlalchemy.engine import create_engine, engine_from_config
__all__ = sorted(name for name, obj in locals().items()
if not (name.startswith('_') or inspect.ismodule(obj)))
__version__ = '0.6.6'
__version__ = '0.7.5'
del inspect, sys
from sqlalchemy import util as _sa_util
_sa_util.importlater.resolve_all()

64
libs/sqlalchemy/cextension/processors.c

@ -66,13 +66,24 @@ str_to_datetime(PyObject *self, PyObject *arg)
{
const char *str;
unsigned int year, month, day, hour, minute, second, microsecond = 0;
PyObject *err_repr;
if (arg == Py_None)
Py_RETURN_NONE;
str = PyString_AsString(arg);
if (str == NULL)
if (str == NULL) {
err_repr = PyObject_Repr(arg);
if (err_repr == NULL)
return NULL;
PyErr_Format(
PyExc_ValueError,
"Couldn't parse datetime string '%.200s' "
"- value is not a string.",
PyString_AsString(err_repr));
Py_DECREF(err_repr);
return NULL;
}
/* microseconds are optional */
/*
@ -82,7 +93,14 @@ str_to_datetime(PyObject *self, PyObject *arg)
*/
if (sscanf(str, "%4u-%2u-%2u %2u:%2u:%2u.%6u", &year, &month, &day,
&hour, &minute, &second, &microsecond) < 6) {
PyErr_SetString(PyExc_ValueError, "Couldn't parse datetime string.");
err_repr = PyObject_Repr(arg);
if (err_repr == NULL)
return NULL;
PyErr_Format(
PyExc_ValueError,
"Couldn't parse datetime string: %.200s",
PyString_AsString(err_repr));
Py_DECREF(err_repr);
return NULL;
}
return PyDateTime_FromDateAndTime(year, month, day,
@ -94,13 +112,23 @@ str_to_time(PyObject *self, PyObject *arg)
{
const char *str;
unsigned int hour, minute, second, microsecond = 0;
PyObject *err_repr;
if (arg == Py_None)
Py_RETURN_NONE;
str = PyString_AsString(arg);
if (str == NULL)
if (str == NULL) {
err_repr = PyObject_Repr(arg);
if (err_repr == NULL)
return NULL;
PyErr_Format(
PyExc_ValueError,
"Couldn't parse time string '%.200s' - value is not a string.",
PyString_AsString(err_repr));
Py_DECREF(err_repr);
return NULL;
}
/* microseconds are optional */
/*
@ -110,7 +138,14 @@ str_to_time(PyObject *self, PyObject *arg)
*/
if (sscanf(str, "%2u:%2u:%2u.%6u", &hour, &minute, &second,
&microsecond) < 3) {
PyErr_SetString(PyExc_ValueError, "Couldn't parse time string.");
err_repr = PyObject_Repr(arg);
if (err_repr == NULL)
return NULL;
PyErr_Format(
PyExc_ValueError,
"Couldn't parse time string: %.200s",
PyString_AsString(err_repr));
Py_DECREF(err_repr);
return NULL;
}
return PyTime_FromTime(hour, minute, second, microsecond);
@ -121,16 +156,33 @@ str_to_date(PyObject *self, PyObject *arg)
{
const char *str;
unsigned int year, month, day;
PyObject *err_repr;
if (arg == Py_None)
Py_RETURN_NONE;
str = PyString_AsString(arg);
if (str == NULL)
if (str == NULL) {
err_repr = PyObject_Repr(arg);
if (err_repr == NULL)
return NULL;
PyErr_Format(
PyExc_ValueError,
"Couldn't parse date string '%.200s' - value is not a string.",
PyString_AsString(err_repr));
Py_DECREF(err_repr);
return NULL;
}
if (sscanf(str, "%4u-%2u-%2u", &year, &month, &day) != 3) {
PyErr_SetString(PyExc_ValueError, "Couldn't parse date string.");
err_repr = PyObject_Repr(arg);
if (err_repr == NULL)
return NULL;
PyErr_Format(
PyExc_ValueError,
"Couldn't parse date string: %.200s",
PyString_AsString(err_repr));
Py_DECREF(err_repr);
return NULL;
}
return PyDate_FromDate(year, month, day);

6
libs/sqlalchemy/cextension/resultproxy.c

@ -13,6 +13,8 @@ typedef int Py_ssize_t;
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
typedef Py_ssize_t (*lenfunc)(PyObject *);
#define PyInt_FromSsize_t(x) PyInt_FromLong(x)
typedef intargfunc ssizeargfunc;
#endif
@ -276,7 +278,7 @@ BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
return NULL;
}
indexobject = PyTuple_GetItem(record, 1);
indexobject = PyTuple_GetItem(record, 2);
if (indexobject == NULL)
return NULL;
@ -296,7 +298,7 @@ BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
return NULL;
PyErr_Format(exception,
"Ambiguous column name '%s' in result set! "
"Ambiguous column name '%.200s' in result set! "
"try 'use_labels' option on select statement.", cstr_key);
return NULL;
}

2
libs/sqlalchemy/connectors/__init__.py

@ -1,5 +1,5 @@
# connectors/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

11
libs/sqlalchemy/connectors/mxodbc.py

@ -1,5 +1,5 @@
# connectors/mxodbc.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -21,11 +21,8 @@ For more info on mxODBC, see http://www.egenix.com/
import sys
import re
import warnings
from decimal import Decimal
from sqlalchemy.connectors import Connector
from sqlalchemy import types as sqltypes
import sqlalchemy.processors as processors
class MxODBCConnector(Connector):
driver='mxodbc'
@ -109,9 +106,9 @@ class MxODBCConnector(Connector):
opts.pop('database', None)
return (args,), opts
def is_disconnect(self, e):
# eGenix recommends checking connection.closed here,
# but how can we get a handle on the current connection?
def is_disconnect(self, e, connection, cursor):
# TODO: eGenix recommends checking connection.closed here
# Does that detect dropped connections ?
if isinstance(e, self.dbapi.ProgrammingError):
return "connection already closed" in str(e)
elif isinstance(e, self.dbapi.Error):

150
libs/sqlalchemy/connectors/mysqldb.py

@ -0,0 +1,150 @@
"""Define behaviors common to MySQLdb dialects.
Currently includes MySQL and Drizzle.
"""
from sqlalchemy.connectors import Connector
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import exc, log, schema, sql, types as sqltypes, util
from sqlalchemy import processors
import re
# the subclassing of Connector by all classes
# here is not strictly necessary
class MySQLDBExecutionContext(Connector):
    """Execution-context mixin: report the rowcount captured by
    ``do_executemany()`` when one was stashed on the context."""

    @property
    def rowcount(self):
        # ``do_executemany`` stores its aggregate count as ``_rowcount``;
        # otherwise defer to the live cursor attribute.
        try:
            return self._rowcount
        except AttributeError:
            return self.cursor.rowcount
class MySQLDBCompiler(Connector):
    """Compiler mixin: double literal ``%`` characters so the DBAPI's
    ``format`` paramstyle does not mistake them for bind markers."""

    def visit_mod(self, binary, **kw):
        left = self.process(binary.left)
        right = self.process(binary.right)
        return left + " %% " + right

    def post_process_text(self, text):
        # Equivalent to text.replace('%', '%%').
        return "%%".join(text.split("%"))
class MySQLDBIdentifierPreparer(Connector):
    """Identifier-preparer mixin: after the usual quote escaping, also
    double ``%`` so quoted identifiers survive the format paramstyle."""

    def _escape_identifier(self, value):
        escaped = value.replace(self.escape_quote, self.escape_to_quote)
        return escaped.replace("%", "%%")
class MySQLDBConnector(Connector):
    """Connector behavior shared by MySQLdb-compatible DBAPIs
    (MySQLdb itself; ``dbapi()`` is overridden for pymysql)."""

    driver = 'mysqldb'
    supports_unicode_statements = False
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True
    supports_native_decimal = True

    default_paramstyle = 'format'

    @classmethod
    def dbapi(cls):
        # is overridden when pymysql is used
        return __import__('MySQLdb')

    def do_executemany(self, cursor, statement, parameters, context=None):
        # Stash the aggregate rowcount on the context so the execution
        # context's ``rowcount`` property can report it later.
        rowcount = cursor.executemany(statement, parameters)
        if context is not None:
            context._rowcount = rowcount

    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into MySQLdb connect() arguments."""
        opts = url.translate_connect_args(database='db', username='user',
                                          password='passwd')
        opts.update(url.query)

        # Query-string values arrive as strings; coerce the known options.
        util.coerce_kw_type(opts, 'compress', bool)
        util.coerce_kw_type(opts, 'connect_timeout', int)
        util.coerce_kw_type(opts, 'client_flag', int)
        util.coerce_kw_type(opts, 'local_infile', int)
        # Note: using either of the below will cause all strings to be returned
        # as Unicode, both in raw SQL operations and with column types like
        # String and MSString.
        util.coerce_kw_type(opts, 'use_unicode', bool)
        util.coerce_kw_type(opts, 'charset', str)

        # Rich values 'cursorclass' and 'conv' are not supported via
        # query string.

        # Collect ssl_* URL options into the single 'ssl' dict MySQLdb expects.
        ssl = {}
        for key in ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']:
            if key in opts:
                ssl[key[4:]] = opts[key]
                util.coerce_kw_type(ssl, key[4:], str)
                del opts[key]
        if ssl:
            opts['ssl'] = ssl

        # FOUND_ROWS must be set in CLIENT_FLAGS to enable
        # supports_sane_rowcount.
        client_flag = opts.get('client_flag', 0)
        if self.dbapi is not None:
            try:
                CLIENT_FLAGS = __import__(
                    self.dbapi.__name__ + '.constants.CLIENT'
                ).constants.CLIENT
                client_flag |= CLIENT_FLAGS.FOUND_ROWS
            except (AttributeError, ImportError):
                # Driver exposes no CLIENT constants; UPDATE rowcounts will
                # then report "rows changed" rather than "rows matched".
                self.supports_sane_rowcount = False
        opts['client_flag'] = client_flag
        return [[], opts]

    def _get_server_version_info(self, connection):
        """Parse the server version string into a tuple of ints/strings."""
        dbapi_con = connection.connection
        version = []
        r = re.compile('[.\-]')
        for n in r.split(dbapi_con.get_server_info()):
            try:
                version.append(int(n))
            except ValueError:
                # Non-numeric fragments (e.g. 'log', 'MariaDB') kept as-is.
                version.append(n)
        return tuple(version)

    def _extract_error_code(self, exception):
        # MySQLdb exceptions carry the numeric error code as args[0].
        return exception.args[0]

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""
        # Note: MySQL-python 1.2.1c7 seems to ignore changes made
        # on a connection via set_character_set()
        if self.server_version_info < (4, 1, 0):
            try:
                return connection.connection.character_set_name()
            except AttributeError:
                # < 1.2.1 final MySQL-python drivers have no charset support.
                # a query is needed.
                pass

        # Prefer 'character_set_results' for the current connection over the
        # value in the driver. SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])

        if 'character_set_results' in opts:
            return opts['character_set_results']
        try:
            return connection.connection.character_set_name()
        except AttributeError:
            # Still no charset on < 1.2.1 final...
            if 'character_set' in opts:
                return opts['character_set']
            else:
                util.warn(
                    "Could not detect the connection character set with this "
                    "combination of MySQL server and MySQL-python. "
                    "MySQL-python >= 1.2.2 is recommended. Assuming latin1.")
                return 'latin1'

44
libs/sqlalchemy/connectors/pyodbc.py

@ -1,5 +1,5 @@
# connectors/pyodbc.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -10,7 +10,6 @@ from sqlalchemy.util import asbool
import sys
import re
import urllib
import decimal
class PyODBCConnector(Connector):
driver='pyodbc'
@ -30,6 +29,14 @@ class PyODBCConnector(Connector):
# if the freetds.so is detected
freetds = False
# will be set to the string version of
# the FreeTDS driver if freetds is detected
freetds_driver_version = None
# will be set to True after initialize()
# if the libessqlsrv.so is detected
easysoft = False
@classmethod
def dbapi(cls):
return __import__('pyodbc')
@ -82,7 +89,7 @@ class PyODBCConnector(Connector):
connectors.extend(['%s=%s' % (k,v) for k,v in keys.iteritems()])
return [[";".join (connectors)], connect_args]
def is_disconnect(self, e):
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.ProgrammingError):
return "The cursor's connection has been closed." in str(e) or \
'Attempt to use a closed connection.' in str(e)
@ -99,20 +106,43 @@ class PyODBCConnector(Connector):
dbapi_con = connection.connection
self.freetds = bool(re.match(r".*libtdsodbc.*\.so",
dbapi_con.getinfo(pyodbc.SQL_DRIVER_NAME)
_sql_driver_name = dbapi_con.getinfo(pyodbc.SQL_DRIVER_NAME)
self.freetds = bool(re.match(r".*libtdsodbc.*\.so", _sql_driver_name
))
self.easysoft = bool(re.match(r".*libessqlsrv.*\.so", _sql_driver_name
))
if self.freetds:
self.freetds_driver_version = dbapi_con.getinfo(pyodbc.SQL_DRIVER_VER)
# the "Py2K only" part here is theoretical.
# have not tried pyodbc + python3.1 yet.
# Py2K
self.supports_unicode_statements = not self.freetds
self.supports_unicode_binds = not self.freetds
self.supports_unicode_statements = not self.freetds and not self.easysoft
self.supports_unicode_binds = (not self.freetds or
self.freetds_driver_version >= '0.91') and not self.easysoft
# end Py2K
# run other initialization which asks for user name, etc.
super(PyODBCConnector, self).initialize(connection)
def _dbapi_version(self):
if not self.dbapi:
return ()
return self._parse_dbapi_version(self.dbapi.version)
def _parse_dbapi_version(self, vers):
m = re.match(
r'(?:py.*-)?([\d\.]+)(?:-(\w+))?',
vers
)
if not m:
return ()
vers = tuple([int(x) for x in m.group(1).split(".")])
if m.group(2):
vers += (m.group(2),)
return vers
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []

4
libs/sqlalchemy/connectors/zxJDBC.py

@ -1,5 +1,5 @@
# connectors/zxJDBC.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -46,7 +46,7 @@ class ZxJDBCConnector(Connector):
self.jdbc_driver_name],
opts]
def is_disconnect(self, e):
def is_disconnect(self, e, connection, cursor):
if not isinstance(e, self.dbapi.ProgrammingError):
return False
e = str(e)

4
libs/sqlalchemy/databases/__init__.py

@ -1,5 +1,5 @@
# databases/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -12,6 +12,7 @@ from sqlalchemy.dialects.sqlite import base as sqlite
from sqlalchemy.dialects.postgresql import base as postgresql
postgres = postgresql
from sqlalchemy.dialects.mysql import base as mysql
from sqlalchemy.dialects.drizzle import base as drizzle
from sqlalchemy.dialects.oracle import base as oracle
from sqlalchemy.dialects.firebird import base as firebird
from sqlalchemy.dialects.maxdb import base as maxdb
@ -23,6 +24,7 @@ from sqlalchemy.dialects.sybase import base as sybase
__all__ = (
'access',
'drizzle',
'firebird',
'informix',
'maxdb',

3
libs/sqlalchemy/dialects/__init__.py

@ -1,11 +1,12 @@
# dialects/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
__all__ = (
# 'access',
'drizzle',
'firebird',
# 'informix',
# 'maxdb',

9
libs/sqlalchemy/dialects/access/base.py

@ -9,9 +9,9 @@
"""
Support for the Microsoft Access database.
This dialect is *not* ported to SQLAlchemy 0.6.
This dialect is *not* ported to SQLAlchemy 0.6 or 0.7.
This dialect is *not* tested on SQLAlchemy 0.6.
This dialect is *not* tested on SQLAlchemy 0.6 or 0.7.
"""
@ -51,15 +51,10 @@ class AcSmallInteger(types.SmallInteger):
return "SMALLINT"
class AcDateTime(types.DateTime):
def __init__(self, *a, **kw):
super(AcDateTime, self).__init__(False)
def get_col_spec(self):
return "DATETIME"
class AcDate(types.Date):
def __init__(self, *a, **kw):
super(AcDate, self).__init__(False)
def get_col_spec(self):
return "DATETIME"

18
libs/sqlalchemy/dialects/drizzle/__init__.py

@ -0,0 +1,18 @@
from sqlalchemy.dialects.drizzle import base, mysqldb

# default dialect
base.dialect = mysqldb.dialect

from sqlalchemy.dialects.drizzle.base import \
    BIGINT, BINARY, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
    DECIMAL, DOUBLE, ENUM, \
    FLOAT, INTEGER, \
    NUMERIC, REAL, TEXT, TIME, TIMESTAMP, \
    VARBINARY, VARCHAR, dialect

# BUG FIX: 'SET' was listed in __all__ but is never imported above (Drizzle
# has no SET type), so ``from sqlalchemy.dialects.drizzle import *`` raised
# AttributeError.  __all__ now matches exactly the names imported here.
__all__ = (
    'BIGINT', 'BINARY', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
    'DECIMAL', 'DOUBLE', 'ENUM', 'FLOAT', 'INTEGER',
    'NUMERIC', 'REAL', 'TEXT', 'TIME', 'TIMESTAMP',
    'VARBINARY', 'VARCHAR', 'dialect'
)

582
libs/sqlalchemy/dialects/drizzle/base.py

@ -0,0 +1,582 @@
# drizzle/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2010-2011 Monty Taylor <mordred@inaugust.com>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Drizzle database.
Supported Versions and Features
-------------------------------
SQLAlchemy supports the Drizzle database starting with 2010.08.
with capabilities increasing with more modern servers.
Most available DBAPI drivers are supported; see below.
===================================== ===============
Feature Minimum Version
===================================== ===============
sqlalchemy.orm 2010.08
Table Reflection 2010.08
DDL Generation 2010.08
utf8/Full Unicode Connections 2010.08
Transactions 2010.08
Two-Phase Transactions 2010.08
Nested Transactions 2010.08
===================================== ===============
See the official Drizzle documentation for detailed information about features
supported in any given server release.
Connecting
----------
See the API documentation on individual drivers for details on connecting.
Connection Timeouts
-------------------
Drizzle features an automatic connection close behavior, for connections that
have been idle for eight hours or more. To avoid this issue, use
the ``pool_recycle`` option which controls the maximum age of any connection::
engine = create_engine('drizzle+mysqldb://...', pool_recycle=3600)
Storage Engines
---------------
Drizzle defaults to the ``InnoDB`` storage engine, which is transactional.
Storage engines can be elected when creating tables in SQLAlchemy by supplying
a ``drizzle_engine='whatever'`` to the ``Table`` constructor. Any Drizzle table
creation option can be specified in this syntax::
Table('mytable', metadata,
Column('data', String(32)),
drizzle_engine='InnoDB',
)
Keys
----
Not all Drizzle storage engines support foreign keys. For ``BlitzDB`` and
similar engines, the information loaded by table reflection will not include
foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload=True
)
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
an integer primary key column::
>>> t = Table('mytable', metadata,
... Column('mytable_id', Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
id INTEGER NOT NULL AUTO_INCREMENT,
PRIMARY KEY (id)
)
You can disable this behavior by supplying ``autoincrement=False`` to the
:class:`~sqlalchemy.Column`. This flag can also be used to enable
auto-increment on a secondary column in a multi-column key for some storage
engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
Drizzle SQL Extensions
----------------------
Many of the Drizzle SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
And of course any valid Drizzle statement can be executed as a string as well.
Some limited direct support for Drizzle extensions to SQL is currently
available.
* SELECT pragma::
select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
* UPDATE with LIMIT::
update(..., drizzle_limit=10)
"""
import datetime, inspect, re, sys
from sqlalchemy import schema as sa_schema
from sqlalchemy import exc, log, sql, util
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy.sql import functions as sql_functions
from sqlalchemy.sql import compiler
from array import array as _array
from sqlalchemy.engine import reflection
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy import types as sqltypes
from sqlalchemy.dialects.mysql import base as mysql_dialect
from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \
BLOB, BINARY, VARBINARY
class _NumericType(object):
    """Base for Drizzle numeric types.

    Mixin placed ahead of the generic SQLAlchemy type in the MRO so shared
    keyword arguments flow through a single cooperative ``__init__``.
    """

    def __init__(self, **kw):
        super(_NumericType, self).__init__(**kw)
class _FloatType(_NumericType, sqltypes.Float):
    """Shared constructor for Drizzle float-family types."""

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        # REAL and DOUBLE accept precision and scale either together or
        # not at all; specifying only one of them is an error.
        if isinstance(self, (REAL, DOUBLE)):
            if (precision is None) != (scale is None):
                raise exc.ArgumentError(
                    "You must specify both precision and scale or omit "
                    "both altogether.")

        # Note: ``scale`` is intentionally not forwarded to Float.__init__;
        # it is recorded directly on the instance instead.
        super(_FloatType, self).__init__(precision=precision,
                                         asdecimal=asdecimal, **kw)
        self.scale = scale
class _StringType(mysql_dialect._StringType):
    """Base for Drizzle string types."""

    def __init__(self, collation=None, binary=False, **kw):
        # Drizzle has no NATIONAL character variants; force the flag off
        # before handing over to the MySQL string-type base.
        kw.update(national=False, collation=collation, binary=binary)
        super(_StringType, self).__init__(**kw)
class NUMERIC(_NumericType, sqltypes.NUMERIC):
    """Drizzle NUMERIC type."""

    __visit_name__ = 'NUMERIC'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a NUMERIC.

        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.
        """
        kw.update(precision=precision, scale=scale, asdecimal=asdecimal)
        super(NUMERIC, self).__init__(**kw)
class DECIMAL(_NumericType, sqltypes.DECIMAL):
    """Drizzle DECIMAL type."""

    __visit_name__ = 'DECIMAL'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DECIMAL.

        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.
        """
        kw.update(precision=precision, scale=scale, asdecimal=asdecimal)
        super(DECIMAL, self).__init__(**kw)
class DOUBLE(_FloatType):
    """Drizzle DOUBLE type."""

    __visit_name__ = 'DOUBLE'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DOUBLE.

        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.
        """
        kw.update(precision=precision, scale=scale, asdecimal=asdecimal)
        super(DOUBLE, self).__init__(**kw)
class REAL(_FloatType, sqltypes.REAL):
    """Drizzle REAL type."""

    __visit_name__ = 'REAL'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a REAL.

        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.
        """
        kw.update(precision=precision, scale=scale, asdecimal=asdecimal)
        super(REAL, self).__init__(**kw)
class FLOAT(_FloatType, sqltypes.FLOAT):
    """Drizzle FLOAT type."""

    __visit_name__ = 'FLOAT'

    def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
        """Construct a FLOAT.

        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.
        """
        kw.update(precision=precision, scale=scale, asdecimal=asdecimal)
        super(FLOAT, self).__init__(**kw)

    def bind_processor(self, dialect):
        # No bind-time conversion is applied to FLOAT values.
        return None
class INTEGER(sqltypes.INTEGER):
    """Drizzle INTEGER type."""

    __visit_name__ = 'INTEGER'

    # Thin override of the generic INTEGER; exists to carry Drizzle docs.
    def __init__(self, **kw):
        """Construct an INTEGER.

        """
        super(INTEGER, self).__init__(**kw)
class BIGINT(sqltypes.BIGINT):
    """Drizzle BIGINTEGER type."""

    __visit_name__ = 'BIGINT'

    # Thin override of the generic BIGINT; exists to carry Drizzle docs.
    def __init__(self, **kw):
        """Construct a BIGINTEGER.

        """
        super(BIGINT, self).__init__(**kw)
# Inherits its behavior entirely from the MySQL TIME implementation.
class _DrizzleTime(mysql_dialect._MSTime):
    """Drizzle TIME type."""
# Subclassed only to give the type a Drizzle-specific home and visit name.
class TIMESTAMP(sqltypes.TIMESTAMP):
    """Drizzle TIMESTAMP type."""

    __visit_name__ = 'TIMESTAMP'
class TEXT(_StringType, sqltypes.TEXT):
    """Drizzle TEXT type, for text up to 2^16 characters."""

    __visit_name__ = 'TEXT'

    def __init__(self, length=None, **kw):
        """Construct a TEXT.

        :param length: Optional, if provided the server may optimize storage
          by substituting the smallest TEXT type sufficient to store
          ``length`` characters.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence to 'binary' short-hand.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.
        """
        kw.update(length=length)
        super(TEXT, self).__init__(**kw)
class VARCHAR(_StringType, sqltypes.VARCHAR):
    """Drizzle VARCHAR type, for variable-length character data."""

    __visit_name__ = 'VARCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct a VARCHAR.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence to 'binary' short-hand.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.
        """
        kwargs.update(length=length)
        super(VARCHAR, self).__init__(**kwargs)
class CHAR(_StringType, sqltypes.CHAR):
    """Drizzle CHAR type, for fixed-length character data."""

    __visit_name__ = 'CHAR'

    def __init__(self, length=None, **kwargs):
        """Construct a CHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set.  This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation.  Must be
          compatible with the national character set.
        """
        kwargs.update(length=length)
        super(CHAR, self).__init__(**kwargs)
class ENUM(mysql_dialect.ENUM):
    """Drizzle ENUM type."""

    def __init__(self, *enums, **kw):
        """Construct an ENUM.

        Example:

          Column('myenum', ENUM("foo", "bar", "baz"))

        :param enums: the permissible values for this ENUM.  Each value
          is quoted in the generated schema according to the ``quoting``
          flag (below).

        :param strict: defaults to False.  When True, a value being
          inserted or updated is checked against the ENUM's range.  Note
          that Drizzle itself never raises a fatal error for an
          out-of-range value - an alternate value is silently stored
          instead (see the Drizzle ENUM documentation).

        :param collation: optional column-level collation for this
          string value; takes precedence over the ``binary`` shorthand.

        :param binary: defaults to False.  Shorthand that selects the
          binary collation matching the column's character set and emits
          BINARY in the schema.  This affects only the collation of
          character data, not the type of data stored.

        :param quoting: defaults to 'auto', which inspects the enum
          values: if every value carries the same surrounding quote
          character, 'quoted' mode is assumed; otherwise 'unquoted'.

          'quoted': values are taken as already quoted and emitted
          verbatim when generating the schema - deprecated usage.

          'unquoted': values are unquoted; they are escaped and wrapped
          in single quotes when the schema is generated.

          Earlier versions of this type always required manually quoted
          values; future versions will always quote the string literals
          for you.  This is a transitional option.
        """
        super(ENUM, self).__init__(*enums, **kw)
class _DrizzleBoolean(sqltypes.Boolean):
    """Boolean implementation for Drizzle."""

    def get_dbapi_type(self, dbapi):
        # Booleans are transported as NUMERIC at the DB-API level.
        return dbapi.NUMERIC
# Map generic SQLAlchemy types to their Drizzle-specific implementations;
# consumed by the dialect's ``colspecs`` attribute below.
colspecs = {
    sqltypes.Numeric: NUMERIC,
    sqltypes.Float: FLOAT,
    sqltypes.Time: _DrizzleTime,
    sqltypes.Enum: ENUM,
    sqltypes.Boolean: _DrizzleBoolean,
}

# All the types we have in Drizzle
# (reflection lookup: server type-name string -> type class).
ischema_names = {
    'BIGINT': BIGINT,
    'BINARY': BINARY,
    'BLOB': BLOB,
    'BOOLEAN': BOOLEAN,
    'CHAR': CHAR,
    'DATE': DATE,
    'DATETIME': DATETIME,
    'DECIMAL': DECIMAL,
    'DOUBLE': DOUBLE,
    'ENUM': ENUM,
    'FLOAT': FLOAT,
    'INT': INTEGER,
    'INTEGER': INTEGER,
    'NUMERIC': NUMERIC,
    'TEXT': TEXT,
    'TIME': TIME,
    'TIMESTAMP': TIMESTAMP,
    'VARBINARY': VARBINARY,
    'VARCHAR': VARCHAR,
}
class DrizzleCompiler(mysql_dialect.MySQLCompiler):
    """SQL statement compiler for Drizzle."""

    def visit_typeclause(self, typeclause):
        """Render the target type of a CAST, folding all integer
        variants down to plain INTEGER."""
        impl = typeclause.type.dialect_impl(self.dialect)
        if not isinstance(impl, sqltypes.Integer):
            return super(DrizzleCompiler, self).visit_typeclause(typeclause)
        return 'INTEGER'

    def visit_cast(self, cast, **kwargs):
        """Render CAST(expr AS type), or just the bare expression when
        the target type has no renderable form."""
        target = self.process(cast.typeclause)
        if target is None:
            return self.process(cast.clause)
        return 'CAST(%s AS %s)' % (self.process(cast.clause), target)
class DrizzleDDLCompiler(mysql_dialect.MySQLDDLCompiler):
    """DDL compiler for Drizzle; inherits MySQL behavior unchanged."""
    pass
class DrizzleTypeCompiler(mysql_dialect.MySQLTypeCompiler):
    """Renders column-type DDL fragments for Drizzle."""

    def _extend_numeric(self, type_, spec):
        # Drizzle has no numeric display/UNSIGNED extensions; return the
        # base spec untouched.
        return spec

    def _extend_string(self, type_, defaults, spec):
        """Extend a string-type declaration with standard SQL
        COLLATE annotations and Drizzle specific extensions.

        :param type_: the string type instance being rendered.
        :param defaults: fallback attribute values used when ``type_``
          does not define them.
        :param spec: the base type specification (e.g. ``VARCHAR(30)``).
        """
        def attr(name):
            return getattr(type_, name, defaults.get(name))

        if attr('collation'):
            collation = 'COLLATE %s' % type_.collation
        elif attr('binary'):
            # 'binary' shorthand: pick the binary collation of the
            # column's character set.
            collation = 'BINARY'
        else:
            collation = None

        return ' '.join([c for c in (spec, collation)
                         if c is not None])

    # NOTE: the parameter below was renamed from ``type`` (which shadows
    # the builtin) to ``type_`` for consistency with every other
    # visit_* method in this class; visitor dispatch is positional.
    def visit_NCHAR(self, type_):
        raise NotImplementedError("Drizzle does not support NCHAR")

    def visit_NVARCHAR(self, type_):
        raise NotImplementedError("Drizzle does not support NVARCHAR")

    def visit_FLOAT(self, type_):
        # Render precision/scale only when both are present.
        if type_.scale is not None and type_.precision is not None:
            return "FLOAT(%s, %s)" % (type_.precision, type_.scale)
        else:
            return "FLOAT"

    def visit_BOOLEAN(self, type_):
        return "BOOLEAN"

    def visit_BLOB(self, type_):
        return "BLOB"
class DrizzleExecutionContext(mysql_dialect.MySQLExecutionContext):
    """Execution context for Drizzle; inherits MySQL behavior unchanged."""
    pass
class DrizzleIdentifierPreparer(mysql_dialect.MySQLIdentifierPreparer):
    """Identifier quoting for Drizzle; inherits MySQL behavior unchanged."""
    pass
class DrizzleDialect(mysql_dialect.MySQLDialect):
    """Details of the Drizzle dialect. Not used directly in application code."""

    name = 'drizzle'

    _supports_cast = True
    supports_sequences = False
    supports_native_boolean = True
    supports_views = False

    default_paramstyle = 'format'
    colspecs = colspecs

    statement_compiler = DrizzleCompiler
    ddl_compiler = DrizzleDDLCompiler
    type_compiler = DrizzleTypeCompiler
    ischema_names = ischema_names
    preparer = DrizzleIdentifierPreparer

    def on_connect(self):
        """Force autocommit - Drizzle Bug#707842 doesn't set this
        properly"""
        # NOTE(review): ``autocommit(False)`` reads like it *disables*
        # autocommit while the docstring says "force" it - presumably the
        # driver's flag is inverted or this works around the referenced
        # bug.  Confirm against the Drizzle DB-API before changing.
        def connect(conn):
            conn.autocommit(False)
        return connect

    def do_commit(self, connection):
        """Execute a COMMIT."""
        connection.commit()

    def do_rollback(self, connection):
        """Execute a ROLLBACK."""
        connection.rollback()

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """Return a Unicode SHOW TABLES from a given schema."""
        if schema is not None:
            current_schema = schema
        else:
            current_schema = self.default_schema_name

        # Drizzle uses utf8 throughout; see _detect_charset in the
        # mysqldb driver module.
        charset = 'utf8'
        rp = connection.execute("SHOW TABLES FROM %s" %
            self.identifier_preparer.quote_identifier(current_schema))
        return [row[0] for row in self._compat_fetchall(rp, charset=charset)]

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        # Views are not supported (``supports_views = False`` above).
        raise NotImplementedError

    def _detect_casing(self, connection):
        """Sniff out identifier case sensitivity.

        Cached per-connection. This value can not change without a server
        restart.
        """
        # Hardcoded: no server probing is performed for Drizzle.
        return 0

    def _detect_collations(self, connection):
        """Pull the active COLLATIONS list from the server.

        Cached per-connection.

        Returns a dict keyed by character-set name, valued by the
        collation name reported in data_dictionary.COLLATIONS.
        """
        collations = {}
        charset = self._connection_charset
        rs = connection.execute('SELECT CHARACTER_SET_NAME, COLLATION_NAME from data_dictionary.COLLATIONS')
        for row in self._compat_fetchall(rs, charset):
            collations[row[0]] = row[1]
        return collations

    def _detect_ansiquotes(self, connection):
        """Detect and adjust for the ANSI_QUOTES sql mode."""
        # Drizzle has no ANSI_QUOTES mode; both flags are fixed off.
        self._server_ansiquotes = False
        self._backslash_escapes = False
# Attach SQLAlchemy's class-level logger to the dialect.
log.class_logger(DrizzleDialect)

69
libs/sqlalchemy/dialects/drizzle/mysqldb.py

@ -0,0 +1,69 @@
"""Support for the Drizzle database via the Drizzle-python adapter.
Drizzle-Python is available at:
http://sourceforge.net/projects/mysql-python
At least version 1.2.1 or 1.2.2 should be used.
Connecting
-----------
Connect string format::
drizzle+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
Unicode
-------
Drizzle accommodates Python ``unicode`` objects directly and
uses the ``utf8`` encoding in all cases.
Known Issues
-------------
Drizzle-python at least as of version 1.2.2 has a serious memory leak related
to unicode conversion, a feature which is disabled via ``use_unicode=0``.
The recommended connection form with SQLAlchemy is::
engine = create_engine('drizzle+mysqldb://scott:tiger@localhost/test?charset=utf8&use_unicode=0', pool_recycle=3600)
"""
from sqlalchemy.dialects.drizzle.base import (DrizzleDialect,
DrizzleExecutionContext,
DrizzleCompiler, DrizzleIdentifierPreparer)
from sqlalchemy.connectors.mysqldb import (
MySQLDBExecutionContext,
MySQLDBCompiler,
MySQLDBIdentifierPreparer,
MySQLDBConnector
)
class DrizzleExecutionContext_mysqldb(
    MySQLDBExecutionContext,
    DrizzleExecutionContext):
    """Execution context combining the MySQLdb connector behavior with
    the Drizzle dialect's; no overrides needed."""
    pass
class DrizzleCompiler_mysqldb(MySQLDBCompiler, DrizzleCompiler):
    """Statement compiler combining MySQLdb connector behavior with the
    Drizzle compiler; no overrides needed."""
    pass
class DrizzleIdentifierPreparer_mysqldb(
    MySQLDBIdentifierPreparer,
    DrizzleIdentifierPreparer):
    """Identifier preparer combining MySQLdb connector behavior with the
    Drizzle preparer; no overrides needed."""
    pass
class DrizzleDialect_mysqldb(MySQLDBConnector, DrizzleDialect):
    """Drizzle dialect bound to the MySQLdb-style DB-API driver."""

    execution_ctx_cls = DrizzleExecutionContext_mysqldb
    statement_compiler = DrizzleCompiler_mysqldb
    preparer = DrizzleIdentifierPreparer_mysqldb

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""
        # Drizzle is utf8 in all cases; no server round-trip needed.
        return 'utf8'


# Module-level entry point consumed by SQLAlchemy's dialect loader.
dialect = DrizzleDialect_mysqldb

2
libs/sqlalchemy/dialects/firebird/__init__.py

@ -1,5 +1,5 @@
# firebird/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

34
libs/sqlalchemy/dialects/firebird/base.py

@ -1,5 +1,5 @@
# firebird/base.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -244,6 +244,10 @@ class FBCompiler(sql.compiler.SQLCompiler):
visit_char_length_func = visit_length_func
def function_argspec(self, func, **kw):
# TODO: this probably will need to be
# narrowed to a fixed list, some no-arg functions
# may require parens - see similar example in the oracle
# dialect
if func.clauses is not None and len(func.clauses):
return self.process(func.clause_expr)
else:
@ -263,9 +267,9 @@ class FBCompiler(sql.compiler.SQLCompiler):
result = ""
if select._limit:
result += "FIRST %d " % select._limit
result += "FIRST %s " % self.process(sql.literal(select._limit))
if select._offset:
result +="SKIP %d " % select._offset
result +="SKIP %s " % self.process(sql.literal(select._offset))
if select._distinct:
result += "DISTINCT "
return result
@ -331,12 +335,13 @@ class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
class FBExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq):
def fire_sequence(self, seq, type_):
"""Get the next value from the sequence using ``gen_id()``."""
return self._execute_scalar(
"SELECT gen_id(%s, 1) FROM rdb$database" %
self.dialect.identifier_preparer.format_sequence(seq)
self.dialect.identifier_preparer.format_sequence(seq),
type_
)
@ -357,7 +362,6 @@ class FBDialect(default.DefaultDialect):
requires_name_normalize = True
supports_empty_insert = False
statement_compiler = FBCompiler
ddl_compiler = FBDDLCompiler
preparer = FBIdentifierPreparer
@ -374,7 +378,13 @@ class FBDialect(default.DefaultDialect):
def initialize(self, connection):
super(FBDialect, self).initialize(connection)
self._version_two = self.server_version_info > (2, )
self._version_two = ('firebird' in self.server_version_info and \
self.server_version_info >= (2, )
) or \
('interbase' in self.server_version_info and \
self.server_version_info >= (6, )
)
if not self._version_two:
# TODO: whatever other pre < 2.0 stuff goes here
self.ischema_names = ischema_names.copy()
@ -382,8 +392,9 @@ class FBDialect(default.DefaultDialect):
self.colspecs = {
sqltypes.DateTime: sqltypes.DATE
}
else:
self.implicit_returning = True
self.implicit_returning = self._version_two and \
self.__dict__.get('implicit_returning', True)
def normalize_name(self, name):
# Remove trailing spaces: FB uses a CHAR() type,
@ -509,7 +520,7 @@ class FBDialect(default.DefaultDialect):
def get_columns(self, connection, table_name, schema=None, **kw):
# Query to extract the details of all the fields of the given table
tblqry = """
SELECT DISTINCT r.rdb$field_name AS fname,
SELECT r.rdb$field_name AS fname,
r.rdb$null_flag AS null_flag,
t.rdb$type_name AS ftype,
f.rdb$field_sub_type AS stype,
@ -585,7 +596,8 @@ class FBDialect(default.DefaultDialect):
'name' : name,
'type' : coltype,
'nullable' : not bool(row['null_flag']),
'default' : defvalue
'default' : defvalue,
'autoincrement':defvalue is None
}
if orig_colname.lower() == orig_colname:

25
libs/sqlalchemy/dialects/firebird/kinterbasdb.py

@ -1,5 +1,5 @@
# firebird/kinterbasdb.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -48,7 +48,9 @@ __ http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurr
from sqlalchemy.dialects.firebird.base import FBDialect, \
FBCompiler, FBExecutionContext
from sqlalchemy import util, types as sqltypes
import decimal
from sqlalchemy.util.compat import decimal
from re import match
class _FBNumeric_kinterbasdb(sqltypes.Numeric):
def bind_processor(self, dialect):
@ -133,20 +135,25 @@ class FBDialect_kinterbasdb(FBDialect):
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature. This is more than enough for our purposes,
# as this is mainly (only?) used by the testsuite.
from re import match
# Interbase signature.
fbconn = connection.connection
version = fbconn.server_version
m = match('\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+) \w+ (\d+)\.(\d+)', version)
return self._parse_version_info(version)
def _parse_version_info(self, version):
m = match('\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?', version)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % version)
return tuple([int(x) for x in m.group(5, 6, 4)])
def is_disconnect(self, e):
if m.group(5) != None:
return tuple([int(x) for x in m.group(6, 7, 4)] + ['firebird'])
else:
return tuple([int(x) for x in m.group(1, 2, 3)] + ['interbase'])
def is_disconnect(self, e, connection, cursor):
if isinstance(e, (self.dbapi.OperationalError,
self.dbapi.ProgrammingError)):
msg = str(e)

2
libs/sqlalchemy/dialects/informix/__init__.py

@ -1,5 +1,5 @@
# informix/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

125
libs/sqlalchemy/dialects/informix/base.py

@ -1,5 +1,5 @@
# informix/base.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# coding: gbk
#
# This module is part of SQLAlchemy and is released under
@ -20,6 +20,124 @@ from sqlalchemy.sql import compiler, text
from sqlalchemy.engine import default, reflection
from sqlalchemy import types as sqltypes
RESERVED_WORDS = set(
["abs", "absolute", "access", "access_method", "acos", "active", "add",
"address", "add_months", "admin", "after", "aggregate", "alignment",
"all", "allocate", "all_rows", "altere", "and", "ansi", "any", "append",
"array", "as", "asc", "ascii", "asin", "at", "atan", "atan2", "attach",
"attributes", "audit", "authentication", "authid", "authorization",
"authorized", "auto", "autofree", "auto_reprepare", "auto_stat_mode",
"avg", "avoid_execute", "avoid_fact", "avoid_full", "avoid_hash",
"avoid_index", "avoid_index_sj", "avoid_multi_index", "avoid_nl",
"avoid_star_join", "avoid_subqf", "based", "before", "begin",
"between", "bigint", "bigserial", "binary", "bitand", "bitandnot",
"bitnot", "bitor", "bitxor", "blob", "blobdir", "boolean", "both",
"bound_impl_pdq", "buffered", "builtin", "by", "byte", "cache", "call",
"cannothash", "cardinality", "cascade", "case", "cast", "ceil", "char",
"character", "character_length", "char_length", "check", "class",
"class_origin", "client", "clob", "clobdir", "close", "cluster",
"clustersize", "cobol", "codeset", "collation", "collection",
"column", "columns", "commit", "committed", "commutator", "component",
"components", "concat", "concurrent", "connect", "connection",
"connection_name", "connect_by_iscycle", "connect_by_isleaf",
"connect_by_rootconst", "constraint", "constraints", "constructor",
"context", "continue", "copy", "cos", "costfunc", "count", "crcols",
"create", "cross", "current", "current_role", "currval", "cursor",
"cycle", "database", "datafiles", "dataskip", "date", "datetime",
"day", "dba", "dbdate", "dbinfo", "dbpassword", "dbsecadm",
"dbservername", "deallocate", "debug", "debugmode", "debug_env", "dec",
"decimal", "declare", "decode", "decrypt_binary", "decrypt_char",
"dec_t", "default", "default_role", "deferred", "deferred_prepare",
"define", "delay", "delete", "deleting", "delimited", "delimiter",
"deluxe", "desc", "describe", "descriptor", "detach", "diagnostics",
"directives", "dirty", "disable", "disabled", "disconnect", "disk",
"distinct", "distributebinary", "distributesreferences",
"distributions", "document", "domain", "donotdistribute", "dormant",
"double", "drop", "dtime_t", "each", "elif", "else", "enabled",
"encryption", "encrypt_aes", "encrypt_tdes", "end", "enum",
"environment", "error", "escape", "exception", "exclusive", "exec",
"execute", "executeanywhere", "exemption", "exists", "exit", "exp",
"explain", "explicit", "express", "expression", "extdirectives",
"extend", "extent", "external", "fact", "false", "far", "fetch",
"file", "filetoblob", "filetoclob", "fillfactor", "filtering", "first",
"first_rows", "fixchar", "fixed", "float", "floor", "flush", "for",
"force", "forced", "force_ddl_exec", "foreach", "foreign", "format",
"format_units", "fortran", "found", "fraction", "fragment",
"fragments", "free", "from", "full", "function", "general", "get",
"gethint", "global", "go", "goto", "grant", "greaterthan",
"greaterthanorequal", "group", "handlesnulls", "hash", "having", "hdr",
"hex", "high", "hint", "hold", "home", "hour", "idslbacreadarray",
"idslbacreadset", "idslbacreadtree", "idslbacrules",
"idslbacwritearray", "idslbacwriteset", "idslbacwritetree",
"idssecuritylabel", "if", "ifx_auto_reprepare", "ifx_batchedread_table",
"ifx_int8_t", "ifx_lo_create_spec_t", "ifx_lo_stat_t", "immediate",
"implicit", "implicit_pdq", "in", "inactive", "increment", "index",
"indexes", "index_all", "index_sj", "indicator", "informix", "init",
"initcap", "inline", "inner", "inout", "insert", "inserting", "instead",
"int", "int8", "integ", "integer", "internal", "internallength",
"interval", "into", "intrvl_t", "is", "iscanonical", "isolation",
"item", "iterator", "java", "join", "keep", "key", "label", "labeleq",
"labelge", "labelglb", "labelgt", "labelle", "labellt", "labellub",
"labeltostring", "language", "last", "last_day", "leading", "left",
"length", "lessthan", "lessthanorequal", "let", "level", "like",
"limit", "list", "listing", "load", "local", "locator", "lock", "locks",
"locopy", "loc_t", "log", "log10", "logn", "long", "loop", "lotofile",
"low", "lower", "lpad", "ltrim", "lvarchar", "matched", "matches",
"max", "maxerrors", "maxlen", "maxvalue", "mdy", "median", "medium",
"memory", "memory_resident", "merge", "message_length", "message_text",
"middle", "min", "minute", "minvalue", "mod", "mode", "moderate",
"modify", "module", "money", "month", "months_between", "mounting",
"multiset", "multi_index", "name", "nchar", "negator", "new", "next",
"nextval", "next_day", "no", "nocache", "nocycle", "nomaxvalue",
"nomigrate", "nominvalue", "none", "non_dim", "non_resident", "noorder",
"normal", "not", "notemplatearg", "notequal", "null", "nullif",
"numeric", "numrows", "numtodsinterval", "numtoyminterval", "nvarchar",
"nvl", "octet_length", "of", "off", "old", "on", "online", "only",
"opaque", "opclass", "open", "optcompind", "optical", "optimization",
"option", "or", "order", "ordered", "out", "outer", "output",
"override", "page", "parallelizable", "parameter", "partition",
"pascal", "passedbyvalue", "password", "pdqpriority", "percaltl_cos",
"pipe", "pli", "pload", "policy", "pow", "power", "precision",
"prepare", "previous", "primary", "prior", "private", "privileges",
"procedure", "properties", "public", "put", "raise", "range", "raw",
"read", "real", "recordend", "references", "referencing", "register",
"rejectfile", "relative", "release", "remainder", "rename",
"reoptimization", "repeatable", "replace", "replication", "reserve",
"resolution", "resource", "restart", "restrict", "resume", "retain",
"retainupdatelocks", "return", "returned_sqlstate", "returning",
"returns", "reuse", "revoke", "right", "robin", "role", "rollback",
"rollforward", "root", "round", "routine", "row", "rowid", "rowids",
"rows", "row_count", "rpad", "rtrim", "rule", "sameas", "samples",
"sampling", "save", "savepoint", "schema", "scroll", "seclabel_by_comp",
"seclabel_by_name", "seclabel_to_char", "second", "secondary",
"section", "secured", "security", "selconst", "select", "selecting",
"selfunc", "selfuncargs", "sequence", "serial", "serial8",
"serializable", "serveruuid", "server_name", "session", "set",
"setsessionauth", "share", "short", "siblings", "signed", "sin",
"sitename", "size", "skall", "skinhibit", "skip", "skshow",
"smallfloat", "smallint", "some", "specific", "sql", "sqlcode",
"sqlcontext", "sqlerror", "sqlstate", "sqlwarning", "sqrt",
"stability", "stack", "standard", "start", "star_join", "statchange",
"statement", "static", "statistics", "statlevel", "status", "stdev",
"step", "stop", "storage", "store", "strategies", "string",
"stringtolabel", "struct", "style", "subclass_origin", "substr",
"substring", "sum", "support", "sync", "synonym", "sysdate",
"sysdbclose", "sysdbopen", "system", "sys_connect_by_path", "table",
"tables", "tan", "task", "temp", "template", "test", "text", "then",
"time", "timeout", "to", "today", "to_char", "to_date",
"to_dsinterval", "to_number", "to_yminterval", "trace", "trailing",
"transaction", "transition", "tree", "trigger", "triggers", "trim",
"true", "trunc", "truncate", "trusted", "type", "typedef", "typeid",
"typename", "typeof", "uid", "uncommitted", "under", "union",
"unique", "units", "unknown", "unload", "unlock", "unsigned",
"update", "updating", "upon", "upper", "usage", "use",
"uselastcommitted", "user", "use_hash", "use_nl", "use_subqf",
"using", "value", "values", "var", "varchar", "variable", "variance",
"variant", "varying", "vercols", "view", "violations", "void",
"volatile", "wait", "warning", "weekday", "when", "whenever", "where",
"while", "with", "without", "work", "write", "writedown", "writeup",
"xadatasource", "xid", "xload", "xunload", "year"
])
class InfoDateTime(sqltypes.DateTime):
def bind_processor(self, dialect):
@ -213,6 +331,10 @@ class InfoDDLCompiler(compiler.DDLCompiler):
text += "CONSTRAINT %s " % self.preparer.format_constraint(constraint)
return text
class InformixIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
class InformixDialect(default.DefaultDialect):
name = 'informix'
@ -224,6 +346,7 @@ class InformixDialect(default.DefaultDialect):
ddl_compiler = InfoDDLCompiler
colspecs = colspecs
ischema_names = ischema_names
preparer = InformixIdentifierPreparer
default_paramstyle = 'qmark'
def __init__(self, has_transactions=True, *args, **kwargs):

4
libs/sqlalchemy/dialects/informix/informixdb.py

@ -1,5 +1,5 @@
# informix/informixdb.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -62,7 +62,7 @@ class InformixDialect_informixdb(InformixDialect):
v = VERSION_RE.split(connection.connection.dbms_version)
return (int(v[1]), int(v[2]), v[3])
def is_disconnect(self, e):
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.OperationalError):
return 'closed the connection' in str(e) \
or 'connection not open' in str(e)

2
libs/sqlalchemy/dialects/maxdb/__init__.py

@ -1,5 +1,5 @@
# maxdb/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

77
libs/sqlalchemy/dialects/maxdb/base.py

@ -1,14 +1,14 @@
# maxdb/base.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MaxDB database.
This dialect is *not* ported to SQLAlchemy 0.6.
This dialect is *not* ported to SQLAlchemy 0.6 or 0.7.
This dialect is *not* tested on SQLAlchemy 0.6.
This dialect is *not* tested on SQLAlchemy 0.6 or 0.7.
Overview
--------
@ -31,8 +31,6 @@ use upper case for DB-API.
Implementation Notes
--------------------
Also check the DatabaseNotes page on the wiki for detailed information.
With the 7.6.00.37 driver and Python 2.5, it seems that all DB-API
generated exceptions are broken and can cause Python to crash.
@ -58,6 +56,62 @@ required components such as an Max-aware 'old oracle style' join compiler
integration- email the devel list if you're interested in working on
this.
Versions tested: 7.6.03.07 and 7.6.00.37, native Python DB-API
* MaxDB has severe limitations on OUTER JOINs, which are essential to ORM
eager loading. And rather than raise an error if a SELECT can't be serviced,
the database simply returns incorrect results.
* Version 7.6.03.07 seems to JOIN properly, however the docs do not show the
OUTER restrictions being lifted (as of this writing), and no changelog is
available to confirm either. If you are using a different server version and
your tasks require the ORM or any semi-advanced SQL through the SQL layer,
running the SQLAlchemy test suite against your database is HIGHLY
recommended before you begin.
* Version 7.6.00.37 is LHS/RHS sensitive in `FROM lhs LEFT OUTER JOIN rhs ON
lhs.col=rhs.col` vs `rhs.col=lhs.col`!
* Version 7.6.00.37 is confused by `SELECT DISTINCT col as alias FROM t ORDER
BY col` - these aliased, DISTINCT, ordered queries need to be re-written to
order by the alias name.
* Version 7.6.x supports creating a SAVEPOINT but not its RELEASE.
* MaxDB supports autoincrement-style columns (DEFAULT SERIAL) and independent
sequences. When including a DEFAULT SERIAL column in an insert, 0 needs to
be inserted rather than NULL to generate a value.
* MaxDB supports ANSI and "old Oracle style" theta joins with (+) outer join
indicators.
* The SQLAlchemy dialect is schema-aware and probably won't function correctly
on server versions (pre-7.6?). Support for schema-less server versions could
be added if there's call.
* ORDER BY is not supported in subqueries. LIMIT is not supported in
subqueries. In 7.6.00.37, TOP does work in subqueries, but without limit not
so useful. OFFSET does not work in 7.6 despite being in the docs. Row number
tricks in WHERE via ROWNO may be possible but it only seems to allow
less-than comparison!
* Version 7.6.03.07 can't LIMIT if a derived table is in FROM: `SELECT * FROM
(SELECT * FROM a) LIMIT 2`
* MaxDB does not support SQL's CAST and can only usefully cast two types.
There isn't much implicit type conversion, so be precise when creating
`PassiveDefaults` in DDL generation: `'3'` and `3` aren't the same.
sapdb.dbapi
^^^^^^^^^^^
* As of 2007-10-22 the Python 2.4 and 2.5 compatible versions of the DB-API
are no longer available. A forum posting at SAP states that the Python
driver will be available again "in the future". The last release from MySQL
AB works if you can find it.
* sequence.NEXTVAL skips every other value!
* No rowcount for executemany()
* If an INSERT into a table with a DEFAULT SERIAL column inserts the results
of a function `INSERT INTO t VALUES (LENGTH('foo'))`, the cursor won't have
the serial id. It needs to be manually yanked from tablename.CURRVAL.
* Super-duper picky about where bind params can be placed. Not smart about
converting Python types for some functions, such as `MOD(5, ?)`.
* LONG (text, binary) values in result sets are read-once. The dialect uses a
caching RowProxy when these types are present.
* Connection objects seem like they want to be either `close()`d or garbage
collected, but not both. There's a warning issued but it seems harmless.
"""
import datetime, itertools, re
@ -117,15 +171,13 @@ class _StringType(sqltypes.String):
class MaxString(_StringType):
_type = 'VARCHAR'
def __init__(self, *a, **kw):
super(MaxString, self).__init__(*a, **kw)
class MaxUnicode(_StringType):
_type = 'VARCHAR'
def __init__(self, length=None, **kw):
super(MaxUnicode, self).__init__(length=length, encoding='unicode')
kw['encoding'] = 'unicode'
super(MaxUnicode, self).__init__(length=length, **kw)
class MaxChar(_StringType):
@ -135,8 +187,8 @@ class MaxChar(_StringType):
class MaxText(_StringType):
_type = 'LONG'
def __init__(self, *a, **kw):
super(MaxText, self).__init__(*a, **kw)
def __init__(self, length=None, **kw):
super(MaxText, self).__init__(length, **kw)
def get_col_spec(self):
spec = 'LONG'
@ -583,7 +635,7 @@ class MaxDBCompiler(compiler.SQLCompiler):
# LIMIT. Right? Other dialects seem to get away with
# dropping order.
if select._limit:
raise exc.InvalidRequestError(
raise exc.CompileError(
"MaxDB does not support ORDER BY in subqueries")
else:
return ""
@ -604,6 +656,7 @@ class MaxDBCompiler(compiler.SQLCompiler):
def limit_clause(self, select):
# The docs say offsets are supported with LIMIT. But they're not.
# TODO: maybe emulate by adding a ROWNO/ROWNUM predicate?
# TODO: does MaxDB support bind params for LIMIT / TOP ?
if self.is_subquery():
# sub queries need TOP
return ''

2
libs/sqlalchemy/dialects/maxdb/sapdb.py

@ -1,5 +1,5 @@
# maxdb/sapdb.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

2
libs/sqlalchemy/dialects/mssql/__init__.py

@ -1,5 +1,5 @@
# mssql/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

5
libs/sqlalchemy/dialects/mssql/adodbapi.py

@ -1,5 +1,5 @@
# mssql/adodbapi.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -8,6 +8,7 @@
The adodbapi dialect is not implemented for 0.6 at this time.
"""
import datetime
from sqlalchemy import types as sqltypes, util
from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect
import sys
@ -61,7 +62,7 @@ class MSDialect_adodbapi(MSDialect):
connectors.append("Integrated Security=SSPI")
return [[";".join (connectors)], {}]
def is_disconnect(self, e):
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \
"'connection failure'" in str(e)

213
libs/sqlalchemy/dialects/mssql/base.py

@ -1,5 +1,5 @@
# mssql/base.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -56,7 +56,7 @@ MSNVarchar, MSText, and MSNText. For example::
from sqlalchemy.dialects.mssql import VARCHAR
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
When such a column is associated with a :class:`Table`, the
When such a column is associated with a :class:`.Table`, the
CREATE TABLE statement for this column will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
@ -130,17 +130,57 @@ which has triggers::
# ...,
implicit_returning=False
)
Declarative form::
class MyClass(Base):
# ...
__table_args__ = {'implicit_returning':False}
This option can also be specified engine-wide using the
``implicit_returning=False`` argument on :func:`.create_engine`.
Enabling Snapshot Isolation
---------------------------
Not necessarily specific to SQLAlchemy, SQL Server has a default transaction
isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
Background on SQL Server snapshot isolation is available at
http://msdn.microsoft.com/en-us/library/ms175095.aspx.
Scalar Select Comparisons
-------------------------
The MSSQL dialect contains a legacy behavior whereby comparing
a scalar select to a value using the ``=`` or ``!=`` operator
will resolve to IN or NOT IN, respectively. This behavior is
deprecated and will be removed in 0.8 - the ``s.in_()``/``~s.in_()`` operators
should be used when IN/NOT IN are desired.
For the time being, the existing behavior prevents a comparison
between scalar select and another value that actually wants to use ``=``.
To remove this behavior in a forwards-compatible way, apply this
compilation rule by placing the following code at the module import
level::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import _BinaryExpression
from sqlalchemy.sql.compiler import SQLCompiler
@compiles(_BinaryExpression, 'mssql')
def override_legacy_binary(element, compiler, **kw):
return SQLCompiler.visit_binary(compiler, element, **kw)
Known Issues
------------
@ -149,20 +189,19 @@ Known Issues
SQL Server 2005
"""
import datetime, decimal, inspect, operator, sys, re
import itertools
import datetime, operator, re
from sqlalchemy import sql, schema as sa_schema, exc, util
from sqlalchemy.sql import select, compiler, expression, \
operators as sql_operators, \
functions as sql_functions, util as sql_util
util as sql_util, cast
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy import processors
from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
VARBINARY, BLOB
from sqlalchemy.dialects.mssql import information_schema as ischema
MS_2008_VERSION = (10,)
@ -200,14 +239,13 @@ RESERVED_WORDS = set(
'writetext',
])
class REAL(sqltypes.Float):
"""A type for ``real`` numbers."""
class REAL(sqltypes.REAL):
__visit_name__ = 'REAL'
def __init__(self):
super(REAL, self).__init__(precision=24)
def __init__(self, **kw):
# REAL is a synonym for FLOAT(24) on SQL server
kw['precision'] = 24
super(REAL, self).__init__(**kw)
class TINYINT(sqltypes.Integer):
__visit_name__ = 'TINYINT'
@ -258,7 +296,7 @@ class TIME(sqltypes.TIME):
return value
return process
_reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d+))?")
_reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
@ -289,7 +327,8 @@ class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
class DATETIME2(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = 'DATETIME2'
def __init__(self, precision=None, **kwargs):
def __init__(self, precision=None, **kw):
super(DATETIME2, self).__init__(**kw)
self.precision = precision
@ -309,16 +348,15 @@ class _StringType(object):
class TEXT(_StringType, sqltypes.TEXT):
"""MSSQL TEXT type, for variable-length text up to 2^31 characters."""
def __init__(self, *args, **kw):
def __init__(self, length=None, collation=None, **kw):
"""Construct a TEXT.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.Text.__init__(self, *args, **kw)
sqltypes.Text.__init__(self, length, **kw)
class NTEXT(_StringType, sqltypes.UnicodeText):
"""MSSQL NTEXT type, for variable-length unicode text up to 2^30
@ -326,24 +364,22 @@ class NTEXT(_StringType, sqltypes.UnicodeText):
__visit_name__ = 'NTEXT'
def __init__(self, *args, **kwargs):
def __init__(self, length=None, collation=None, **kw):
"""Construct a NTEXT.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kwargs.pop('collation', None)
_StringType.__init__(self, collation)
length = kwargs.pop('length', None)
sqltypes.UnicodeText.__init__(self, length, **kwargs)
sqltypes.UnicodeText.__init__(self, length, **kw)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""MSSQL VARCHAR type, for variable-length non-Unicode data with a maximum
of 8,000 characters."""
def __init__(self, *args, **kw):
def __init__(self, length=None, collation=None, **kw):
"""Construct a VARCHAR.
:param length: Optinal, maximum data length, in characters.
@ -364,16 +400,15 @@ class VARCHAR(_StringType, sqltypes.VARCHAR):
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.VARCHAR.__init__(self, *args, **kw)
sqltypes.VARCHAR.__init__(self, length, **kw)
class NVARCHAR(_StringType, sqltypes.NVARCHAR):
"""MSSQL NVARCHAR type.
For variable-length unicode character data up to 4,000 characters."""
def __init__(self, *args, **kw):
def __init__(self, length=None, collation=None, **kw):
"""Construct a NVARCHAR.
:param length: Optional, Maximum data length, in characters.
@ -382,15 +417,14 @@ class NVARCHAR(_StringType, sqltypes.NVARCHAR):
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.NVARCHAR.__init__(self, *args, **kw)
sqltypes.NVARCHAR.__init__(self, length, **kw)
class CHAR(_StringType, sqltypes.CHAR):
"""MSSQL CHAR type, for fixed-length non-Unicode data with a maximum
of 8,000 characters."""
def __init__(self, *args, **kw):
def __init__(self, length=None, collation=None, **kw):
"""Construct a CHAR.
:param length: Optinal, maximum data length, in characters.
@ -411,16 +445,15 @@ class CHAR(_StringType, sqltypes.CHAR):
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.CHAR.__init__(self, *args, **kw)
sqltypes.CHAR.__init__(self, length, **kw)
class NCHAR(_StringType, sqltypes.NCHAR):
"""MSSQL NCHAR type.
For fixed-length unicode character data up to 4,000 characters."""
def __init__(self, *args, **kw):
def __init__(self, length=None, collation=None, **kw):
"""Construct an NCHAR.
:param length: Optional, Maximum data length, in characters.
@ -429,9 +462,8 @@ class NCHAR(_StringType, sqltypes.NCHAR):
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.NCHAR.__init__(self, *args, **kw)
sqltypes.NCHAR.__init__(self, length, **kw)
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
@ -510,7 +542,7 @@ ischema_names = {
class MSTypeCompiler(compiler.GenericTypeCompiler):
def _extend(self, spec, type_):
def _extend(self, spec, type_, length=None):
"""Extend a string-type declaration with standard SQL
COLLATE annotations.
@ -521,8 +553,11 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
else:
collation = None
if type_.length:
spec = spec + "(%d)" % type_.length
if not length:
length = type_.length
if length:
spec = spec + "(%s)" % length
return ' '.join([c for c in (spec, collation)
if c is not None])
@ -534,9 +569,6 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
else:
return "FLOAT(%(precision)s)" % {'precision': precision}
def visit_REAL(self, type_):
return "REAL"
def visit_TINYINT(self, type_):
return "TINYINT"
@ -576,7 +608,8 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
return self._extend("TEXT", type_)
def visit_VARCHAR(self, type_):
return self._extend("VARCHAR", type_)
return self._extend("VARCHAR", type_,
length = type_.length or 'max')
def visit_CHAR(self, type_):
return self._extend("CHAR", type_)
@ -585,7 +618,8 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
return self._extend("NCHAR", type_)
def visit_NVARCHAR(self, type_):
return self._extend("NVARCHAR", type_)
return self._extend("NVARCHAR", type_,
length = type_.length or 'max')
def visit_date(self, type_):
if self.dialect.server_version_info < MS_2008_VERSION:
@ -605,6 +639,12 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
def visit_IMAGE(self, type_):
return "IMAGE"
def visit_VARBINARY(self, type_):
return self._extend(
"VARBINARY",
type_,
length=type_.length or 'max')
def visit_boolean(self, type_):
return self.visit_BIT(type_)
@ -709,8 +749,8 @@ class MSSQLCompiler(compiler.SQLCompiler):
})
def __init__(self, *args, **kwargs):
super(MSSQLCompiler, self).__init__(*args, **kwargs)
self.tablealiases = {}
super(MSSQLCompiler, self).__init__(*args, **kwargs)
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
@ -736,15 +776,21 @@ class MSSQLCompiler(compiler.SQLCompiler):
def get_select_precolumns(self, select):
""" MS-SQL puts TOP, it's version of LIMIT here """
if select._distinct or select._limit:
if select._distinct or select._limit is not None:
s = select._distinct and "DISTINCT " or ""
if select._limit:
# ODBC drivers and possibly others
# don't support bind params in the SELECT clause on SQL Server.
# so have to use literal here.
if select._limit is not None:
if not select._offset:
s += "TOP %s " % (select._limit,)
s += "TOP %d " % select._limit
return s
return compiler.SQLCompiler.get_select_precolumns(self, select)
def get_from_hint_text(self, table, text):
return text
def limit_clause(self, select):
# Limit in mssql is after the select keyword
return ""
@ -758,7 +804,7 @@ class MSSQLCompiler(compiler.SQLCompiler):
# to use ROW_NUMBER(), an ORDER BY is required.
orderby = self.process(select._order_by_clause)
if not orderby:
raise exc.InvalidRequestError('MSSQL requires an order_by when '
raise exc.CompileError('MSSQL requires an order_by when '
'using an offset.')
_offset = select._offset
@ -769,12 +815,12 @@ class MSSQLCompiler(compiler.SQLCompiler):
% orderby).label("mssql_rn")
).order_by(None).alias()
mssql_rn = sql.column('mssql_rn')
limitselect = sql.select([c for c in select.c if
c.key!='mssql_rn'])
limitselect.append_whereclause("mssql_rn>%d" % _offset)
limitselect.append_whereclause(mssql_rn> _offset)
if _limit is not None:
limitselect.append_whereclause("mssql_rn<=%d" %
(_limit + _offset))
limitselect.append_whereclause(mssql_rn<=(_limit + _offset))
return self.process(limitselect, iswrapper=True, **kwargs)
else:
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
@ -800,7 +846,6 @@ class MSSQLCompiler(compiler.SQLCompiler):
def visit_alias(self, alias, **kwargs):
# translate for schema-qualified table aliases
self.tablealiases[alias.original] = alias
kwargs['mssql_aliased'] = alias.original
return super(MSSQLCompiler, self).visit_alias(alias, **kwargs)
@ -809,6 +854,9 @@ class MSSQLCompiler(compiler.SQLCompiler):
return 'DATEPART("%s", %s)' % \
(field, self.process(extract.expr, **kw))
def visit_savepoint(self, savepoint_stmt):
return "SAVE TRANSACTION %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return ("ROLLBACK TRANSACTION %s"
% self.preparer.format_savepoint(savepoint_stmt))
@ -866,6 +914,10 @@ class MSSQLCompiler(compiler.SQLCompiler):
)
):
op = binary.operator == operator.eq and "IN" or "NOT IN"
util.warn_deprecated("Comparing a scalar select using ``=``/``!=`` will "
"no longer produce IN/NOT IN in 0.8. To remove this "
"behavior immediately, use the recipe at "
"http://www.sqlalchemy.org/docs/07/dialects/mssql.html#scalar-select-comparisons")
return self.process(
expression._BinaryExpression(binary.left,
binary.right, op),
@ -977,7 +1029,7 @@ class MSDDLCompiler(compiler.DDLCompiler):
colspec += " NULL"
if column.table is None:
raise exc.InvalidRequestError(
raise exc.CompileError(
"mssql requires Table-bound columns "
"in order to generate DDL")
@ -1066,12 +1118,12 @@ class MSDialect(default.DefaultDialect):
super(MSDialect, self).__init__(**opts)
def do_savepoint(self, connection, name):
util.warn("Savepoint support in mssql is experimental and "
"may lead to data loss.")
# give the DBAPI a push
connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
connection.execute("SAVE TRANSACTION %s" % name)
super(MSDialect, self).do_savepoint(connection, name)
def do_release_savepoint(self, connection, name):
# SQL Server does not support RELEASE SAVEPOINT
pass
def initialize(self, connection):
@ -1108,15 +1160,20 @@ class MSDialect(default.DefaultDialect):
pass
return self.schema_name
def _unicode_cast(self, column):
if self.server_version_info >= MS_2005_VERSION:
return cast(column, NVARCHAR(_warn_on_bytestring=False))
else:
return column
def has_table(self, connection, tablename, schema=None):
current_schema = schema or self.default_schema_name
columns = ischema.columns
whereclause = self._unicode_cast(columns.c.table_name)==tablename
if current_schema:
whereclause = sql.and_(columns.c.table_name==tablename,
whereclause = sql.and_(whereclause,
columns.c.table_schema==current_schema)
else:
whereclause = columns.c.table_name==tablename
s = sql.select([columns], whereclause)
c = connection.execute(s)
return c.first() is not None
@ -1180,7 +1237,10 @@ class MSDialect(default.DefaultDialect):
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', current_schema,
sqltypes.String(convert_unicode=True))
]
],
typemap = {
'name':sqltypes.Unicode()
}
)
)
indexes = {}
@ -1206,7 +1266,11 @@ class MSDialect(default.DefaultDialect):
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', current_schema,
sqltypes.String(convert_unicode=True))
]),
],
typemap = {
'name':sqltypes.Unicode()
}
),
)
for row in rp:
if row['index_id'] in indexes:
@ -1217,14 +1281,25 @@ class MSDialect(default.DefaultDialect):
@reflection.cache
def get_view_definition(self, connection, viewname, schema=None, **kw):
current_schema = schema or self.default_schema_name
views = ischema.views
s = sql.select([views.c.view_definition],
sql.and_(
views.c.table_schema == current_schema,
views.c.table_name == viewname
),
rp = connection.execute(
sql.text(
"select definition from sys.sql_modules as mod, "
"sys.views as views, "
"sys.schemas as sch"
" where "
"mod.object_id=views.object_id and "
"views.schema_id=sch.schema_id and "
"views.name=:viewname and sch.name=:schname",
bindparams=[
sql.bindparam('viewname', viewname,
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', current_schema,
sqltypes.String(convert_unicode=True))
]
)
)
rp = connection.execute(s)
if rp:
view_def = rp.scalar()
return view_def

4
libs/sqlalchemy/dialects/mssql/information_schema.py

@ -1,12 +1,12 @@
# mssql/information_schema.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# TODO: should be using the sys. catalog with SQL Server, not information schema
from sqlalchemy import Table, MetaData, Column, ForeignKey
from sqlalchemy import Table, MetaData, Column
from sqlalchemy.types import String, Unicode, Integer, TypeDecorator
ischema = MetaData()

8
libs/sqlalchemy/dialects/mssql/mxodbc.py

@ -1,5 +1,5 @@
# mssql/mxodbc.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -51,15 +51,11 @@ of ``False`` will uncondtionally use string-escaped parameters.
"""
import re
import sys
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.connectors.mxodbc import MxODBCConnector
from sqlalchemy.dialects.mssql.pyodbc import MSExecutionContext_pyodbc
from sqlalchemy.dialects.mssql.base import (MSExecutionContext, MSDialect,
MSSQLCompiler,
from sqlalchemy.dialects.mssql.base import (MSDialect,
MSSQLStrictCompiler,
_MSDateTime, _MSDate, TIME)

6
libs/sqlalchemy/dialects/mssql/pymssql.py

@ -1,5 +1,5 @@
# mssql/pymssql.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -41,7 +41,6 @@ Please consult the pymssql documentation for further information.
from sqlalchemy.dialects.mssql.base import MSDialect
from sqlalchemy import types as sqltypes, util, processors
import re
import decimal
class _MSNumeric_pymssql(sqltypes.Numeric):
def result_processor(self, dialect, type_):
@ -52,7 +51,6 @@ class _MSNumeric_pymssql(sqltypes.Numeric):
class MSDialect_pymssql(MSDialect):
supports_sane_rowcount = False
max_identifier_length = 30
driver = 'pymssql'
colspecs = util.update_copy(
@ -96,7 +94,7 @@ class MSDialect_pymssql(MSDialect):
opts['host'] = "%s:%s" % (opts['host'], port)
return [[], opts]
def is_disconnect(self, e):
def is_disconnect(self, e, connection, cursor):
for msg in (
"Error 10054",
"Not connected to any MS SQL server",

49
libs/sqlalchemy/dialects/mssql/pyodbc.py

@ -1,5 +1,5 @@
# mssql/pyodbc.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -35,27 +35,31 @@ Examples of pyodbc connection string URLs:
dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english
* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection string
dynamically created that would appear like::
* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection
that would appear like::
DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass
* ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection
string that is dynamically created, which also includes the port
information using the comma syntax. If your connection string
requires the port information to be passed as a ``port`` keyword
see the next example. This will create the following connection
string::
string which includes the port
information using the comma syntax. This will create the following
connection string::
DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass
* ``mssql+pyodbc://user:pass@host/db?port=123`` - connects using a connection
string that is dynamically created that includes the port
string that includes the port
information as a separate ``port`` keyword. This will create the
following connection string::
DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123
* ``mssql+pyodbc://user:pass@host/db?driver=MyDriver`` - connects using a connection
string that includes a custom
ODBC driver name. This will create the following connection string::
DRIVER={MyDriver};Server=host;Database=db;UID=user;PWD=pass
If you require a connection string that is outside the options
presented above, use the ``odbc_connect`` keyword to pass in a
urlencoded connection string. What gets passed in will be urldecoded
@ -94,7 +98,12 @@ class _MSNumeric_pyodbc(sqltypes.Numeric):
"""
def bind_processor(self, dialect):
super_process = super(_MSNumeric_pyodbc, self).bind_processor(dialect)
super_process = super(_MSNumeric_pyodbc, self).\
bind_processor(dialect)
if not dialect._need_decimal_fix:
return super_process
def process(value):
if self.asdecimal and \
@ -112,31 +121,35 @@ class _MSNumeric_pyodbc(sqltypes.Numeric):
return value
return process
# these routines needed for older versions of pyodbc.
# as of 2.1.8 this logic is integrated.
def _small_dec_to_string(self, value):
return "%s0.%s%s" % (
(value < 0 and '-' or ''),
'0' * (abs(value.adjusted()) - 1),
"".join([str(nint) for nint in value._int]))
"".join([str(nint) for nint in value.as_tuple()[1]]))
def _large_dec_to_string(self, value):
_int = value.as_tuple()[1]
if 'E' in str(value):
result = "%s%s%s" % (
(value < 0 and '-' or ''),
"".join([str(s) for s in value._int]),
"0" * (value.adjusted() - (len(value._int)-1)))
"".join([str(s) for s in _int]),
"0" * (value.adjusted() - (len(_int)-1)))
else:
if (len(value._int) - 1) > value.adjusted():
if (len(_int) - 1) > value.adjusted():
result = "%s%s.%s" % (
(value < 0 and '-' or ''),
"".join(
[str(s) for s in value._int][0:value.adjusted() + 1]),
[str(s) for s in _int][0:value.adjusted() + 1]),
"".join(
[str(s) for s in value._int][value.adjusted() + 1:]))
[str(s) for s in _int][value.adjusted() + 1:]))
else:
result = "%s%s" % (
(value < 0 and '-' or ''),
"".join(
[str(s) for s in value._int][0:value.adjusted() + 1]))
[str(s) for s in _int][0:value.adjusted() + 1]))
return result
@ -206,5 +219,7 @@ class MSDialect_pyodbc(PyODBCConnector, MSDialect):
self.description_encoding = description_encoding
self.use_scope_identity = self.dbapi and \
hasattr(self.dbapi.Cursor, 'nextset')
self._need_decimal_fix = self.dbapi and \
self._dbapi_version() < (2, 1, 8)
dialect = MSDialect_pyodbc

2
libs/sqlalchemy/dialects/mssql/zxjdbc.py

@ -1,5 +1,5 @@
# mssql/zxjdbc.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

4
libs/sqlalchemy/dialects/mysql/__init__.py

@ -1,11 +1,11 @@
# mysql/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.mysql import base, mysqldb, oursql, \
pyodbc, zxjdbc, mysqlconnector
pyodbc, zxjdbc, mysqlconnector, pymysql
# default dialect
base.dialect = mysqldb.dialect

317
libs/sqlalchemy/dialects/mysql/base.py

@ -1,5 +1,5 @@
# mysql/base.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -68,6 +68,22 @@ creation option can be specified in this syntax::
mysql_charset='utf8'
)
Case Sensitivity and Table Reflection
-------------------------------------
MySQL has inconsistent support for case-sensitive identifier
names, basing support on specific details of the underlying
operating system. However, it has been observed that no matter
what case sensitivity behavior is present, the names of tables in
foreign key declarations are *always* received from the database
as all-lower case, making it impossible to accurately reflect a
schema where inter-related tables use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as
all lower case both within SQLAlchemy as well as on the MySQL
database itself, especially if database reflection features are
to be used.
Keys
----
@ -81,7 +97,7 @@ foreign keys. For these tables, you may supply a
autoload=True
)
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT``` on
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
an integer primary key column::
>>> t = Table('mytable', metadata,
@ -152,14 +168,61 @@ available.
update(..., mysql_limit=10)
Troubleshooting
---------------
CAST Support
------------
MySQL documents the CAST operator as available in version 4.0.2. When using the
SQLAlchemy :func:`.cast` function, SQLAlchemy
will not render the CAST token on MySQL before this version, based on server version
detection, instead rendering the internal expression directly.
CAST may still not be desirable on an early MySQL version post-4.0.2, as it didn't
add all datatype support until 4.1.1. If your application falls into this
narrow area, the behavior of CAST can be controlled using the :ref:`sqlalchemy.ext.compiler_toplevel`
system, as per the recipe below::
from sqlalchemy.sql.expression import _Cast
from sqlalchemy.ext.compiler import compiles
@compiles(_Cast, 'mysql')
def _check_mysql_version(element, compiler, **kw):
if compiler.dialect.server_version_info < (4, 1, 0):
return compiler.process(element.clause, **kw)
else:
return compiler.visit_cast(element, **kw)
The above function, which only needs to be declared once
within an application, overrides the compilation of the
:func:`.cast` construct to check for version 4.1.0 before
fully rendering CAST; else the internal element of the
construct is rendered directly.
.. _mysql_indexes:
If you have problems that seem server related, first check that you are
using the most recent stable MySQL-Python package available. The Database
Notes page on the wiki at http://www.sqlalchemy.org is a good resource for
timely information affecting MySQL in SQLAlchemy.
MySQL Specific Index Options
----------------------------
MySQL-specific extensions to the :class:`.Index` construct are available.
Index Length
~~~~~~~~~~~~~
MySQL provides an option to create index entries with a certain length, where
"length" refers to the number of characters or bytes in each value which will
become part of the index. SQLAlchemy provides this feature via the
``mysql_length`` parameter::
Index('my_index', my_table.c.data, mysql_length=10)
Prefix lengths are given in characters for nonbinary string types and in bytes
for binary string types. The value passed to the keyword argument will be
simply passed through to the underlying CREATE INDEX command, so it *must* be
an integer. MySQL only allows a length for an index if it is for a CHAR,
VARCHAR, TEXT, BINARY, VARBINARY and BLOB.
More information can be found at:
http://dev.mysql.com/doc/refman/5.0/en/create-index.html
"""
import datetime, inspect, re, sys
@ -174,7 +237,7 @@ from array import array as _array
from sqlalchemy.engine import reflection
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy import types as sqltypes
from sqlalchemy.util import topological
from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \
BLOB, BINARY, VARBINARY
@ -231,9 +294,9 @@ SET_RE = re.compile(
class _NumericType(object):
"""Base for MySQL numeric types."""
def __init__(self, **kw):
self.unsigned = kw.pop('unsigned', False)
self.zerofill = kw.pop('zerofill', False)
def __init__(self, unsigned=False, zerofill=False, **kw):
self.unsigned = unsigned
self.zerofill = zerofill
super(_NumericType, self).__init__(**kw)
class _FloatType(_NumericType, sqltypes.Float):
@ -362,7 +425,7 @@ class DOUBLE(_FloatType):
super(DOUBLE, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class REAL(_FloatType):
class REAL(_FloatType, sqltypes.REAL):
"""MySQL REAL type."""
__visit_name__ = 'REAL'
@ -747,7 +810,7 @@ class CHAR(_StringType, sqltypes.CHAR):
__visit_name__ = 'CHAR'
def __init__(self, length, **kwargs):
def __init__(self, length=None, **kwargs):
"""Construct a CHAR.
:param length: Maximum data length, in characters.
@ -942,6 +1005,10 @@ class ENUM(sqltypes.Enum, _StringType):
return value
return process
def adapt(self, impltype, **kw):
kw['strict'] = self.strict
return sqltypes.Enum.adapt(self, impltype, **kw)
class SET(_StringType):
"""MySQL SET type."""
@ -988,8 +1055,8 @@ class SET(_StringType):
strip_values.append(a)
self.values = strip_values
length = max([len(v) for v in strip_values] + [0])
super(SET, self).__init__(length=length, **kw)
kw.setdefault('length', max([len(v) for v in strip_values] + [0]))
super(SET, self).__init__(**kw)
def result_processor(self, dialect, coltype):
def process(value):
@ -1113,6 +1180,9 @@ class MySQLExecutionContext(default.DefaultExecutionContext):
class MySQLCompiler(compiler.SQLCompiler):
render_table_with_column_in_update_from = True
"""Overridden from base SQLCompiler value"""
extract_map = compiler.SQLCompiler.extract_map.copy()
extract_map.update ({
'milliseconds': 'millisecond',
@ -1157,7 +1227,7 @@ class MySQLCompiler(compiler.SQLCompiler):
return 'CHAR'
elif isinstance(type_, sqltypes._Binary):
return 'BINARY'
elif isinstance(type_, NUMERIC):
elif isinstance(type_, sqltypes.NUMERIC):
return self.dialect.type_compiler.process(type_).replace('NUMERIC', 'DECIMAL')
else:
return None
@ -1180,6 +1250,15 @@ class MySQLCompiler(compiler.SQLCompiler):
return value
def get_select_precolumns(self, select):
"""Add special MySQL keywords in place of DISTINCT.
.. note::
this usage is deprecated. :meth:`.Select.prefix_with`
should be used for special keywords at the start
of a SELECT.
"""
if isinstance(select._distinct, basestring):
return select._distinct.upper() + " "
elif select._distinct:
@ -1222,32 +1301,39 @@ class MySQLCompiler(compiler.SQLCompiler):
elif offset is not None:
# As suggested by the MySQL docs, need to apply an
# artificial limit if one wasn't provided
# http://dev.mysql.com/doc/refman/5.0/en/select.html
if limit is None:
limit = 18446744073709551615
return ' \n LIMIT %s, %s' % (offset, limit)
# hardwire the upper limit. Currently
# needed by OurSQL with Python 3
# (https://bugs.launchpad.net/oursql/+bug/686232),
# but also is consistent with the usage of the upper
# bound as part of MySQL's "syntax" for OFFSET with
# no LIMIT
return ' \n LIMIT %s, %s' % (
self.process(sql.literal(offset)),
"18446744073709551615")
else:
return ' \n LIMIT %s, %s' % (
self.process(sql.literal(offset)),
self.process(sql.literal(limit)))
else:
# No offset provided, so just use the limit
return ' \n LIMIT %s' % (limit,)
def visit_update(self, update_stmt):
self.stack.append({'from': set([update_stmt.table])})
return ' \n LIMIT %s' % (self.process(sql.literal(limit)),)
self.isupdate = True
colparams = self._get_colparams(update_stmt)
text = "UPDATE " + self.preparer.format_table(update_stmt.table) + \
" SET " + ', '.join(["%s=%s" % (self.preparer.format_column(c[0]), c[1]) for c in colparams])
if update_stmt._whereclause is not None:
text += " WHERE " + self.process(update_stmt._whereclause)
limit = update_stmt.kwargs.get('mysql_limit', None)
def update_limit_clause(self, update_stmt):
limit = update_stmt.kwargs.get('%s_limit' % self.dialect.name, None)
if limit:
text += " LIMIT %s" % limit
return "LIMIT %s" % limit
else:
return None
self.stack.pop(-1)
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
for t in [from_table] + list(extra_froms))
def update_from_clause(self, update_stmt, from_table, extra_froms, **kw):
return None
return text
# ug. "InnoDB needs indexes on foreign keys and referenced keys [...].
# Starting with MySQL 4.1.2, these indexes are created automatically.
@ -1259,8 +1345,9 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
"""Get table constraints."""
constraint_string = super(MySQLDDLCompiler, self).create_table_constraints(table)
is_innodb = table.kwargs.has_key('mysql_engine') and \
table.kwargs['mysql_engine'].lower() == 'innodb'
engine_key = '%s_engine' % self.dialect.name
is_innodb = table.kwargs.has_key(engine_key) and \
table.kwargs[engine_key].lower() == 'innodb'
auto_inc_column = table._autoincrement_column
@ -1293,16 +1380,8 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
elif column.nullable and is_timestamp and default is None:
colspec.append('NULL')
if column.primary_key and column.autoincrement:
try:
first = [c for c in column.table.primary_key.columns
if (c.autoincrement and
isinstance(c.type, sqltypes.Integer) and
not c.foreign_keys)].pop(0)
if column is first:
colspec.append('AUTO_INCREMENT')
except IndexError:
pass
if column is column.table._autoincrement_column and column.server_default is None:
colspec.append('AUTO_INCREMENT')
return ' '.join(colspec)
@ -1310,26 +1389,61 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
"""Build table-level CREATE options like ENGINE and COLLATE."""
table_opts = []
for k in table.kwargs:
if k.startswith('mysql_'):
opt = k[6:].upper()
arg = table.kwargs[k]
if opt in _options_of_type_string:
arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")
if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
'DEFAULT_CHARACTER_SET', 'CHARACTER_SET', 'DEFAULT_CHARSET',
'DEFAULT_COLLATE'):
opt = opt.replace('_', ' ')
opts = dict(
(
k[len(self.dialect.name)+1:].upper(),
v
)
for k, v in table.kwargs.items()
if k.startswith('%s_' % self.dialect.name)
)
for opt in topological.sort([
('DEFAULT_CHARSET', 'COLLATE'),
('DEFAULT_CHARACTER_SET', 'COLLATE')
], opts):
arg = opts[opt]
if opt in _options_of_type_string:
arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")
if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
'DEFAULT_CHARACTER_SET', 'CHARACTER_SET',
'DEFAULT_CHARSET',
'DEFAULT_COLLATE'):
opt = opt.replace('_', ' ')
joiner = '='
if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET',
'CHARACTER SET', 'COLLATE'):
joiner = ' '
table_opts.append(joiner.join((opt, arg)))
return ' '.join(table_opts)
joiner = '='
if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET',
'CHARACTER SET', 'COLLATE'):
joiner = ' '
def visit_create_index(self, create):
index = create.element
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s " \
% (preparer.quote(self._index_identifier(index.name),
index.quote),preparer.format_table(index.table))
if 'mysql_length' in index.kwargs:
length = index.kwargs['mysql_length']
else:
length = None
if length is not None:
text+= "(%s(%d))" \
% (', '.join(preparer.quote(c.name, c.quote)
for c in index.columns), length)
else:
text+= "(%s)" \
% (', '.join(preparer.quote(c.name, c.quote)
for c in index.columns))
return text
table_opts.append(joiner.join((opt, arg)))
return ' '.join(table_opts)
def visit_drop_index(self, drop):
index = drop.element
@ -1408,17 +1522,25 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
if type_.precision is None:
return self._extend_numeric(type_, "NUMERIC")
elif type_.scale is None:
return self._extend_numeric(type_, "NUMERIC(%(precision)s)" % {'precision': type_.precision})
return self._extend_numeric(type_,
"NUMERIC(%(precision)s)" %
{'precision': type_.precision})
else:
return self._extend_numeric(type_, "NUMERIC(%(precision)s, %(scale)s)" % {'precision': type_.precision, 'scale' : type_.scale})
return self._extend_numeric(type_,
"NUMERIC(%(precision)s, %(scale)s)" %
{'precision': type_.precision, 'scale' : type_.scale})
def visit_DECIMAL(self, type_):
if type_.precision is None:
return self._extend_numeric(type_, "DECIMAL")
elif type_.scale is None:
return self._extend_numeric(type_, "DECIMAL(%(precision)s)" % {'precision': type_.precision})
return self._extend_numeric(type_,
"DECIMAL(%(precision)s)" %
{'precision': type_.precision})
else:
return self._extend_numeric(type_, "DECIMAL(%(precision)s, %(scale)s)" % {'precision': type_.precision, 'scale' : type_.scale})
return self._extend_numeric(type_,
"DECIMAL(%(precision)s, %(scale)s)" %
{'precision': type_.precision, 'scale' : type_.scale})
def visit_DOUBLE(self, type_):
if type_.precision is not None and type_.scale is not None:
@ -1437,8 +1559,11 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
return self._extend_numeric(type_, 'REAL')
def visit_FLOAT(self, type_):
if self._mysql_type(type_) and type_.scale is not None and type_.precision is not None:
return self._extend_numeric(type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale))
if self._mysql_type(type_) and \
type_.scale is not None and \
type_.precision is not None:
return self._extend_numeric(type_,
"FLOAT(%s, %s)" % (type_.precision, type_.scale))
elif type_.precision is not None:
return self._extend_numeric(type_, "FLOAT(%s)" % (type_.precision,))
else:
@ -1446,19 +1571,25 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def visit_INTEGER(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_, "INTEGER(%(display_width)s)" % {'display_width': type_.display_width})
return self._extend_numeric(type_,
"INTEGER(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "INTEGER")
def visit_BIGINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_, "BIGINT(%(display_width)s)" % {'display_width': type_.display_width})
return self._extend_numeric(type_,
"BIGINT(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "BIGINT")
def visit_MEDIUMINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_, "MEDIUMINT(%(display_width)s)" % {'display_width': type_.display_width})
return self._extend_numeric(type_,
"MEDIUMINT(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "MEDIUMINT")
@ -1470,7 +1601,10 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def visit_SMALLINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_, "SMALLINT(%(display_width)s)" % {'display_width': type_.display_width})
return self._extend_numeric(type_,
"SMALLINT(%(display_width)s)" %
{'display_width': type_.display_width}
)
else:
return self._extend_numeric(type_, "SMALLINT")
@ -1517,7 +1651,9 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
if type_.length:
return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length)
else:
raise exc.InvalidRequestError("VARCHAR requires a length when rendered on MySQL")
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" %
self.dialect.name)
def visit_CHAR(self, type_):
if type_.length:
@ -1531,7 +1667,9 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
if type_.length:
return self._extend_string(type_, {'national':True}, "VARCHAR(%(length)s)" % {'length': type_.length})
else:
raise exc.InvalidRequestError("NVARCHAR requires a length when rendered on MySQL")
raise exc.CompileError(
"NVARCHAR requires a length on dialect %s" %
self.dialect.name)
def visit_NCHAR(self, type_):
# We'll actually generate the equiv. "NATIONAL CHAR" instead of "NCHAR".
@ -1685,7 +1823,7 @@ class MySQLDialect(default.DefaultDialect):
resultset = connection.execute("XA RECOVER")
return [row['data'][0:row['gtrid_length']] for row in resultset]
def is_disconnect(self, e):
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.OperationalError):
return self._extract_error_code(e) in \
(2006, 2013, 2014, 2045, 2055)
@ -1741,7 +1879,7 @@ class MySQLDialect(default.DefaultDialect):
have = rs.rowcount > 0
rs.close()
return have
except exc.SQLError, e:
except exc.DBAPIError, e:
if self._extract_error_code(e.orig) == 1146:
return False
raise
@ -1941,17 +2079,6 @@ class MySQLDialect(default.DefaultDialect):
sql = parser._describe_to_create(table_name, columns)
return parser.parse(sql, charset)
def _adjust_casing(self, table, charset=None):
"""Adjust Table name to the server case sensitivity, if needed."""
casing = self._server_casing
# For winxx database hosts. TODO: is this really needed?
if casing == 1 and table.name != table.name.lower():
table.name = table.name.lower()
lc_alias = sa_schema._get_table_key(table.name, table.schema)
table.metadata.tables[lc_alias] = table
def _detect_charset(self, connection):
raise NotImplementedError()
@ -2029,7 +2156,7 @@ class MySQLDialect(default.DefaultDialect):
rp = None
try:
rp = connection.execute(st)
except exc.SQLError, e:
except exc.DBAPIError, e:
if self._extract_error_code(e.orig) == 1146:
raise exc.NoSuchTableError(full_name)
else:
@ -2053,7 +2180,7 @@ class MySQLDialect(default.DefaultDialect):
try:
try:
rp = connection.execute(st)
except exc.SQLError, e:
except exc.DBAPIError, e:
if self._extract_error_code(e.orig) == 1146:
raise exc.NoSuchTableError(full_name)
else:
@ -2185,7 +2312,7 @@ class MySQLTableDefinitionParser(object):
options.pop(nope, None)
for opt, val in options.items():
state.table_options['mysql_%s' % opt] = val
state.table_options['%s_%s' % (self.dialect.name, opt)] = val
def _parse_column(self, line, state):
"""Extract column details.
@ -2432,9 +2559,7 @@ class MySQLTableDefinitionParser(object):
# PARTITION
#
# punt!
self._re_partition = _re_compile(
r' '
r'(?:SUB)?PARTITION')
self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
# Table-level options (COLLATE, ENGINE, etc.)
# Do the string options first, since they have quoted strings we need to get rid of.

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save