tldr: refactoring

Romain J 2019-12-16 18:12:10 +01:00
commit f42b2194cd
2881 changed files with 568359 additions and 388 deletions
venv/lib/python3.7/site-packages/sqlalchemy/testing/plugin


@@ -0,0 +1,45 @@
"""
Bootstrapper for test framework plugins.
The entire rationale for this system is to get the modules in plugin/
imported without importing all of the supporting library, so that we can
set up things for testing before coverage starts.
The rationale for all of plugin/ being *in* the supporting library in the
first place is so that the testing and plugin suite is available to other
libraries, mainly external SQLAlchemy and Alembic dialects, to make use
of the same test environment and standard suites available to
SQLAlchemy/Alembic themselves without the need to ship/install a separate
package outside of SQLAlchemy.
NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0.
"""
import os
import sys
bootstrap_file = locals()["bootstrap_file"]
to_bootstrap = locals()["to_bootstrap"]
def load_file_as_module(name):
path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name)
if sys.version_info >= (3, 3):
from importlib import machinery
mod = machinery.SourceFileLoader(name, path).load_module()
else:
import imp
mod = imp.load_source(name, path)
return mod
if to_bootstrap == "pytest":
sys.modules["sqla_plugin_base"] = load_file_as_module("plugin_base")
sys.modules["sqla_pytestplugin"] = load_file_as_module("pytestplugin")
else:
raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa


@@ -0,0 +1,734 @@
# plugin/plugin_base.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Testing extensions.
this module is designed to work as a testing-framework-agnostic library,
created so that multiple test frameworks can be supported at once
(mostly so that we can migrate to new ones). The current target
is py.test.
"""
from __future__ import absolute_import
import abc
import re
import sys
py3k = sys.version_info >= (3, 0)
if py3k:
import configparser
ABC = abc.ABC
else:
import ConfigParser as configparser
import collections as collections_abc # noqa
class ABC(object):
__metaclass__ = abc.ABCMeta
# late imports
fixtures = None
engines = None
exclusions = None
warnings = None
profiling = None
assertions = None
requirements = None
config = None
testing = None
util = None
file_config = None
logging = None
include_tags = set()
exclude_tags = set()
options = None
def setup_options(make_option):
make_option(
"--log-info",
action="callback",
type="string",
callback=_log,
help="turn on info logging for <LOG> (multiple OK)",
)
make_option(
"--log-debug",
action="callback",
type="string",
callback=_log,
help="turn on debug logging for <LOG> (multiple OK)",
)
make_option(
"--db",
action="append",
type="string",
dest="db",
help="Use prefab database uri. Multiple OK, "
"first one is run by default.",
)
make_option(
"--dbs",
action="callback",
zeroarg_callback=_list_dbs,
help="List available prefab dbs",
)
make_option(
"--dburi",
action="append",
type="string",
dest="dburi",
help="Database uri. Multiple OK, " "first one is run by default.",
)
make_option(
"--dropfirst",
action="store_true",
dest="dropfirst",
help="Drop all tables in the target database first",
)
make_option(
"--backend-only",
action="store_true",
dest="backend_only",
help="Run only tests marked with __backend__ or __sparse_backend__",
)
make_option(
"--nomemory",
action="store_true",
dest="nomemory",
help="Don't run memory profiling tests",
)
make_option(
"--postgresql-templatedb",
type="string",
help="name of template database to use for PostgreSQL "
"CREATE DATABASE (defaults to current database)",
)
make_option(
"--low-connections",
action="store_true",
dest="low_connections",
help="Use a low number of distinct connections - "
"i.e. for Oracle TNS",
)
make_option(
"--write-idents",
type="string",
dest="write_idents",
help="write out generated follower idents to <file>, "
"when -n<num> is used",
)
make_option(
"--reversetop",
action="store_true",
dest="reversetop",
default=False,
help="Use a random-ordering set implementation in the ORM "
"(helps reveal dependency issues)",
)
make_option(
"--requirements",
action="callback",
type="string",
callback=_requirements_opt,
help="requirements class for testing, overrides setup.cfg",
)
make_option(
"--with-cdecimal",
action="store_true",
dest="cdecimal",
default=False,
help="Monkeypatch the cdecimal library into Python 'decimal' "
"for all tests",
)
make_option(
"--include-tag",
action="callback",
callback=_include_tag,
type="string",
help="Include tests with tag <tag>",
)
make_option(
"--exclude-tag",
action="callback",
callback=_exclude_tag,
type="string",
help="Exclude tests with tag <tag>",
)
make_option(
"--write-profiles",
action="store_true",
dest="write_profiles",
default=False,
help="Write/update failing profiling data.",
)
make_option(
"--force-write-profiles",
action="store_true",
dest="force_write_profiles",
default=False,
help="Unconditionally write/update profiling data.",
)
def configure_follower(follower_ident):
"""Configure required state for a follower.
This is invoked in the parent process and typically includes
database creation.
"""
from sqlalchemy.testing import provision
provision.FOLLOWER_IDENT = follower_ident
def memoize_important_follower_config(dict_):
"""Store important configuration we will need to send to a follower.
This is invoked in the parent process after normal config is set up.
This is necessary because py.test does not appear to use forking, so the
follower starts with nothing in memory, *but* it also isn't running our
argparse callables, so we have to copy all of that over.
"""
dict_["memoized_config"] = {
"include_tags": include_tags,
"exclude_tags": exclude_tags,
}
def restore_important_follower_config(dict_):
"""Restore important configuration needed by a follower.
This is invoked in the follower process.
"""
global include_tags, exclude_tags
include_tags.update(dict_["memoized_config"]["include_tags"])
exclude_tags.update(dict_["memoized_config"]["exclude_tags"])
def read_config():
global file_config
file_config = configparser.ConfigParser()
file_config.read(["setup.cfg", "test.cfg"])
def pre_begin(opt):
"""things to set up early, before coverage might be setup."""
global options
options = opt
for fn in pre_configure:
fn(options, file_config)
def set_coverage_flag(value):
options.has_coverage = value
def post_begin():
"""things to set up later, once we know coverage is running."""
# Lazy setup of other options (post coverage)
for fn in post_configure:
fn(options, file_config)
# late imports, has to happen after config.
global util, fixtures, engines, exclusions, assertions
global warnings, profiling, config, testing
from sqlalchemy import testing # noqa
from sqlalchemy.testing import fixtures, engines, exclusions # noqa
from sqlalchemy.testing import assertions, warnings, profiling # noqa
from sqlalchemy.testing import config # noqa
from sqlalchemy import util # noqa
warnings.setup_filters()
def _log(opt_str, value, parser):
global logging
if not logging:
import logging
logging.basicConfig()
if opt_str.endswith("-info"):
logging.getLogger(value).setLevel(logging.INFO)
elif opt_str.endswith("-debug"):
logging.getLogger(value).setLevel(logging.DEBUG)
def _list_dbs(*args):
print("Available --db options (use --dburi to override)")
for macro in sorted(file_config.options("db")):
print("%20s\t%s" % (macro, file_config.get("db", macro)))
sys.exit(0)
def _requirements_opt(opt_str, value, parser):
_setup_requirements(value)
def _exclude_tag(opt_str, value, parser):
exclude_tags.add(value.replace("-", "_"))
def _include_tag(opt_str, value, parser):
include_tags.add(value.replace("-", "_"))
pre_configure = []
post_configure = []
def pre(fn):
pre_configure.append(fn)
return fn
def post(fn):
post_configure.append(fn)
return fn
@pre
def _setup_options(opt, file_config):
global options
options = opt
@pre
def _set_nomemory(opt, file_config):
if opt.nomemory:
exclude_tags.add("memory_intensive")
@pre
def _monkeypatch_cdecimal(options, file_config):
if options.cdecimal:
import cdecimal
sys.modules["decimal"] = cdecimal
@post
def _init_symbols(options, file_config):
from sqlalchemy.testing import config
config._fixture_functions = _fixture_fn_class()
@post
def _engine_uri(options, file_config):
from sqlalchemy.testing import config
from sqlalchemy import testing
from sqlalchemy.testing import provision
if options.dburi:
db_urls = list(options.dburi)
else:
db_urls = []
if options.db:
for db_token in options.db:
for db in re.split(r"[,\s]+", db_token):
if db not in file_config.options("db"):
raise RuntimeError(
"Unknown URI specifier '%s'. "
"Specify --dbs for known uris." % db
)
else:
db_urls.append(file_config.get("db", db))
if not db_urls:
db_urls.append(file_config.get("db", "default"))
config._current = None
for db_url in db_urls:
if options.write_idents and provision.FOLLOWER_IDENT: # != 'master':
with open(options.write_idents, "a") as file_:
file_.write(provision.FOLLOWER_IDENT + " " + db_url + "\n")
cfg = provision.setup_config(
db_url, options, file_config, provision.FOLLOWER_IDENT
)
if not config._current:
cfg.set_as_current(cfg, testing)
@post
def _requirements(options, file_config):
requirement_cls = file_config.get("sqla_testing", "requirement_cls")
_setup_requirements(requirement_cls)
def _setup_requirements(argument):
from sqlalchemy.testing import config
from sqlalchemy import testing
if config.requirements is not None:
return
modname, clsname = argument.split(":")
# importlib.import_module() only introduced in 2.7, a little
# late
mod = __import__(modname)
for component in modname.split(".")[1:]:
mod = getattr(mod, component)
req_cls = getattr(mod, clsname)
config.requirements = testing.requires = req_cls()
@post
def _prep_testing_database(options, file_config):
from sqlalchemy.testing import config, util
from sqlalchemy.testing.exclusions import against
from sqlalchemy import schema, inspect
if options.dropfirst:
for cfg in config.Config.all_configs():
e = cfg.db
inspector = inspect(e)
try:
view_names = inspector.get_view_names()
except NotImplementedError:
pass
else:
for vname in view_names:
e.execute(
schema._DropView(
schema.Table(vname, schema.MetaData())
)
)
if config.requirements.schemas.enabled_for_config(cfg):
try:
view_names = inspector.get_view_names(schema="test_schema")
except NotImplementedError:
pass
else:
for vname in view_names:
e.execute(
schema._DropView(
schema.Table(
vname,
schema.MetaData(),
schema="test_schema",
)
)
)
util.drop_all_tables(e, inspector)
if config.requirements.schemas.enabled_for_config(cfg):
util.drop_all_tables(e, inspector, schema=cfg.test_schema)
if against(cfg, "postgresql"):
from sqlalchemy.dialects import postgresql
for enum in inspector.get_enums("*"):
e.execute(
postgresql.DropEnumType(
postgresql.ENUM(
name=enum["name"], schema=enum["schema"]
)
)
)
@post
def _reverse_topological(options, file_config):
if options.reversetop:
from sqlalchemy.orm.util import randomize_unitofwork
randomize_unitofwork()
@post
def _post_setup_options(opt, file_config):
from sqlalchemy.testing import config
config.options = options
config.file_config = file_config
@post
def _setup_profiling(options, file_config):
from sqlalchemy.testing import profiling
profiling._profile_stats = profiling.ProfileStatsFile(
file_config.get("sqla_testing", "profile_file")
)
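# For reference, the hooks above read their settings from the ini file
# loaded by read_config(); the section and key names below are the ones
# used in this module, while the values are purely illustrative:
#
#   [db]
#   default = sqlite:///:memory:
#   postgresql = postgresql://scott:tiger@127.0.0.1:5432/test
#
#   [sqla_testing]
#   requirement_cls = test.requirements:DefaultRequirements
#   profile_file = test/profiles.txt
#
# _engine_uri() resolves --db names against the [db] section, _requirements()
# hands the "module:Class" string from requirement_cls to
# _setup_requirements(), and _setup_profiling() passes profile_file to
# profiling.ProfileStatsFile.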
def want_class(name, cls):
if not issubclass(cls, fixtures.TestBase):
return False
elif name.startswith("_"):
return False
elif (
config.options.backend_only
and not getattr(cls, "__backend__", False)
and not getattr(cls, "__sparse_backend__", False)
):
return False
else:
return True
def want_method(cls, fn):
if not fn.__name__.startswith("test_"):
return False
elif fn.__module__ is None:
return False
elif include_tags:
return (
hasattr(cls, "__tags__")
and exclusions.tags(cls.__tags__).include_test(
include_tags, exclude_tags
)
) or (
hasattr(fn, "_sa_exclusion_extend")
and fn._sa_exclusion_extend.include_test(
include_tags, exclude_tags
)
)
elif exclude_tags and hasattr(cls, "__tags__"):
return exclusions.tags(cls.__tags__).include_test(
include_tags, exclude_tags
)
elif exclude_tags and hasattr(fn, "_sa_exclusion_extend"):
return fn._sa_exclusion_extend.include_test(include_tags, exclude_tags)
else:
return True
def generate_sub_tests(cls, module):
if getattr(cls, "__backend__", False) or getattr(
cls, "__sparse_backend__", False
):
sparse = getattr(cls, "__sparse_backend__", False)
for cfg in _possible_configs_for_cls(cls, sparse=sparse):
orig_name = cls.__name__
# we can have special chars in these names except for the
# pytest junit plugin, which is tripped up by the brackets
# and periods, so sanitize
alpha_name = re.sub(r"[_\[\]\.]+", "_", cfg.name)
alpha_name = re.sub(r"_+$", "", alpha_name)
name = "%s_%s" % (cls.__name__, alpha_name)
subcls = type(
name,
(cls,),
{"_sa_orig_cls_name": orig_name, "__only_on_config__": cfg},
)
setattr(module, name, subcls)
yield subcls
else:
yield cls
def start_test_class(cls):
_do_skips(cls)
_setup_engine(cls)
def stop_test_class(cls):
# from sqlalchemy import inspect
# assert not inspect(testing.db).get_table_names()
engines.testing_reaper._stop_test_ctx()
try:
if not options.low_connections:
assertions.global_cleanup_assertions()
finally:
_restore_engine()
def _restore_engine():
config._current.reset(testing)
def final_process_cleanup():
engines.testing_reaper._stop_test_ctx_aggressive()
assertions.global_cleanup_assertions()
_restore_engine()
def _setup_engine(cls):
if getattr(cls, "__engine_options__", None):
eng = engines.testing_engine(options=cls.__engine_options__)
config._current.push_engine(eng, testing)
def before_test(test, test_module_name, test_class, test_name):
# format looks like:
# "test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause"
name = getattr(test_class, "_sa_orig_cls_name", test_class.__name__)
id_ = "%s.%s.%s" % (test_module_name, name, test_name)
profiling._current_test = id_
def after_test(test):
engines.testing_reaper._after_test_ctx()
def _possible_configs_for_cls(cls, reasons=None, sparse=False):
all_configs = set(config.Config.all_configs())
if cls.__unsupported_on__:
spec = exclusions.db_spec(*cls.__unsupported_on__)
for config_obj in list(all_configs):
if spec(config_obj):
all_configs.remove(config_obj)
if getattr(cls, "__only_on__", None):
spec = exclusions.db_spec(*util.to_list(cls.__only_on__))
for config_obj in list(all_configs):
if not spec(config_obj):
all_configs.remove(config_obj)
if getattr(cls, "__only_on_config__", None):
all_configs.intersection_update([cls.__only_on_config__])
if hasattr(cls, "__requires__"):
requirements = config.requirements
for config_obj in list(all_configs):
for requirement in cls.__requires__:
check = getattr(requirements, requirement)
skip_reasons = check.matching_config_reasons(config_obj)
if skip_reasons:
all_configs.remove(config_obj)
if reasons is not None:
reasons.extend(skip_reasons)
break
if hasattr(cls, "__prefer_requires__"):
non_preferred = set()
requirements = config.requirements
for config_obj in list(all_configs):
for requirement in cls.__prefer_requires__:
check = getattr(requirements, requirement)
if not check.enabled_for_config(config_obj):
non_preferred.add(config_obj)
if all_configs.difference(non_preferred):
all_configs.difference_update(non_preferred)
if sparse:
# pick only one config from each base dialect
# sorted so we get the same backend each time selecting the highest
# server version info.
per_dialect = {}
for cfg in reversed(
sorted(
all_configs,
key=lambda cfg: (
cfg.db.name,
cfg.db.dialect.server_version_info,
),
)
):
db = cfg.db.name
if db not in per_dialect:
per_dialect[db] = cfg
return per_dialect.values()
return all_configs
def _do_skips(cls):
reasons = []
all_configs = _possible_configs_for_cls(cls, reasons)
if getattr(cls, "__skip_if__", False):
for c in getattr(cls, "__skip_if__"):
if c():
config.skip_test(
"'%s' skipped by %s" % (cls.__name__, c.__name__)
)
if not all_configs:
msg = "'%s' unsupported on any DB implementation %s%s" % (
cls.__name__,
", ".join(
"'%s(%s)+%s'"
% (
config_obj.db.name,
".".join(
str(dig)
for dig in exclusions._server_version(config_obj.db)
),
config_obj.db.driver,
)
for config_obj in config.Config.all_configs()
),
", ".join(reasons),
)
config.skip_test(msg)
elif hasattr(cls, "__prefer_backends__"):
non_preferred = set()
spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__))
for config_obj in all_configs:
if not spec(config_obj):
non_preferred.add(config_obj)
if all_configs.difference(non_preferred):
all_configs.difference_update(non_preferred)
if config._current not in all_configs:
_setup_config(all_configs.pop(), cls)
def _setup_config(config_obj, ctx):
config._current.push(config_obj, testing)
class FixtureFunctions(ABC):
@abc.abstractmethod
def skip_test_exception(self, *arg, **kw):
raise NotImplementedError()
@abc.abstractmethod
def combinations(self, *args, **kw):
raise NotImplementedError()
@abc.abstractmethod
def param_ident(self, *args, **kw):
raise NotImplementedError()
@abc.abstractmethod
def fixture(self, *arg, **kw):
raise NotImplementedError()
def get_current_test_name(self):
raise NotImplementedError()
_fixture_fn_class = None
def set_fixture_functions(fixture_fn_class):
global _fixture_fn_class
_fixture_fn_class = fixture_fn_class
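The selection hooks in this module (want_class, want_method, generate_sub_tests, _possible_configs_for_cls, _do_skips) are driven entirely by class-level attributes on the test classes. A rough sketch of a class that opts into them, assuming a fixtures.TestBase subclass; the requirement name is invented for illustration:

from sqlalchemy.testing import fixtures


class RoundTripTest(fixtures.TestBase):
    # generate_sub_tests() clones the class once per matching backend config
    __backend__ = True

    # _possible_configs_for_cls() keeps only configs for these dialects
    __only_on__ = ("postgresql", "mysql")

    # _do_skips() looks each name up on config.requirements
    # ("some_requirement" is a hypothetical requirement name)
    __requires__ = ("some_requirement",)

    # want_method() matches these against --include-tag / --exclude-tag
    __tags__ = ("memory_intensive",)

    def test_roundtrip(self):
        pass

Classes that set neither __backend__ nor __sparse_backend__ fall through the else branch of generate_sub_tests() and are collected once against the current configuration.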


@@ -0,0 +1,419 @@
try:
# installed by bootstrap.py
import sqla_plugin_base as plugin_base
except ImportError:
# assume we're a package, use traditional import
from . import plugin_base
import argparse
import collections
import inspect
import itertools
import operator
import os
import re
import sys
import pytest
try:
import xdist # noqa
has_xdist = True
except ImportError:
has_xdist = False
def pytest_addoption(parser):
group = parser.getgroup("sqlalchemy")
def make_option(name, **kw):
callback_ = kw.pop("callback", None)
if callback_:
class CallableAction(argparse.Action):
def __call__(
self, parser, namespace, values, option_string=None
):
callback_(option_string, values, parser)
kw["action"] = CallableAction
zeroarg_callback = kw.pop("zeroarg_callback", None)
if zeroarg_callback:
class CallableAction(argparse.Action):
def __init__(
self,
option_strings,
dest,
default=False,
required=False,
help=None, # noqa
):
super(CallableAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=True,
default=default,
required=required,
help=help,
)
def __call__(
self, parser, namespace, values, option_string=None
):
zeroarg_callback(option_string, values, parser)
kw["action"] = CallableAction
group.addoption(name, **kw)
plugin_base.setup_options(make_option)
plugin_base.read_config()
def pytest_configure(config):
pytest.register_assert_rewrite("sqlalchemy.testing.assertions")
if hasattr(config, "slaveinput"):
plugin_base.restore_important_follower_config(config.slaveinput)
plugin_base.configure_follower(config.slaveinput["follower_ident"])
else:
if config.option.write_idents and os.path.exists(
config.option.write_idents
):
os.remove(config.option.write_idents)
plugin_base.pre_begin(config.option)
plugin_base.set_coverage_flag(
bool(getattr(config.option, "cov_source", False))
)
plugin_base.set_fixture_functions(PytestFixtureFunctions)
def pytest_sessionstart(session):
plugin_base.post_begin()
def pytest_sessionfinish(session):
plugin_base.final_process_cleanup()
if has_xdist:
import uuid
def pytest_configure_node(node):
# the master for each node fills slaveinput dictionary
# which pytest-xdist will transfer to the subprocess
plugin_base.memoize_important_follower_config(node.slaveinput)
node.slaveinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12]
from sqlalchemy.testing import provision
provision.create_follower_db(node.slaveinput["follower_ident"])
def pytest_testnodedown(node, error):
from sqlalchemy.testing import provision
provision.drop_follower_db(node.slaveinput["follower_ident"])
def pytest_collection_modifyitems(session, config, items):
# look for all those classes that specify __backend__ and
# expand them out into per-database test cases.
# this is much easier to do within pytest_pycollect_makeitem, however
# pytest is iterating through cls.__dict__ as makeitem is
# called which causes a "dictionary changed size" error on py3k.
# I'd submit a pullreq for them to turn it into a list first, but
# it's to suit the rather odd use case here which is that we are adding
# new classes to a module on the fly.
rebuilt_items = collections.defaultdict(
lambda: collections.defaultdict(list)
)
items[:] = [
item
for item in items
if isinstance(item.parent, pytest.Instance)
and not item.parent.parent.name.startswith("_")
]
test_classes = set(item.parent for item in items)
for test_class in test_classes:
for sub_cls in plugin_base.generate_sub_tests(
test_class.cls, test_class.parent.module
):
if sub_cls is not test_class.cls:
per_cls_dict = rebuilt_items[test_class.cls]
for inst in pytest.Class(
sub_cls.__name__, parent=test_class.parent.parent
).collect():
for t in inst.collect():
per_cls_dict[t.name].append(t)
newitems = []
for item in items:
if item.parent.cls in rebuilt_items:
newitems.extend(rebuilt_items[item.parent.cls][item.name])
else:
newitems.append(item)
# seems like the functions attached to a test class aren't sorted already?
# is that true and why's that? (when using unittest, they're sorted)
items[:] = sorted(
newitems,
key=lambda item: (
item.parent.parent.parent.name,
item.parent.parent.name,
item.name,
),
)
def pytest_pycollect_makeitem(collector, name, obj):
if inspect.isclass(obj) and plugin_base.want_class(name, obj):
return [
pytest.Class(parametrize_cls.__name__, parent=collector)
for parametrize_cls in _parametrize_cls(collector.module, obj)
]
elif (
inspect.isfunction(obj)
and isinstance(collector, pytest.Instance)
and plugin_base.want_method(collector.cls, obj)
):
# None means, fall back to default logic, which includes
# method-level parametrize
return None
else:
# empty list means skip this item
return []
_current_class = None
def _parametrize_cls(module, cls):
"""implement a class-based version of pytest parametrize."""
if "_sa_parametrize" not in cls.__dict__:
return [cls]
_sa_parametrize = cls._sa_parametrize
classes = []
for full_param_set in itertools.product(
*[params for argname, params in _sa_parametrize]
):
cls_variables = {}
for argname, param in zip(
[_sa_param[0] for _sa_param in _sa_parametrize], full_param_set
):
if not argname:
raise TypeError("need argnames for class-based combinations")
argname_split = re.split(r",\s*", argname)
for arg, val in zip(argname_split, param.values):
cls_variables[arg] = val
parametrized_name = "_".join(
# token is a string, but in py2k py.test is giving us a unicode,
# so call str() on it.
str(re.sub(r"\W", "", token))
for param in full_param_set
for token in param.id.split("-")
)
name = "%s_%s" % (cls.__name__, parametrized_name)
newcls = type.__new__(type, name, (cls,), cls_variables)
setattr(module, name, newcls)
classes.append(newcls)
return classes
def pytest_runtest_setup(item):
# here we seem to get called only based on what we collected
# in pytest_collection_modifyitems. So to do class-based stuff
# we have to tear that out.
global _current_class
if not isinstance(item, pytest.Function):
return
# ... so we're doing a little dance here to figure it out...
if _current_class is None:
class_setup(item.parent.parent)
_current_class = item.parent.parent
# this is needed for the class-level, to ensure that the
# teardown runs after the class is completed with its own
# class-level teardown...
def finalize():
global _current_class
class_teardown(item.parent.parent)
_current_class = None
item.parent.parent.addfinalizer(finalize)
test_setup(item)
def pytest_runtest_teardown(item):
# ...but this works better as the hook here rather than
# using a finalizer, as the finalizer seems to get in the way
# of the test reporting failures correctly (you get a bunch of
# py.test assertion stuff instead)
test_teardown(item)
def test_setup(item):
plugin_base.before_test(
item, item.parent.module.__name__, item.parent.cls, item.name
)
def test_teardown(item):
plugin_base.after_test(item)
def class_setup(item):
plugin_base.start_test_class(item.cls)
def class_teardown(item):
plugin_base.stop_test_class(item.cls)
def getargspec(fn):
if sys.version_info.major == 3:
return inspect.getfullargspec(fn)
else:
return inspect.getargspec(fn)
class PytestFixtureFunctions(plugin_base.FixtureFunctions):
def skip_test_exception(self, *arg, **kw):
return pytest.skip.Exception(*arg, **kw)
_combination_id_fns = {
"i": lambda obj: obj,
"r": repr,
"s": str,
"n": operator.attrgetter("__name__"),
}
def combinations(self, *arg_sets, **kw):
"""facade for pytest.mark.paramtrize.
Automatically derives argument names from the callable which in our
case is always a method on a class with positional arguments.
ids for parameter sets are derived using an optional template.
"""
from sqlalchemy.testing import exclusions
if sys.version_info.major == 3:
if len(arg_sets) == 1 and hasattr(arg_sets[0], "__next__"):
arg_sets = list(arg_sets[0])
else:
if len(arg_sets) == 1 and hasattr(arg_sets[0], "next"):
arg_sets = list(arg_sets[0])
argnames = kw.pop("argnames", None)
exclusion_combinations = []
def _filter_exclusions(args):
result = []
gathered_exclusions = []
for a in args:
if isinstance(a, exclusions.compound):
gathered_exclusions.append(a)
else:
result.append(a)
exclusion_combinations.extend(
[(exclusion, result) for exclusion in gathered_exclusions]
)
return result
id_ = kw.pop("id_", None)
if id_:
_combination_id_fns = self._combination_id_fns
# because itemgetter is not consistent for one argument vs.
# multiple, make it multiple in all cases and use a slice
# to omit the first argument
_arg_getter = operator.itemgetter(
0,
*[
idx
for idx, char in enumerate(id_)
if char in ("n", "r", "s", "a")
]
)
fns = [
(operator.itemgetter(idx), _combination_id_fns[char])
for idx, char in enumerate(id_)
if char in _combination_id_fns
]
arg_sets = [
pytest.param(
*_arg_getter(_filter_exclusions(arg))[1:],
id="-".join(
comb_fn(getter(arg)) for getter, comb_fn in fns
)
)
for arg in arg_sets
]
else:
# ensure using pytest.param so that even a 1-arg param set
# is still passed as a tuple; otherwise parametrize tries to
# interpret a single arg differently than a tuple arg
arg_sets = [
pytest.param(*_filter_exclusions(arg)) for arg in arg_sets
]
def decorate(fn):
if inspect.isclass(fn):
if "_sa_parametrize" not in fn.__dict__:
fn._sa_parametrize = []
fn._sa_parametrize.append((argnames, arg_sets))
return fn
else:
if argnames is None:
_argnames = getargspec(fn).args[1:]
else:
_argnames = argnames
if exclusion_combinations:
for exclusion, combination in exclusion_combinations:
combination_by_kw = {
argname: val
for argname, val in zip(_argnames, combination)
}
exclusion = exclusion.with_combination(
**combination_by_kw
)
fn = exclusion(fn)
return pytest.mark.parametrize(_argnames, arg_sets)(fn)
return decorate
def param_ident(self, *parameters):
ident = parameters[0]
return pytest.param(*parameters[1:], id=ident)
def fixture(self, *arg, **kw):
return pytest.fixture(*arg, **kw)
def get_current_test_name(self):
return os.environ.get("PYTEST_CURRENT_TEST")