Unverified · Commit 3d269150 · Authored by Cleber Rosa

Merge remote-tracking branch 'beraldoleal/suites/refactoring'

Signed-off-by: Cleber Rosa <crosa@redhat.com>
......@@ -17,6 +17,7 @@
Job module - describes a sequence of automated test operations.
"""
import logging
import os
import pprint
......@@ -26,8 +27,10 @@ import sys
import tempfile
import time
import traceback
import warnings
from copy import deepcopy
from ..utils import astring, path, process
from ..utils import astring
from ..utils.data_structures import CallbackRegister, time_to_seconds
from . import (data_dir, dispatcher, exceptions, exit_codes, jobdata, output,
result, version)
......@@ -35,6 +38,7 @@ from .future.settings import settings
from .job_id import create_unique_job_id
from .output import LOG_JOB, LOG_UI, STD_OUTPUT
from .suite import TestSuite, TestSuiteError
from .utils import get_avocado_git_version
_NEW_ISSUE_LINK = 'https://github.com/avocado-framework/avocado/issues/new'
......@@ -68,21 +72,48 @@ register_job_options()
class Job:
"""
A Job is a set of operations performed on a test machine.
"""A Job is a set of operations performed on a test machine.
Most of the time, we are interested in simply running tests,
along with setup operations and event recording.
A job can have multiple test suites attached to it. Keep in mind that
when creating a job through the constructor (`Job()`), we assume that you
want control over the test suites and that you will build your own
TestSuite objects.
If you would like Avocado's help in creating the job's test_suites from
the config provided, use the `Job.from_config()` method instead and it
will do its best to create the test suites for you.
In short, there are two main ways to create a job:
1. Automatic discovery, using `from_config()` method::
job = Job.from_config(job_config=job_config,
suites_configs=[suite_cfg1, suite_cfg2])
2. Manual or Custom discovery, using the constructor::
job = Job(config=config,
test_suites=[suite1, suite2, suite3])
"""
def __init__(self, config=None):
"""
Creates an instance of Job class.
def __init__(self, config=None, test_suites=None):
"""Creates an instance of Job class.
Note that `config` and `test_suites` are optional: if they are not
passed, you must set them before running your tests, otherwise nothing
will run. If you need help creating the test_suites from the config,
use the `Job.from_config()` method instead.
:param config: the job configuration, usually set by command
line options and argument parsing
:type config: dict
:param test_suites: A list of TestSuite objects. If it is None, the job
starts with an empty list and you can add suites
after init by accessing job.test_suites.
:type test_suites: list
"""
self.config = settings.as_dict()
if config:
......@@ -96,6 +127,8 @@ class Job:
self.config['run.unique_job_id'] = '0' * 40
self.config['sysinfo.collect.enabled'] = 'off'
self.test_suites = test_suites or []
#: The log directory for this job, also known as the job results
#: directory. If it's set to None, it means that the job results
#: directory has not yet been created.
......@@ -107,7 +140,6 @@ class Job:
self.result = None
self.interrupted_reason = None
self._test_parameters = None
self._timeout = None
self._unique_id = None
......@@ -124,11 +156,6 @@ class Job:
self._stdout_stderr = None
self.replay_sourcejob = self.config.get('replay_sourcejob')
self.exitcode = exit_codes.AVOCADO_ALL_OK
#: The list of discovered/resolved tests that will be attempted to
#: be run by this job. If set to None, it means that test resolution
#: has not been attempted. If set to an empty list, it means that no
#: test was found during resolution.
self.test_suite = None
# The result events dispatcher is shared with the test runner.
# Because of our goal to support using the phases of a job
......@@ -163,10 +190,7 @@ class Job:
# TODO: Fix this, this is one of the few cases where using the config
# generated from the new settings with a hardcoded 'default' value
try:
store_logging_stream = self.config.get('run.store_logging_stream', [])
except AttributeError:
store_logging_stream = []
store_logging_stream = self.config.get('run.store_logging_stream', [])
for name in store_logging_stream:
name = re.split(r'(?<!\\):', name, maxsplit=1)
......@@ -212,33 +236,6 @@ class Job:
for logger in loggers:
logging.getLogger(logger).removeHandler(handler)
@staticmethod
def _get_avocado_git_version():
# if running from git sources, there will be a ".git" directory
# 3 levels up
base_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
git_dir = os.path.join(base_dir, '.git')
if not os.path.isdir(git_dir):
return
if not os.path.exists(os.path.join(base_dir, 'python-avocado.spec')):
return
try:
git = path.find_command('git')
except path.CmdNotFoundError:
return
olddir = os.getcwd()
try:
os.chdir(os.path.abspath(base_dir))
cmd = "%s show --summary --pretty='%%H'" % git
res = process.run(cmd, ignore_status=True, verbose=False)
if res.exit_status == 0:
top_commit = res.stdout_text.splitlines()[0][:8]
return " (GIT commit %s)" % top_commit
finally:
os.chdir(olddir)
def _log_avocado_config(self):
LOG_JOB.info('Avocado config:')
LOG_JOB.info('')
......@@ -257,7 +254,7 @@ class Job:
def _log_avocado_version(self):
version_log = version.VERSION
git_version = self._get_avocado_git_version()
git_version = get_avocado_git_version()
if git_version is not None:
version_log += git_version
LOG_JOB.info('Avocado version: %s', version_log)
......@@ -269,7 +266,7 @@ class Job:
LOG_JOB.info("Command line: %s", cmdline)
LOG_JOB.info('')
def _log_job_debug_info(self, variants):
def _log_job_debug_info(self):
"""
Log relevant debug information to the job log.
"""
......@@ -277,7 +274,8 @@ class Job:
self._log_avocado_version()
self._log_avocado_config()
self._log_avocado_datadir()
self._log_variants(variants)
for suite in self.test_suites:
self._log_variants(suite.variants)
self._log_tmp_dir()
self._log_job_id()
......@@ -393,19 +391,60 @@ class Job:
if os.path.exists(proc_latest):
os.unlink(proc_latest)
@classmethod
def from_config(cls, job_config, suites_configs=None):
"""Helper method to create a job from config dicts.
This is different from the Job() initialization because here we are
assuming that you need some help to build the test suites. Avocado will
try to resolve tests based on the configuration information instead of
assuming pre-populated test suites.
Keep in mind that the suite.name of each suite will be replaced by a
counter.
If you need to create a custom Job with your own TestSuites, please use
the Job() constructor instead of this method.
:param job_config: A config dict to be used on this job and also as a
'global' config for each test suite.
:type job_config: dict
:param suites_configs: A list of config dicts, one per test suite. Each
suite config will be merged with the job_config
dict. If None is passed, this job will have a
single test_suite with the same config as
job_config.
:type suites_configs: list
"""
suites_configs = suites_configs or [deepcopy(job_config)]
suites = []
for index, config in enumerate(suites_configs, start=1):
suites.append(TestSuite.from_config(config,
name=index,
job_config=job_config))
return cls(job_config, suites)
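A short sketch of the counter-based naming (references borrowed from the
examples further down; the output noted in the comment follows the
enumerate() call above):

from avocado.core.job import Job

job_config = {'core.show': ['none']}
suite_cfg1 = {'run.references': ['examples/tests/passtest.py:PassTest.test']}
suite_cfg2 = {'run.references': ['examples/tests/sleeptest.py:SleepTest.test']}

job = Job.from_config(job_config=job_config,
                      suites_configs=[suite_cfg1, suite_cfg2])
for suite in job.test_suites:
    print(suite.name)  # 1, then 2 (suite names replaced by the counter)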
@property
def test_parameters(self):
"""Placeholder for test parameters.
This is related to --test-parameters command line option. They're kept
in the job because they will be prepared only once, since they are
read-only and will be shared across all tests of a job.
"""
if self._test_parameters is None:
self._test_parameters = {name: value for name, value
in self.config.get('run.test_parameters',
[])}
return self._test_parameters
@property
def size(self):
"""Job size is the sum of all test suite sizes."""
return sum([suite.size for suite in self.test_suites])
@property
def test_suite(self):
"""This is the first test suite of this job (deprecated).
Please use test_suites instead.
"""
if self.test_suites:
return self.test_suites[0]
@test_suite.setter
def test_suite(self, var):
"""Temporary setter. Suites should be set from test_suites."""
if self.test_suites:
self.test_suites[0] = var
else:
self.test_suites = [var]
@property
def timeout(self):
......@@ -445,16 +484,20 @@ class Job:
pass
def create_test_suite(self):
msg = ("create_test_suite() is deprecated. You can also create your "
"own suites with TestSuite() or TestSuite.from_config().")
warnings.warn(msg, DeprecationWarning)
try:
self.test_suite = TestSuite.from_config(self.config)
if self.test_suite.size == 0:
if self.test_suite and self.test_suite.size == 0:
refs = self.test_suite.references
msg = ("No tests found for given test references, try "
"'avocado list -V %s' for details") % " ".join(refs)
raise exceptions.JobTestSuiteEmptyError(msg)
except TestSuiteError as details:
raise exceptions.JobBaseException(details)
self.result.tests_total = self.test_suite.size
if self.test_suite:
self.result.tests_total = self.test_suite.size
def post_tests(self):
"""
......@@ -498,7 +541,7 @@ class Job:
if self.time_start == -1:
self.time_start = time.time()
try:
self.create_test_suite()
self.result.tests_total = self.size
self.pre_tests()
return self.run_tests()
except exceptions.JobBaseException as details:
......@@ -537,24 +580,22 @@ class Job:
"""
The actual test execution phase
"""
self._log_job_debug_info(self.test_suite.variants)
jobdata.record(self.config,
self.logdir,
self.test_suite.variants,
sys.argv)
self._log_job_debug_info()
jobdata.record(self, sys.argv)
if not self.test_suites:
self.exitcode |= exit_codes.AVOCADO_JOB_FAIL
return self.exitcode
# This is "almost ready" for a loop
summary = self.test_suite.run(self)
summary = set()
for suite in self.test_suites:
summary |= suite.run(self)
# If it's all good so far, set job status to 'PASS'
if self.status == 'RUNNING':
self.status = 'PASS'
LOG_JOB.info('Test results available in %s', self.logdir)
if summary is None:
self.exitcode |= exit_codes.AVOCADO_JOB_FAIL
return self.exitcode
if 'INTERRUPTED' in summary:
self.exitcode |= exit_codes.AVOCADO_JOB_INTERRUPTED
if 'FAIL' in summary:
......
......@@ -33,7 +33,7 @@ JOB_CONFIG_FILENAME = 'args.json'
CMDLINE_FILENAME = 'cmdline'
def record(config, logdir, variants, cmdline=None):
def record(job, cmdline=None):
"""
Records all required job information.
"""
......@@ -41,7 +41,7 @@ def record(config, logdir, variants, cmdline=None):
for log in [LOG_UI, LOG_JOB]:
log.warning("jobdata.variants: Unable to serialize '%s'", item)
return str(item)
base_dir = init_dir(logdir, JOB_DATA_DIR)
base_dir = init_dir(job.logdir, JOB_DATA_DIR)
path_cfg = os.path.join(base_dir, CONFIG_FILENAME)
path_references = os.path.join(base_dir, TEST_REFERENCES_FILENAME)
path_variants = os.path.join(base_dir, VARIANTS_FILENAME)
......@@ -49,7 +49,7 @@ def record(config, logdir, variants, cmdline=None):
path_job_config = os.path.join(base_dir, JOB_CONFIG_FILENAME)
path_cmdline = os.path.join(base_dir, CMDLINE_FILENAME)
references = config.get('run.references')
references = job.config.get('run.references')
if references:
with open(path_references, 'w') as references_file:
references_file.write('%s' % references)
......@@ -62,7 +62,10 @@ def record(config, logdir, variants, cmdline=None):
os.fsync(config_file)
with open(path_variants, 'w') as variants_file:
json.dump(variants.dump(), variants_file, default=json_bad_variants_obj)
variants = []
for suite in job.test_suites:
variants.append(suite.variants.dump())
json.dump(variants, variants_file, default=json_bad_variants_obj)
variants_file.flush()
os.fsync(variants_file)
......@@ -72,7 +75,7 @@ def record(config, logdir, variants, cmdline=None):
os.fsync(pwd_file)
with open(path_job_config, 'w') as job_config_file:
json.dump(config, job_config_file, default=lambda x: None)
json.dump(job.config, job_config_file, default=lambda x: None)
job_config_file.flush()
os.fsync(job_config_file)
......
from enum import Enum
from uuid import uuid1
from uuid import uuid4
from .dispatcher import RunnerDispatcher
from .exceptions import OptionValidationError
......@@ -24,19 +24,22 @@ class TestSuiteStatus(Enum):
class TestSuite:
def __init__(self, name, config, tests=None):
def __init__(self, name, config, tests=None, job_config=None):
self.name = name
self.tests = tests
# Create a complete config dict with all registered options + custom
# config
self.config = settings.as_dict()
if job_config:
self.config.update(job_config)
if config:
self.config.update(config)
self._variants = None
self._references = None
self._runner = None
self._test_parameters = None
if (config.get('run.dry_run.enabled') and
self.config.get('run.test_runner') == 'runner'):
......@@ -70,7 +73,7 @@ class TestSuite:
except (LoaderUnhandledReferenceError, LoaderError) as details:
raise TestSuiteError(details)
return cls(name=name or str(uuid1),
return cls(name=name or str(uuid4()),
config=config,
tests=tests)
......@@ -81,7 +84,7 @@ class TestSuite:
resolutions = resolve(references, ignore_missing=ignore_missing)
tasks = resolutions_to_tasks(resolutions, config)
return cls(name=name or str(uuid1),
return cls(name=name or str(uuid4()),
config=config,
tests=tasks)
......@@ -120,6 +123,19 @@ class TestSuite:
else:
return TestSuiteStatus.UNKNOWN
@property
def test_parameters(self):
"""Placeholder for test parameters.
This is related to the --test-parameters command line option
(or run.test_parameters).
"""
if self._test_parameters is None:
self._test_parameters = {name: value for name, value
in self.config.get('run.test_parameters',
[])}
return self._test_parameters
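Since run.test_parameters is expected to be an iterable of (name, value)
pairs, the property folds it into a dict. A minimal sketch (parameter name
borrowed from the sleeptest example in this commit):

from avocado.core.suite import TestSuite

suite = TestSuite(name='params-demo', tests=[],
                  config={'run.test_parameters': [('sleep_length', '0.5')]})
assert suite.test_parameters == {'sleep_length': '0.5'}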
@property
def variants(self):
if self._variants is None:
......@@ -142,7 +158,29 @@ class TestSuite:
return self.runner.run_suite(job, self)
@classmethod
def from_config(cls, config, name=None):
def from_config(cls, config, name=None, job_config=None):
"""Helper method to create a TestSuite from config dicts.
This is different from the TestSuite() initialization because here we
are assuming that you need some help to build the test suite. Avocado
will try to resolve tests based on the configuration information instead
of assuming pre-populated tests.
If you need to create a custom TestSuite, please use the TestSuite()
constructor instead of this method.
:param config: A config dict to be used on the desired test suite.
:type config: dict
:param name: The name of the test suite. This is optional; the default
is a random UUID.
:type name: str
:param job_config: The job config dict (a global config). Use this to
avoid huge configs per test suite. This is also
optional.
:type job_config: dict
"""
if job_config:
config.update(job_config)
runner = config.get('run.test_runner') or 'runner'
if runner == 'nrunner':
suite = cls._from_config_with_resolver(config, name)
......
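The job_config merge can be exercised directly (this mirrors the
test_config_extend_* selftests added at the bottom of this commit):

from avocado.core.suite import TestSuite

job_config = {'core.show': ['none']}
suite_config = {'run.references': ['/bin/true']}

suite = TestSuite.from_config(suite_config, name='suite01',
                              job_config=job_config)
# suite.config now holds the suite's run.references plus the job-level
# core.show setting
assert suite.config.get('core.show') == ['none']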
......@@ -3,11 +3,39 @@ from uuid import uuid1
from pkg_resources import get_distribution
from ..utils import path, process
from .nrunner import Task
from .resolver import ReferenceResolutionResult
from .tags import filter_test_tags_runnable
def get_avocado_git_version():
# if running from git sources, there will be a ".git" directory
# 3 levels up
base_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
git_dir = os.path.join(base_dir, '.git')
if not os.path.isdir(git_dir):
return
if not os.path.exists(os.path.join(base_dir, 'python-avocado.spec')):
return
try:
git = path.find_command('git')
except path.CmdNotFoundError:
return
olddir = os.getcwd()
try:
os.chdir(os.path.abspath(base_dir))
cmd = "%s show --summary --pretty='%%H'" % git
res = process.run(cmd, ignore_status=True, verbose=False)
if res.exit_status == 0:
top_commit = res.stdout_text.splitlines()[0][:8]
return " (GIT commit %s)" % top_commit
finally:
os.chdir(olddir)
def prepend_base_path(value):
expanded = os.path.expanduser(value)
if not expanded.startswith(('/', '~', '.')):
......
......@@ -333,14 +333,20 @@ class Varianter:
@classmethod
def from_resultsdir(cls, resultsdir):
"""
Retrieves the job variants object from the results directory.
Retrieves the job variants objects from the results directory.
This will return a list of variants, since a Job can have multiple
suites and variants are per suite.
"""
path = os.path.join(resultsdir, 'jobdata', VARIANTS_FILENAME)
if not os.path.exists(path):
return None
variants = []
with open(path, 'r') as variants_file:
return cls(state=json.load(variants_file))
for variant in json.load(variants_file):
variants.append(cls(state=variant))
return variants
def __len__(self):
return self._no_variants
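Callers now need to iterate over the returned list; a sketch along the
lines of the Diff plugin change below (results path hypothetical, import
path assumed):

from avocado.core.varianter import Varianter

variants = Varianter.from_resultsdir('/path/to/job-results/latest')
if variants is not None:
    for suite_variants in variants:  # one Varianter per test suite
        print(suite_variants.to_str(variants=2))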
......@@ -258,18 +258,19 @@ class FetchAssetJob(JobPreTests): # pylint: disable=R0903
logger = job.log
else:
logger = None
for test in job.test_suite.tests:
# ignore nrunner/resolver based test suites that contain
# task, because the requirements resolution planned is
# completely different from the traditional job runner
if isinstance(test, Task):
continue
# fetch assets only on instrumented tests
if isinstance(test[0], str):
fetch_assets(test[1]['modulePath'],
test[0],
test[1]['methodName'],
logger)
for suite in job.test_suites:
for test in suite.tests:
# ignore nrunner/resolver based test suites that contain
# task, because the requirements resolution planned is
# completely different from the traditional job runner
if isinstance(test, Task):
continue
# fetch assets only on instrumented tests
if isinstance(test[0], str):
fetch_assets(test[1]['modulePath'],
test[0],
test[1]['methodName'],
logger)
class Assets(CLICmd):
......
......@@ -390,7 +390,8 @@ class Diff(CLICmd):
results = []
variants = Varianter.from_resultsdir(resultsdir)
if variants is not None:
results.extend(variants.to_str(variants=2).splitlines())
for variant in variants:
results.extend(variant.to_str(variants=2).splitlines())
else:
results.append('Not found\n')
......
......@@ -24,6 +24,7 @@ from avocado.core.dispatcher import JobPrePostDispatcher
from avocado.core.future.settings import settings
from avocado.core.output import LOG_UI
from avocado.core.plugin_interfaces import CLICmd, Init
from avocado.core.suite import TestSuite, TestSuiteError
from avocado.utils import process
......@@ -293,7 +294,14 @@ class Run(CLICmd):
LOG_UI.error('Unique Job ID needs to be a 40 digit hex number')
sys.exit(exit_codes.AVOCADO_FAIL)
with job.Job(config) as job_instance:
try:
suite = TestSuite.from_config(config, name='suite01')
if suite.size == 0:
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
except TestSuiteError as err:
LOG_UI.error(err)
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
with job.Job(config, [suite]) as job_instance:
pre_post_dispatcher = JobPrePostDispatcher()
try:
# Run JobPre plugins
......
......@@ -313,22 +313,21 @@ class TestRunner(Runner):
"variant_id": varianter.generate_variant_id(var),
"paths": paths}
def _iter_suite(self, job, test_suite, execution_order):
def _iter_suite(self, test_suite, execution_order):
"""
Iterates through test_suite and variants in defined order
:param job: an instance of :class:`avocado.core.job.Job`
:param test_suite: a TestSuite object to run
:param execution_order: way of iterating through tests/variants
:return: generator yielding tuple(test_factory, variant)
"""
if execution_order == "variants-per-test":
return (self._template_to_factory(job.test_parameters,
return (self._template_to_factory(test_suite.test_parameters,
template, variant)
for template in test_suite.tests
for variant in test_suite.variants.itertests())
elif execution_order == "tests-per-variant":
return (self._template_to_factory(job.test_parameters,
return (self._template_to_factory(test_suite.test_parameters,
template, variant)
for variant in test_suite.variants.itertests()
for template in test_suite.tests)
......@@ -361,8 +360,7 @@ class TestRunner(Runner):
for test_factory in test_suite.tests:
test_factory[1]["base_logdir"] = job.logdir
test_factory[1]["job"] = job
for test_factory, variant in self._iter_suite(job,
test_suite,
for test_factory, variant in self._iter_suite(test_suite,
execution_order):
test_parameters = test_factory[1]
name = test_parameters.get("name")
......
......@@ -88,7 +88,10 @@ class Runner(RunnerInterface):
task.known_runners = nrunner.RUNNERS_REGISTRY_PYTHON_CLASS
# this is all rubbish data
test_id = TestID(index, task.runnable.uri, None, no_digits)
test_id = TestID("{}-{}".format(test_suite.name, index),
task.runnable.uri,
None,
no_digits)
task.identifier = str(test_id)
early_state = {
'name': test_id,
......
......@@ -3,6 +3,7 @@
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {
'run.test_runner': 'nrunner',
......@@ -14,5 +15,6 @@ config = {
],
}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
#!/usr/bin/env python3
import sys
from avocado.core.job import Job
config = {'run.references': ['examples/tests/passtest.py:PassTest.test']}
job_config = {'run.test_runner': 'nrunner',
'run.references': ['examples/tests/passtest.py:PassTest.test']}
# Automatic helper method (Avocado will try to discover things from the
# config dicts). Since there is magic here, we don't need to pass suite names
# or suites, and each test/task id will be prepended with the suite index
# (in this case 1 and 2).
with Job(config) as j:
sys.exit(j.run())
job = Job.from_config(job_config=job_config)
job.setup()
sys.exit(job.run())
......@@ -3,9 +3,11 @@
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {'run.references': ['examples/tests/passtest.py:PassTest.test'],
'cit_parameter_file': 'examples/varianter_cit/test_params.cit'}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
job_config = {'run.test_runner': 'nrunner'}
config1 = {'run.references': ['examples/tests/passtest.py:PassTest.test']}
config2 = {'run.references': ['examples/tests/passtest.py:PassTest.test']}
# Custom method (no discovery, no guessing, no magic).
# Since there is no magic, we need to pass a suite name, otherwise a uuid4
# will be used for suite.name. Also, run.references will be ignored (Avocado
# will not create test suites for you).
suite1 = TestSuite(config=config1, tests=[], name='suite1')
suite2 = TestSuite(config=config2, tests=[], name='suite2')
with Job(job_config, [suite1, suite2]) as j:
sys.exit(j.run())
......@@ -3,10 +3,12 @@
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {'run.references': ['examples/tests/passtest.py:PassTest.test'],
'job.run.result.html.enabled': 'on',
'run.open_browser': True}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
......@@ -3,6 +3,7 @@
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {'run.references': ['examples/tests/sleeptest.py:SleepTest.test'],
'run.dict_variants': [
......@@ -10,5 +11,6 @@ config = {'run.references': ['examples/tests/sleeptest.py:SleepTest.test'],
{'sleep_length': "1.0"}
]}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
......@@ -3,9 +3,11 @@
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {'run.references': ['examples/tests/sleeptest.py:SleepTest.test'],
'json.variants.load': 'examples/tests/sleeptest.py.data/sleeptest.json'}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
......@@ -176,6 +176,7 @@ class ReplayTests(unittest.TestCase):
"%d:\n%s" % (cmd_line, expected_rc, result))
return result
@unittest.skip("Waiting for #4062 be closed")
def test_run_replay_and_mux(self):
"""
Runs a replay job and specifies multiplex file (which should be
......
......@@ -21,8 +21,9 @@ class Test(TestCaseTmpDir):
def test_job_run_result_json_enabled(self):
self.base_config['job.run.result.json.enabled'] = 'on'
with Job(self.base_config) as j:
result = j.run()
j = Job.from_config(self.base_config)
j.setup()
result = j.run()
self.assertEqual(result, exit_codes.AVOCADO_ALL_OK)
json_results_path = os.path.join(self.tmpdir.name, 'latest', 'results.json')
self.assertTrue(os.path.exists(json_results_path))
......@@ -30,8 +31,9 @@ class Test(TestCaseTmpDir):
def test_job_run_result_json_output(self):
json_results_path = os.path.join(self.tmpdir.name, 'myresults.json')
self.base_config['job.run.result.json.output'] = json_results_path
with Job(self.base_config) as j:
result = j.run()
j = Job.from_config(self.base_config)
j.setup()
result = j.run()
self.assertEqual(result, exit_codes.AVOCADO_ALL_OK)
self.assertTrue(os.path.exists(json_results_path))
......
......@@ -128,32 +128,37 @@ import sys
from avocado import Test
from avocado.core.job import Job
from avocado.core.suite import TestSuite
class PassTest(Test):
def test1(self):
config = {'core.show': ['none'],
'run.references': ['/bin/true']}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
j.run()
def test2(self):
config = {'core.show': ['app'],
'run.references': ['/bin/true']}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
j.run()
def test3(self):
config = {'core.show': ['none'],
'run.references': ['/bin/true']}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
j.run()
if __name__ == '__main__':
config = {'run.references': [__file__],
'core.show': ['app']}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
"""
......
......@@ -14,5 +14,6 @@ config = {'run.references': ['selftests/functional/test_nrunner_interface.py'],
{'runner': 'avocado-runner-tap'},
]}
with Job(config) as j:
sys.exit(j.run())
job = Job.from_config(config)
job.setup()
sys.exit(job.run())
......@@ -6,6 +6,7 @@ import os
import sys
from avocado.core.job import Job
from avocado.core.suite import TestSuite
config = {
'run.references': [
......@@ -14,5 +15,6 @@ config = {
]
}
with Job(config) as j:
suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
sys.exit(j.run())
......@@ -126,10 +126,9 @@ class JobTest(unittest.TestCase):
'run.results_dir': self.tmpdir.name,
'run.store_logging_stream': [],
'run.references': simple_tests_found}
self.job = job.Job(config)
self.job = job.Job.from_config(config)
self.job.setup()
self.job.create_test_suite()
self.assertEqual(len(simple_tests_found), len(self.job.test_suite))
self.assertEqual(len(simple_tests_found), len(self.job.test_suites[0]))
def test_job_pre_tests(self):
class JobFilterTime(job.Job):
......@@ -151,9 +150,8 @@ class JobTest(unittest.TestCase):
'run.results_dir': self.tmpdir.name,
'run.store_logging_stream': [],
'run.references': simple_tests_found}
self.job = JobFilterTime(config)
self.job = JobFilterTime.from_config(config)
self.job.setup()
self.job.create_test_suite()
try:
self.job.pre_tests()
finally:
......@@ -166,9 +164,8 @@ class JobTest(unittest.TestCase):
'run.results_dir': self.tmpdir.name,
'run.store_logging_stream': [],
'run.references': simple_tests_found}
self.job = job.Job(config)
self.job = job.Job.from_config(config)
self.job.setup()
self.job.create_test_suite()
self.assertEqual(self.job.run_tests(),
exit_codes.AVOCADO_ALL_OK)
......@@ -198,18 +195,19 @@ class JobTest(unittest.TestCase):
def test_job_run(self):
class JobFilterLog(job.Job):
def pre_tests(self):
filtered_test_suite = []
for test_factory in self.test_suite.tests:
if self.config.get('run.test_runner') == 'runner':
if test_factory[0] is test.SimpleTest:
if not test_factory[1].get('name', '').endswith('time'):
for suite in self.test_suites:
filtered_test_suite = []
for test_factory in suite.tests:
if self.config.get('run.test_runner') == 'runner':
if test_factory[0] is test.SimpleTest:
if not test_factory[1].get('name', '').endswith('time'):
filtered_test_suite.append(test_factory)
elif self.config.get('run.test_runner') == 'nrunner':
task = test_factory
if not task.runnable.url.endswith('time'):
filtered_test_suite.append(test_factory)
elif self.config.get('run.test_runner') == 'nrunner':
task = test_factory
if not task.runnable.url.endswith('time'):
filtered_test_suite.append(test_factory)
self.test_suite.tests = filtered_test_suite
super(JobFilterLog, self).pre_tests()
suite.tests = filtered_test_suite
super(JobFilterLog, self).pre_tests()
def post_tests(self):
with open(os.path.join(self.logdir, "reversed_id"), "w") as f:
......@@ -220,11 +218,11 @@ class JobTest(unittest.TestCase):
'run.results_dir': self.tmpdir.name,
'run.store_logging_stream': [],
'run.references': simple_tests_found}
self.job = JobFilterLog(config)
self.job = JobFilterLog.from_config(config)
self.job.setup()
self.assertEqual(self.job.run(),
exit_codes.AVOCADO_ALL_OK)
self.assertLessEqual(len(self.job.test_suite), 1)
self.assertLessEqual(len(self.job.test_suites), 1)
with open(os.path.join(self.job.logdir, "reversed_id")) as reverse_id_file:
self.assertEqual(self.job.unique_id[::-1],
reverse_id_file.read())
......@@ -262,6 +260,15 @@ class JobTest(unittest.TestCase):
self.assertEqual(self.job.time_end, 20.0)
self.assertEqual(self.job.time_elapsed, 100.0)
def test_job_suites_config(self):
config = {'run.results_dir': self.tmpdir.name,
'core.show': ['none'],
'run.references': ['/bin/true']}
suite_config = {'run.references': ['/bin/false']}
self.job = job.Job.from_config(config, [suite_config])
self.assertEqual(self.job.config.get('run.references'), ['/bin/true'])
def test_job_dryrun_no_unique_job_id(self):
config = {'run.results_dir': self.tmpdir.name,
'run.store_logging_stream': [],
......@@ -282,6 +289,31 @@ class JobTest(unittest.TestCase):
self.assertEqual(os.path.dirname(self.job.logdir), self.tmpdir.name)
self.assertTrue(os.path.isfile(os.path.join(self.job.logdir, 'id')))
def test_job_suite_parent_config(self):
"""This will test if test suites are inheriting configs from job."""
config = {'core.show': ['none'],
'run.results_dir': self.tmpdir.name}
suite_config = {'run.references': ['/bin/true']}
# Manual/Custom method
suite = TestSuite('foo-test', config=suite_config, job_config=config)
self.job = job.Job(config, [suite])
self.assertEqual(self.job.test_suites[0].config.get('run.results_dir'),
self.tmpdir.name)
# Automatic method passing suites
self.job = job.Job.from_config(job_config=config,
suites_configs=[suite_config])
self.assertEqual(self.job.test_suites[0].config.get('run.results_dir'),
self.tmpdir.name)
# Automatic method passing only one config
config.update({'run.references': ['/bin/true']})
self.job = job.Job.from_config(job_config=config)
self.assertEqual(self.job.test_suites[0].config.get('run.results_dir'),
self.tmpdir.name)
def test_job_dryrun_no_base_logdir(self):
config = {'core.show': ['none'],
'run.store_logging_stream': [],
......@@ -299,9 +331,8 @@ class JobTest(unittest.TestCase):
'run.references': simple_tests_found,
'run.test_runner': 'nrunner',
'core.show': ['none']}
self.job = job.Job(config)
self.job = job.Job.from_config(config)
self.job.setup()
self.job.create_test_suite()
self.assertEqual(len(simple_tests_found), len(self.job.test_suite))
if self.job.test_suite:
self.assertIsInstance(self.job.test_suite.tests[0], nrunner.Task)
......
import tempfile
import unittest.mock
from avocado.core import data_dir
from avocado.core.suite import TestSuite
from avocado.utils import path as utils_path
from .. import setup_avocado_loggers, temp_dir_prefix
setup_avocado_loggers()
class TestSuiteTest(unittest.TestCase):
def setUp(self):
self.suite = None
data_dir._tmp_tracker.unittest_refresh_dir_tracker()
prefix = temp_dir_prefix(__name__, self, 'setUp')
self.tmpdir = tempfile.TemporaryDirectory(prefix=prefix)
@staticmethod
def _find_simple_test_candidates(candidates=None):
if candidates is None:
candidates = ['true']
found = []
for candidate in candidates:
try:
found.append(utils_path.find_command(candidate))
except utils_path.CmdNotFoundError:
pass
return found
def test_custom_suite(self):
"""Custom suites should assume custom tests.
When using custom suites (from the constructor) we assume no magic:
no tests should be created from run.references.
"""
tests = self._find_simple_test_candidates()
config = {'run.results_dir': self.tmpdir.name,
'core.show': ['none'],
'run.references': tests}
self.suite = TestSuite(name='foo', config=config)
self.assertEqual(0, self.suite.size)
def test_automatic_suite(self):
"""Automatic suites should create tests.
When using automatic suites we are assuming magic,
and, tests should be created from run.references.
"""
tests = self._find_simple_test_candidates()
config = {'run.results_dir': self.tmpdir.name,
'core.show': ['none'],
'run.references': tests}
self.suite = TestSuite.from_config(config=config)
self.assertEqual(1, self.suite.size)
def test_config_extend_manual(self):
"""Test extends config from job when using manual method."""
tests = self._find_simple_test_candidates()
job_config = {'run.results_dir': self.tmpdir.name,
'core.show': ['none']}
suite_config = {'run.references': tests}
self.suite = TestSuite(name='foo',
config=suite_config,
job_config=job_config)
self.assertEqual(self.suite.config.get('core.show'), ['none'])
def test_config_extend_automatic(self):
"""Test extends config from job when using automatic method."""
tests = self._find_simple_test_candidates()
job_config = {'run.results_dir': self.tmpdir.name,
'core.show': ['none']}
suite_config = {'run.references': tests}
self.suite = TestSuite.from_config(config=suite_config,
job_config=job_config)
self.assertEqual(self.suite.config.get('core.show'), ['none'])
def tearDown(self):
data_dir._tmp_tracker.unittest_refresh_dir_tracker()
self.tmpdir.cleanup()
if __name__ == '__main__':
unittest.main()