summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNeil Williams <neil.williams@linaro.org>2014-09-18 22:06:47 (GMT)
committerLinaro Code Review <review@review.linaro.org>2014-09-25 11:53:33 (GMT)
commitc660d3c57948f91f6bdf108a70cd9ea1a7bef4fb (patch)
tree07dc155399de657bdc38afe880fa675af320fa12
parent7b16029a4ab28c8de227e45ae2500945c083dc50 (diff)
downloadlava-dispatcher-c660d3c57948f91f6bdf108a70cd9ea1a7bef4fb.tar.gz
lava-dispatcher-c660d3c57948f91f6bdf108a70cd9ea1a7bef4fb.tar.xz
Run commands inside the shell connection
Populate the test definition metadata. Change-Id: I9f3d48a35fd15a456b117a9cfafefe0c005bac3f
-rw-r--r--lava_dispatcher/pipeline/action.py9
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/image.py1
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/overlay.py72
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/testdef.py184
-rw-r--r--lava_dispatcher/pipeline/actions/test/__init__.py7
-rw-r--r--lava_dispatcher/pipeline/shell.py4
-rw-r--r--lava_dispatcher/pipeline/test/test_defs.py12
-rw-r--r--lava_dispatcher/pipeline/test/test_job.py2
8 files changed, 219 insertions, 72 deletions
diff --git a/lava_dispatcher/pipeline/action.py b/lava_dispatcher/pipeline/action.py
index 52ff659..55b8625 100644
--- a/lava_dispatcher/pipeline/action.py
+++ b/lava_dispatcher/pipeline/action.py
@@ -139,7 +139,8 @@ class Pipeline(object):
# per action loggers always operate in DEBUG mode - the frontend does the parsing later.
action.log_handler.setLevel(logging.DEBUG)
# yaml wrapper inside the log handler
- action.log_handler.setFormatter(logging.Formatter('id: "<LAVA_DISPATCHER>%(asctime)s"\n%(message)s'))
+ pattern = ' - id: "<LAVA_DISPATCHER>%(asctime)s"\n%(message)s'
+ action.log_handler.setFormatter(logging.Formatter(pattern))
# if the action has an internal pipeline, initialise that here.
action.populate()
@@ -433,7 +434,11 @@ class Action(object):
# logger per action si easier to use. Calling it YAML.%(action_name)s
yaml_log = logging.getLogger("YAML")
std_log = logging.getLogger("ASCII")
- yaml_log.debug({"output": message.split('\n')})
+ if type(message) is dict:
+ for key, value in message.iteritems():
+ yaml_log.debug(" %s: %s" % (key, value))
+ else:
+ yaml_log.debug(" log: \"%s\"" % message)
std_log.info(message)
def _run_command(self, command_list, env=None):
diff --git a/lava_dispatcher/pipeline/actions/deploy/image.py b/lava_dispatcher/pipeline/actions/deploy/image.py
index 1ea5b47..59e9ec6 100644
--- a/lava_dispatcher/pipeline/actions/deploy/image.py
+++ b/lava_dispatcher/pipeline/actions/deploy/image.py
@@ -49,6 +49,7 @@ class DeployImageAction(DeployAction):
self.summary = "deploy image"
def prepare(self):
+ # FIXME: move to validate or into DownloadAction?
# mktemp dir
req = requests.head(self.parameters['image']) # just check the headers, do not download.
if req.status_code != req.codes.ok:
diff --git a/lava_dispatcher/pipeline/actions/deploy/overlay.py b/lava_dispatcher/pipeline/actions/deploy/overlay.py
index ce9ab4f..f5cd58a 100644
--- a/lava_dispatcher/pipeline/actions/deploy/overlay.py
+++ b/lava_dispatcher/pipeline/actions/deploy/overlay.py
@@ -46,7 +46,10 @@ class OverlayAction(DeployAction):
will need to insert an instance of this class into the
Deploy pipeline, between mount and umount.
The overlay uses the 'mntdir' set by the MountAction
- in the job data
+ in the job data.
+ This class handles parts of the overlay which are independent
+ of the content of the test definitions themselves. Other
+ overlays are handled by TestDefinitionAction.
"""
# FIXME: remove redundant functions copied in from old code
# FIXME: is this ImageOverlayAction or can it work the same way for all deployments?
@@ -65,29 +68,18 @@ class OverlayAction(DeployAction):
# 755 file permissions
self.xmod = stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IXOTH | stat.S_IROTH
- def _inject_testdef_parameters(self, fout): # FIXME: needs a separate action
- # inject default parameters that were defined in yaml first
- fout.write('###default parameters from yaml###\n')
- if 'params' in self.testdef:
- for def_param_name, def_param_value in self.testdef['params'].items():
- fout.write('%s=\'%s\'\n' % (def_param_name, def_param_value))
- fout.write('######\n')
- # inject the parameters that was set in json
- fout.write('###test parameters from json###\n')
- if self._sw_sources and 'test_params' in self._sw_sources[0] and self._sw_sources[0]['test_params'] != '':
- _test_params_temp = eval(self._sw_sources[0]['test_params'])
- for param_name, param_value in _test_params_temp.items():
- fout.write('%s=\'%s\'\n' % (param_name, param_value))
- fout.write('######\n')
-
def _create_target_install(self, hostdir, targetdir): # FIXME: needs a dedicated Action
+ """
+ Use the 'distro' element of the deployment-data to determine which
+ install helper to add to the overlay.
+ """
with open('%s/install.sh' % hostdir, 'w') as f:
self._inject_testdef_parameters(f)
f.write('set -ex\n')
f.write('cd %s\n' % targetdir)
if self.skip_install != 'deps':
- distro = self.context.client.target_device.deployment_data['distro']
+ distro = self.parameters['deployment_data']['distro']
# generic dependencies - must be named the same across all distros
# supported by the testdef
@@ -108,46 +100,6 @@ class OverlayAction(DeployAction):
for cmd in steps:
f.write('%s\n' % cmd)
- def copy_test(self, hostdir, targetdir): # FIXME: needs a dedicated Action
- """Copy the files needed to run this test to the device.
-
- :param hostdir: The location on the device filesystem to copy too.
- :param targetdir: The location `hostdir` will have when the device
- boots.
- """
- utils.ensure_directory(hostdir)
- with open('%s/testdef.yaml' % hostdir, 'w') as f:
- f.write(yaml.dump(self.testdef))
-
- with open('%s/uuid' % hostdir, 'w') as f:
- f.write(self.uuid)
-
- with open('%s/testdef_metadata' % hostdir, 'w') as f:
- f.write(yaml.safe_dump(self.testdef_metadata))
-
- if self.skip_install != "all":
- if 'install' in self.testdef:
- if self.skip_install != 'repos':
- self._create_repos(hostdir)
- self._create_target_install(hostdir, targetdir)
-
- with open('%s/run.sh' % hostdir, 'w') as f:
- self._inject_testdef_parameters(f)
- f.write('set -e\n')
- f.write('export TESTRUN_ID=%s\n' % self.test_id)
- f.write('cd %s\n' % targetdir)
- f.write('UUID=`cat uuid`\n')
- f.write('echo "<LAVA_SIGNAL_STARTRUN $TESTRUN_ID $UUID>"\n')
- f.write('#wait for an ack from the dispatcher\n')
- f.write('read\n')
- steps = self.testdef['run'].get('steps', [])
- if steps:
- for cmd in steps:
- f.write('%s\n' % cmd)
- f.write('echo "<LAVA_SIGNAL_ENDRUN $TESTRUN_ID $UUID>"\n')
- f.write('#wait for an ack from the dispatcher\n')
- f.write('read\n')
-
def _copy_runner(self, mntdir):
shell = self.parameters['deployment_data']['lava_test_sh_cmd']
@@ -188,10 +140,8 @@ class OverlayAction(DeployAction):
self._copy_runner(lava_path)
# load test definitions is done by TestDefinitionAction, so we're finished
# debug: log the overlay directory contents
- self._run_command(["ls", "-l", lava_path])
- self._run_command(["cat", os.path.join(lava_path, 'lava-test-runner.conf')])
- self._run_command(["ls", "-l", os.path.join(lava_path, 'bin')])
- self._run_command(["ls", "-l", os.path.join(lava_path, 'tests')])
+ # self._run_command(["cat", os.path.join(lava_path, 'lava-test-runner.conf')])
+ # self._run_command(["ls", "-lR", os.path.join(lava_path, 'tests')])
return connection
diff --git a/lava_dispatcher/pipeline/actions/deploy/testdef.py b/lava_dispatcher/pipeline/actions/deploy/testdef.py
index d0e9ef9..16f8d83 100644
--- a/lava_dispatcher/pipeline/actions/deploy/testdef.py
+++ b/lava_dispatcher/pipeline/actions/deploy/testdef.py
@@ -19,9 +19,11 @@
# with this program; if not, see <http://www.gnu.org/licenses>.
import os
+import yaml
import base64
import tarfile
import StringIO
+from uuid import uuid4
from lava_dispatcher.pipeline.action import (
Pipeline,
RetryAction,
@@ -41,6 +43,8 @@ class RepoAction(RetryAction):
self.tmpdir = None
self.vcs_binary = None
self.runner = None
+ self.uuid = str(uuid4())
+ self.testdef = None
def validate(self):
if self.vcs_binary and not os.path.exists(self.vcs_binary):
@@ -53,9 +57,27 @@ class RepoAction(RetryAction):
self.runner = ('%s/tests/%s\n' % (prefix, self.parameters['test_name']))
# mntdir is a temporary directory, not available at validate stage.
self.tmpdir = self.data['mount_action']['mntdir']
+ # NOTE: the eventual runner_path must remain empty until after the VCS clone, so let the VCS clone create the final dir
+
+ # if 'test-case-deps' in testdef: # FIXME - is this needed?
+ # self._get_dependent_test_cases(testdef)
+
+ # install is done from the main overlay
+ # TODO: implement
+ # if self.skip_install != "all":
+ # if 'install' in self.testdef:
+ # if self.skip_install != 'repos':
+ # self._create_repos(hostdir)
+ # self._create_target_install(hostdir, targetdir)
+
return connection
+def indices(string, char):
+
+ return [i for i, c in enumerate(string) if c == char]
+
+
class GitRepoAction(RepoAction):
"""
Each repo action is for a single repository,
@@ -83,7 +105,55 @@ class GitRepoAction(RepoAction):
os.chdir(runner_path)
self._run_command([self.vcs_binary, 'checkout', self.parameters['revision']])
commit_id = self._run_command([self.vcs_binary, 'log', '-1', '--pretty=%H']).strip()
- self.data[self.name]['commit_id'] = commit_id
+
+ # now read the YAML to create a testdef dict to retrieve metadata
+ yaml_file = os.path.join(runner_path, self.parameters['path'])
+ if not os.path.exists(yaml_file):
+ raise JobError("Unable to find test definition YAML: %s" % yaml_file)
+ with open(yaml_file, 'r') as f:
+ self.testdef = yaml.safe_load(f)
+
+ with open("%s/testdef.yaml" % runner_path, 'w') as f:
+ yaml.safe_dump(self.testdef, f)
+
+ # TODO: check if this copy dict is needed or if other routines
+ # can use the fields directly. Needs to be per handler as it reads the YAML & parameters.
+ self.data.update({
+ self.name: {
+ 'os': self.testdef['metadata'].get('os', ''),
+ 'devices': self.testdef['metadata'].get('devices', ''),
+ 'environment': self.testdef['metadata'].get('environment', ''),
+ 'branch_vcs': 'git',
+ 'project_name': self.parameters['test_name'], # FIXME: do we need this duplicate?
+ 'commit_id': commit_id, # FIXME: do we need this?
+ }
+ })
+
+ # write out the UUID of each test definition.
+ with open('%s/uuid' % runner_path, 'w') as f:
+ f.write(self.uuid)
+
+ # FIXME: does this match old-world test-shell?
+ with open('%s/testdef_metadata' % runner_path, 'w') as f:
+ f.write(yaml.safe_dump(self.data[self.name]))
+
+ with open('%s/run.sh' % runner_path, 'w') as f:
+ # self._inject_testdef_parameters(f)
+ f.write('set -ex\n')
+ f.write('export TESTRUN_ID=%s\n' % self.testdef['metadata']['name'])
+ f.write('cd %s\n' % self.runner)
+ f.write('UUID=`cat uuid`\n')
+ f.write('echo "<LAVA_SIGNAL_STARTRUN $TESTRUN_ID $UUID>"\n')
+ f.write('#wait for an ack from the dispatcher\n')
+ f.write('read\n')
+ steps = self.testdef['run'].get('steps', [])
+ if steps:
+ for cmd in steps:
+ f.write('%s\n' % cmd)
+ f.write('echo "<LAVA_SIGNAL_ENDRUN $TESTRUN_ID $UUID>"\n')
+ f.write('#wait for an ack from the dispatcher\n')
+ f.write('read\n')
+
os.chdir(cwd)
if not self.valid:
raise RuntimeError("Unable to get test definition from %s (%s)" % (self.vcs_binary, self.parameters))
@@ -117,6 +187,26 @@ class BzrRepoAction(RepoAction):
])
if self.errors:
raise RuntimeError("Unable to get test definition from %s (%s)" % (self.vcs_binary, self.parameters))
+
+ # now read the YAML to create a testdef dict to retrieve metadata
+ yaml_file = os.path.join(runner_path, self.parameters['path'])
+ if not os.path.exists(yaml_file):
+ raise JobError("Unable to find test definition YAML: %s" % yaml_file)
+ with open(yaml_file, 'r') as f:
+ self.testdef = yaml.safe_load(f)
+
+ # FIXME: do this once and set the branch_vcs per handler?
+ self.data.update({
+ self.name: {
+ 'os': self.testdef['metadata'].get('os', ''),
+ 'devices': self.testdef['metadata'].get('devices', ''),
+ 'environment': self.testdef['metadata'].get('environment', ''),
+ 'branch_vcs': 'bzr',
+ 'project_name': self.parameters['test_name'], # FIXME: do we need this duplicate?
+ 'commit_id': commit_id, # FIXME: do we need this?
+ }
+ })
+
return connection
@@ -189,12 +279,35 @@ class UrlRepoAction(DownloaderAction):
raise JobError('Unable to get test definition from url\n' + str(exc))
finally:
self._log("Downloaded test definition file to %s." % runner_path)
+
+ i = []
+ for e in " $&()\"'<>/\\|;`":
+ i.extend(indices(self.testdef["metadata"]["name"], e))
+ if i:
+ msg = "Test name contains invalid symbol(s) at position(s): %s" % ", ".join(map(str, i))
+ raise JobError(msg)
+
+ try:
+ self.testdef["metadata"]["name"].encode()
+ except UnicodeEncodeError as e:
+ msg = "Test name contains non-ascii symbols: %s" % e
+ raise JobError(msg)
+
return connection
class TestDefinitionAction(TestAction):
def __init__(self):
+ """
+ The TestDefinitionAction installs each test definition into
+ the overlay. It does not execute the scripts in the test
+ definition, that is the job of the TestAction class.
+ One TestDefinitionAction handles all test definitions for
+ the current job.
+ In addition, a TestOverlayAction is added to the pipeline
+ to handle parts of the overlay which are test definition dependent.
+ """
super(TestDefinitionAction, self).__init__()
self.name = "test-definition"
self.description = "load test definitions into image"
@@ -219,12 +332,14 @@ class TestDefinitionAction(TestAction):
else:
self.errors = "unsupported handler"
raise JobError("unsupported testdef handler: %s %s" % (testdef, testdef['from']))
+ # set the full set of job YAML parameters for this handler as handler parameters.
handler.parameters = testdef
# store the correct test_name before incrementing the local index dict
handler.parameters['test_name'] = "%s_%s" % (len(index.keys()), handler.parameters['name'])
index[len(index.keys())] = handler.parameters['name']
self.internal_pipeline.add_action(handler)
# FIXME: the outer pipeline may add unwanted data to the parameters['test']
+ self.internal_pipeline.add_action(TestOverlayAction())
def validate(self):
if not self.job:
@@ -247,7 +362,46 @@ class TestDefinitionAction(TestAction):
self._log("Validation failed")
raise JobError("Invalid job data: %s\n" % '\n'.join(self.errors))
+ def _inject_testdef_parameters(self, fout): # FIXME: needs a separate action
+ # inject default parameters that were defined in yaml first
+ fout.write('###default parameters from yaml###\n')
+ if 'params' in self.testdef:
+ for def_param_name, def_param_value in self.testdef['params'].items():
+ fout.write('%s=\'%s\'\n' % (def_param_name, def_param_value))
+ fout.write('######\n')
+ # inject the parameters that were set in json
+ fout.write('###test parameters from json###\n')
+ if self._sw_sources and 'test_params' in self._sw_sources[0] and self._sw_sources[0]['test_params'] != '':
+ _test_params_temp = eval(self._sw_sources[0]['test_params'])
+ for param_name, param_value in _test_params_temp.items():
+ fout.write('%s=\'%s\'\n' % (param_name, param_value))
+ fout.write('######\n')
+
+ def copy_test(self, hostdir, targetdir): # FIXME: needs porting to the new classes
+ """Copy the files needed to run this test to the device.
+
+ :param hostdir: The location on the device filesystem to copy to.
+ :param targetdir: The location `hostdir` will have when the device
+ boots.
+ """
+
+ with open('%s/testdef_metadata' % hostdir, 'w') as f:
+ f.write(yaml.safe_dump(self.testdef_metadata))
+
+ if self.skip_install != "all":
+ if 'install' in self.testdef:
+ if self.skip_install != 'repos':
+ self._create_repos(hostdir)
+ self._create_target_install(hostdir, targetdir)
+
def run(self, connection, args=None):
+ """
+ Puts the requested test definition files into the overlay
+
+ :param connection: Connection object, if any.
+ :param args: Not used.
+ :return: the received Connection.
+ """
self._log("Loading test definitions")
# developer hack - if the image hasn't been downloaded this time, it may already contain old files
# should really be an rmtree but it is only here to save developer time on downloads...
@@ -256,5 +410,31 @@ class TestDefinitionAction(TestAction):
connection = self.internal_pipeline.run_actions(connection)
with open('%s/lava-test-runner.conf' % self.data['mount_action']['mntdir'], 'a') as runner_conf:
for handler in self.internal_pipeline.actions:
- runner_conf.write(handler.runner)
+ if isinstance(handler, RepoAction) or isinstance(handler, UrlRepoAction):
+ runner_conf.write(handler.runner)
+ if isinstance(handler, TestAction): # FIXME: separate actions for copy & inject
+ # run copy_test
+ hostdir = self.data['mount_action']['mntdir']
+ targetdir = ''
+ return connection
+
+
+class TestOverlayAction(TestAction):
+
+ def __init__(self):
+ """
+ When a job requires a TestAction, some files need to be added to
+ the existing Overlay as part of Test.
+ This class adds the necessary scripts to support test definitions
+ but which are test definition dependent, like the install handler.
+ """
+ super(TestOverlayAction, self).__init__()
+ self.name = "test-overlay"
+ self.description = "overlay test support files onto image"
+ self.summary = "applying LAVA test overlay"
+
+ def validate(self):
+ pass
+
+ def run(self, connection, args=None):
return connection
diff --git a/lava_dispatcher/pipeline/actions/test/__init__.py b/lava_dispatcher/pipeline/actions/test/__init__.py
index 6997a59..1d53ee9 100644
--- a/lava_dispatcher/pipeline/actions/test/__init__.py
+++ b/lava_dispatcher/pipeline/actions/test/__init__.py
@@ -42,11 +42,14 @@ class TestAction(Action):
Add LMPSignalDirector, if required
work out how to do _keep_running with separate classes for multinode & lmp
"""
- self._log("Executing test definitions")
+ if not connection:
+ self._log("No connection!")
+ self._log("Executing test definitions using %s" % connection.name)
# internal actions:
# setup SignalDirector
# set proxy
- connection.sendline(
+ # connection.run_command("ls -r %s" % self.data['lava_test_results_dir'])
+ connection.run_command(
"%s/bin/lava-test-runner %s" % (
self.data['lava_test_results_dir'],
self.data['lava_test_results_dir']),
diff --git a/lava_dispatcher/pipeline/shell.py b/lava_dispatcher/pipeline/shell.py
index f1092dd..438f8f0 100644
--- a/lava_dispatcher/pipeline/shell.py
+++ b/lava_dispatcher/pipeline/shell.py
@@ -39,7 +39,7 @@ class ShellCommand(pexpect.spawn): # pylint: disable=too-many-public-methods
def __init__(self, command, lava_timeout, cwd=None):
pexpect.spawn.__init__(
self, command, timeout=lava_timeout.duration, cwd=cwd, logfile=sys.stdout)
-
+ self.name = "ShellCommand"
# serial can be slow, races do funny things, so increase delay
# FIXME: this as to be a constant, written somewhere with all constants
self.delaybeforesend = 0.05
@@ -104,6 +104,7 @@ class ShellCommand(pexpect.spawn): # pylint: disable=too-many-public-methods
raise JobError("command timed out.")
except pexpect.EOF:
raise RuntimeError(" ".join(self.before.split('\r\n')))
+ yaml_log.debug("Prompt matched.")
return proc
def empty_buffer(self):
@@ -118,6 +119,7 @@ class ShellSession(Connection):
def __init__(self, device, shell_command):
super(ShellSession, self).__init__(device, shell_command)
self.__runner__ = None
+ self.name = "ShellSession"
@property
def runner(self):
diff --git a/lava_dispatcher/pipeline/test/test_defs.py b/lava_dispatcher/pipeline/test/test_defs.py
index fc5ab9a..f468855 100644
--- a/lava_dispatcher/pipeline/test/test_defs.py
+++ b/lava_dispatcher/pipeline/test/test_defs.py
@@ -21,7 +21,11 @@
from lava_dispatcher.pipeline.test.test_basic import Factory
from lava_dispatcher.tests.helper import LavaDispatcherTestCase
from lava_dispatcher.pipeline.actions.deploy import DeployAction
-from lava_dispatcher.pipeline.actions.deploy.testdef import TestDefinitionAction, GitRepoAction
+from lava_dispatcher.pipeline.actions.deploy.testdef import (
+ TestDefinitionAction,
+ GitRepoAction,
+ TestOverlayAction
+)
# Test the loading of test definitions within the deploy stage
@@ -39,12 +43,14 @@ class TestDefinitionHandlers(LavaDispatcherTestCase):
self.assertIsNotNone(action.name)
if isinstance(action, DeployAction):
testdef = action.pipeline.children[action.pipeline][4]
- self.assertEqual(len(testdef.internal_pipeline.actions), 2)
+ self.assertEqual(len(testdef.internal_pipeline.actions), 3)
self.assertIsInstance(testdef, TestDefinitionAction)
testdef.validate()
self.assertTrue(testdef.valid)
for repo_action in testdef.internal_pipeline.actions:
- self.assertIsInstance(repo_action, GitRepoAction)
+ if not isinstance(repo_action, GitRepoAction) and not\
+ isinstance(repo_action, TestOverlayAction):
+ self.fail("%s does not match GitRepoAction or TestOverlayAction" % type(repo_action))
repo_action.validate()
self.assertTrue(repo_action.valid)
# FIXME: needs deployment_data to be visible during validation
diff --git a/lava_dispatcher/pipeline/test/test_job.py b/lava_dispatcher/pipeline/test/test_job.py
index 11577e6..f461f00 100644
--- a/lava_dispatcher/pipeline/test/test_job.py
+++ b/lava_dispatcher/pipeline/test/test_job.py
@@ -159,7 +159,7 @@ class TestKVMBasicDeploy(LavaDispatcherTestCase):
overlay = None
unmount = None
self.assertTrue(os.path.exists(self.job.parameters['output_dir']))
- self.assertEqual(len(self.job.pipeline.describe().values()), 18) # this will keep changing until KVM is complete.
+ self.assertEqual(len(self.job.pipeline.describe().values()), 19) # this will keep changing until KVM is complete.
for action in self.job.pipeline.actions:
if isinstance(action, DeployAction):
# check parser has created a suitable deployment