summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone-black.conf4
-rw-r--r--lava_dispatcher/device/vexpress.py8
-rw-r--r--lava_dispatcher/lava_test_shell/distro/centos/lava-install-packages17
-rw-r--r--lava_dispatcher/lava_test_shell/distro/centos/lava-installed-packages3
-rw-r--r--lava_dispatcher/lava_test_shell/distro/centos/lava-os-build32
-rw-r--r--lava_dispatcher/pipeline/action.py40
-rw-r--r--lava_dispatcher/pipeline/actions/boot/__init__.py2
-rw-r--r--lava_dispatcher/pipeline/actions/boot/bootloader_defaults.py156
-rw-r--r--lava_dispatcher/pipeline/actions/boot/fastboot.py9
-rw-r--r--lava_dispatcher/pipeline/actions/boot/grub.py8
-rw-r--r--lava_dispatcher/pipeline/actions/boot/ipxe.py6
-rw-r--r--lava_dispatcher/pipeline/actions/boot/iso.py2
-rw-r--r--lava_dispatcher/pipeline/actions/boot/kexec.py2
-rw-r--r--lava_dispatcher/pipeline/actions/boot/qemu.py7
-rw-r--r--lava_dispatcher/pipeline/actions/boot/strategies.py1
-rw-r--r--lava_dispatcher/pipeline/actions/boot/u_boot.py107
-rw-r--r--lava_dispatcher/pipeline/actions/boot/uefi_menu.py1
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/apply_overlay.py115
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/download.py15
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/environment.py6
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/fastboot.py49
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/lxc.py40
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/mount.py8
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/nfs.py122
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/overlay.py58
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/removable.py79
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/strategies.py1
-rw-r--r--lava_dispatcher/pipeline/actions/deploy/testdef.py102
-rw-r--r--lava_dispatcher/pipeline/actions/test/multinode.py14
-rw-r--r--lava_dispatcher/pipeline/actions/test/shell.py265
-rw-r--r--lava_dispatcher/pipeline/connection.py2
-rw-r--r--lava_dispatcher/pipeline/connections/lxc.py38
-rw-r--r--lava_dispatcher/pipeline/connections/ssh.py3
-rw-r--r--lava_dispatcher/pipeline/deployment_data.py33
-rw-r--r--lava_dispatcher/pipeline/device_types/beaglebone-black.conf6
-rw-r--r--lava_dispatcher/pipeline/devices/d02-01.yaml30
-rw-r--r--lava_dispatcher/pipeline/devices/juno-uboot.yaml106
-rw-r--r--lava_dispatcher/pipeline/devices/juno-uefi.yaml155
-rw-r--r--lava_dispatcher/pipeline/devices/mustang-uefi.yaml2
-rw-r--r--lava_dispatcher/pipeline/devices/nexus10-01.yaml2
-rw-r--r--lava_dispatcher/pipeline/devices/nexus4-01.yaml2
-rw-r--r--lava_dispatcher/pipeline/devices/nexus9-01.yaml33
-rw-r--r--lava_dispatcher/pipeline/devices/x86-01.yaml2
-rw-r--r--lava_dispatcher/pipeline/devices/x86-02.yaml26
-rw-r--r--lava_dispatcher/pipeline/lava_test_shell/lava-test-case88
-rw-r--r--lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-network110
-rw-r--r--lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-send17
-rw-r--r--lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-sync20
-rw-r--r--lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-wait21
-rw-r--r--lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-wait-all23
-rw-r--r--lava_dispatcher/pipeline/log.py44
-rw-r--r--lava_dispatcher/pipeline/logical.py14
-rw-r--r--lava_dispatcher/pipeline/power.py4
-rw-r--r--lava_dispatcher/pipeline/protocols/lxc.py8
-rw-r--r--lava_dispatcher/pipeline/protocols/multinode.py66
-rw-r--r--lava_dispatcher/pipeline/shell.py11
-rw-r--r--lava_dispatcher/pipeline/test/pipeline_refs/bbb-group-vland-alpha.yaml2
-rw-r--r--lava_dispatcher/pipeline/test/pipeline_refs/bbb-nfs-url.yaml3
-rw-r--r--lava_dispatcher/pipeline/test/pipeline_refs/cubietruck-removable.yaml15
-rw-r--r--lava_dispatcher/pipeline/test/pipeline_refs/grub.yaml2
-rw-r--r--lava_dispatcher/pipeline/test/pipeline_refs/ipxe.yaml2
-rw-r--r--lava_dispatcher/pipeline/test/pipeline_refs/kexec.yaml3
-rw-r--r--lava_dispatcher/pipeline/test/pipeline_refs/mustang-uefi.yaml2
-rw-r--r--lava_dispatcher/pipeline/test/pipeline_refs/uboot-multiple.yaml6
-rw-r--r--lava_dispatcher/pipeline/test/pipeline_refs/uboot.yaml3
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/bbb-skip-install.yaml1
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/cubietruck-removable.yaml4
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/grub-centos-installed-x86.yaml24
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/grub-centos-installer-x86.yaml62
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/grub-installer.yaml13
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/juno-uboot-nfs.yaml46
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/juno-uboot-removable.yaml81
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/juno-uboot-tftp.yaml51
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/juno-uefi-tftp.yaml39
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/kvm-params.yaml7
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/kvm.yaml1
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/mustang-image.yaml58
-rw-r--r--lava_dispatcher/pipeline/test/sample_jobs/uboot-ramdisk.yaml13
-rw-r--r--lava_dispatcher/pipeline/test/test_connections.py21
-rw-r--r--lava_dispatcher/pipeline/test/test_defs.py203
-rw-r--r--lava_dispatcher/pipeline/test/test_fastboot.py1
-rw-r--r--lava_dispatcher/pipeline/test/test_grub.py2
-rw-r--r--lava_dispatcher/pipeline/test/test_kvm.py39
-rw-r--r--lava_dispatcher/pipeline/test/test_menus.py2
-rw-r--r--lava_dispatcher/pipeline/test/test_multinode.py30
-rw-r--r--lava_dispatcher/pipeline/test/test_removable.py41
-rw-r--r--lava_dispatcher/pipeline/test/test_repeat.py5
-rw-r--r--lava_dispatcher/pipeline/test/test_uboot.py13
-rw-r--r--lava_dispatcher/pipeline/test/test_utils.py2
-rw-r--r--lava_dispatcher/pipeline/test/testdefs/params.yaml44
-rw-r--r--lava_dispatcher/pipeline/test/testdefs/result-data.txt5
-rw-r--r--lava_dispatcher/pipeline/utils/constants.py11
-rw-r--r--lava_dispatcher/pipeline/utils/filesystem.py26
-rw-r--r--lava_dispatcher/pipeline/utils/installers.py11
-rw-r--r--lava_dispatcher/pipeline/utils/messages.py4
-rw-r--r--lava_dispatcher/pipeline/utils/shell.py2
-rw-r--r--lava_dispatcher/pipeline/utils/vcs.py8
-rwxr-xr-xsetup.py2
98 files changed, 2636 insertions, 419 deletions
diff --git a/lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone-black.conf b/lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone-black.conf
index d9a13bd..6288800 100644
--- a/lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone-black.conf
+++ b/lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone-black.conf
@@ -15,9 +15,9 @@ u_load_addrs =
0x815f0000
z_load_addrs =
- 0x81000000
0x82000000
- 0x81f00000
+ 0x83000000
+ 0x88000000
boot_cmds_master =
setenv autoload no,
diff --git a/lava_dispatcher/device/vexpress.py b/lava_dispatcher/device/vexpress.py
index 8b549cb..4ca0da8 100644
--- a/lava_dispatcher/device/vexpress.py
+++ b/lava_dispatcher/device/vexpress.py
@@ -36,8 +36,6 @@ from lava_dispatcher.errors import (
OperationFailed,
)
-from master import MasterCommandRunner
-
class VexpressTarget(BootloaderTarget):
@@ -169,12 +167,6 @@ class VexpressTarget(BootloaderTarget):
firmware, bl0, bl1, bl2, bl31, rootfstype, bootloadertype,
target_type, qemu_pflash=qemu_pflash)
- def boot_master_image(self):
- super(VexpressTarget, self).boot_master_image()
- runner = MasterCommandRunner(self)
- runner.run("touch /forcefsck")
- runner.run("sync")
-
def _load_test_firmware(self):
with self._mcc_setup() as mount_point:
self._install_test_firmware(mount_point)
diff --git a/lava_dispatcher/lava_test_shell/distro/centos/lava-install-packages b/lava_dispatcher/lava_test_shell/distro/centos/lava-install-packages
new file mode 100644
index 0000000..57fdfce
--- /dev/null
+++ b/lava_dispatcher/lava_test_shell/distro/centos/lava-install-packages
@@ -0,0 +1,17 @@
+#!/bin/sh
+retry=0
+max_retry=4
+
+while [ 1 ]
+do
+ yum -e 0 -y -q install "$@"
+ return_value=$?
+ [ "$return_value" = 0 ] && break
+ [ "$retry" = "$max_retry" ] && exit $return_value
+
+ echo "================================================="
+ echo "($retry) Install failed... Retrying after some seconds"
+ echo "================================================="
+ sleep 60
+ retry=$((retry+1))
+done
diff --git a/lava_dispatcher/lava_test_shell/distro/centos/lava-installed-packages b/lava_dispatcher/lava_test_shell/distro/centos/lava-installed-packages
new file mode 100644
index 0000000..40556ad
--- /dev/null
+++ b/lava_dispatcher/lava_test_shell/distro/centos/lava-installed-packages
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+rpm -qa --qf "package: %{NAME} : %{VERSION}-%{RELEASE} \n"
diff --git a/lava_dispatcher/lava_test_shell/distro/centos/lava-os-build b/lava_dispatcher/lava_test_shell/distro/centos/lava-os-build
new file mode 100644
index 0000000..635723d
--- /dev/null
+++ b/lava_dispatcher/lava_test_shell/distro/centos/lava-os-build
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+if [ -x /usr/bin/lsb_release ]
+then
+ /usr/bin/lsb_release -d | sed -e 's/^Description:\s*//'
+ exit 0
+fi
+
+if [ -f /etc/redhat-release ]
+then
+ head -n 1 /etc/redhat-release
+ exit 0
+fi
+
+if [ -f /etc/fedora-release ]
+then
+ head -n 1 /etc/centos-release
+ exit 0
+fi
+
+if [ -f /etc/system-release ]
+then
+ head -n 1 /etc/system-release
+ exit 0
+fi
+
+if [ -f /etc/os-release ]
+then
+ . /etc/os-release
+ echo ${PRETTY_NAME}
+ exit 0
+fi
diff --git a/lava_dispatcher/pipeline/action.py b/lava_dispatcher/pipeline/action.py
index 7b83d05..ac0a434 100644
--- a/lava_dispatcher/pipeline/action.py
+++ b/lava_dispatcher/pipeline/action.py
@@ -184,6 +184,7 @@ class Pipeline(object): # pylint: disable=too-many-instance-attributes
self._override_action_timeout(action, overrides['actions'])
elif action.name in overrides:
self._override_action_timeout(action, overrides)
+ parameters['timeout'] = overrides[action.name]
if 'connections' in overrides and action.name in overrides['connections']:
self._override_connection_timeout(action, overrides['connections'])
# Set the parameters after populate so the sub-actions are also
@@ -193,15 +194,14 @@ class Pipeline(object): # pylint: disable=too-many-instance-attributes
# job overrides device timeouts:
if self.job and 'timeouts' in self.job.parameters:
overrides = self.job.parameters['timeouts']
- if 'actions' in overrides and action.name in overrides:
+ if 'actions' in overrides and action.name in overrides['actions']:
# set job level overrides
self._override_action_timeout(action, overrides['actions'])
elif action.name in overrides:
self._override_action_timeout(action, overrides)
parameters['timeout'] = overrides[action.name]
- if 'connections' in overrides and action.name in overrides:
+ if 'connections' in overrides and action.name in overrides['connections']:
self._override_connection_timeout(action, overrides['connections'])
-
action.parameters = parameters
def describe(self, verbose=True):
@@ -286,6 +286,13 @@ class Pipeline(object): # pylint: disable=too-many-instance-attributes
def _log_action_results(self, action):
if action.results and isinstance(action.logger, YAMLLogger):
+ action.logger.results({
+ "definition": "lava",
+ "case": action.name,
+ "level": action.level,
+ "duration": action.elapsed_time,
+ "result": "fail" if action.errors else "pass",
+ "extra": action.results})
action.results.update(
{
'level': action.level,
@@ -294,7 +301,6 @@ class Pipeline(object): # pylint: disable=too-many-instance-attributes
'connection-timeout': action.connection_timeout.duration
}
)
- action.logger.results({action.name: action.results})
def run_actions(self, connection, args=None): # pylint: disable=too-many-branches,too-many-statements,too-many-locals
@@ -315,6 +321,7 @@ class Pipeline(object): # pylint: disable=too-many-instance-attributes
# the action which overran the timeout has been allowed to complete.
name = self.job.parameters.get('job_name', '?')
msg = "Job '%s' timed out after %s seconds" % (name, int(self.job.timeout.duration))
+ action.logger.error(msg)
action.errors = msg
final = self.actions[-1]
if final.name == "finalize":
@@ -331,10 +338,9 @@ class Pipeline(object): # pylint: disable=too-many-instance-attributes
if isinstance(action.logger, YAMLLogger):
action.logger.setMetadata(action.level, action.name)
# Add action start timestamp to the log message
- msg = {'msg': 'start: %s %s (max %ds)' % (action.level,
- action.name,
- action.timeout.duration),
- 'ts': datetime.datetime.utcnow().isoformat()}
+ msg = 'start: %s %s (max %ds)' % (action.level,
+ action.name,
+ action.timeout.duration)
if self.parent is None:
action.logger.info(msg)
else:
@@ -353,7 +359,7 @@ class Pipeline(object): # pylint: disable=too-many-instance-attributes
TypeError, RuntimeError, AttributeError):
action.elapsed_time = time.time() - start
msg = re.sub('\s+', ' ', ''.join(traceback.format_exc().split('\n')))
- action.logger.exception(msg)
+ action.logger.exception(traceback.format_exc())
action.errors = msg
action.cleanup()
self.cleanup_actions(connection, None)
@@ -363,9 +369,8 @@ class Pipeline(object): # pylint: disable=too-many-instance-attributes
raise KeyboardInterrupt
action.elapsed_time = time.time() - start
# Add action end timestamp to the log message
- msg = {'msg': "%s duration: %.02f" % (action.name,
- action.elapsed_time),
- 'ts': datetime.datetime.utcnow().isoformat()}
+ msg = "%s duration: %.02f" % (action.name,
+ action.elapsed_time)
if self.parent is None:
action.logger.info(msg)
else:
@@ -558,7 +563,9 @@ class Action(object): # pylint: disable=too-many-instance-attributes
self.timeout.name = self.name
# Overide the duration if needed
if 'timeout' in self.parameters:
- self.timeout.duration = Timeout.parse(self.parameters['timeout'])
+ # preserve existing overrides
+ if self.timeout.duration == Timeout.default_duration():
+ self.timeout.duration = Timeout.parse(self.parameters['timeout'])
if 'connection_timeout' in self.parameters:
self.connection_timeout.duration = Timeout.parse(self.parameters['connection_timeout'])
@@ -571,8 +578,8 @@ class Action(object): # pylint: disable=too-many-instance-attributes
self.max_retries = self.parameters['repeat']
if self.job:
if self.job.device:
- if 'character-delays' in self.job.device:
- self.character_delay = self.job.device['character-delays'].get(self.section, 0)
+ if 'character_delays' in self.job.device:
+ self.character_delay = self.job.device['character_delays'].get(self.section, 0)
@parameters.setter
def parameters(self, data):
@@ -822,7 +829,8 @@ class Action(object): # pylint: disable=too-many-instance-attributes
if not connection.connected:
self.logger.debug("Already disconnected")
return
- self.logger.debug("%s: Wait for prompt. %s seconds" % (self.name, int(self.connection_timeout.duration)))
+ self.logger.debug("%s: Wait for prompt. %s seconds",
+ self.name, int(self.connection_timeout.duration))
return connection.wait()
diff --git a/lava_dispatcher/pipeline/actions/boot/__init__.py b/lava_dispatcher/pipeline/actions/boot/__init__.py
index cac9ebe..a3799db 100644
--- a/lava_dispatcher/pipeline/actions/boot/__init__.py
+++ b/lava_dispatcher/pipeline/actions/boot/__init__.py
@@ -109,7 +109,7 @@ class AutoLoginAction(Action):
# use lazy logging or the string will not be quoted correctly.
def check_prompt_characters(prompt):
if not any([True for c in DISTINCTIVE_PROMPT_CHARACTERS if c in prompt]):
- self.logger.warning(self.check_prompt_characters_warning % prompt) # pylint: disable=logging-not-lazy
+ self.logger.warning(self.check_prompt_characters_warning, prompt)
connection.prompt_str = LinuxKernelMessages.get_init_prompts()
# Skip auto login if the configuration is not found
diff --git a/lava_dispatcher/pipeline/actions/boot/bootloader_defaults.py b/lava_dispatcher/pipeline/actions/boot/bootloader_defaults.py
new file mode 100644
index 0000000..6b450a3
--- /dev/null
+++ b/lava_dispatcher/pipeline/actions/boot/bootloader_defaults.py
@@ -0,0 +1,156 @@
+# Copyright (C) 2016 Linaro Limited
+#
+# Author: Dean Arnold <dean.arnold@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+# List just the subclasses supported for this base strategy
+# imported by the parser to populate the list of subclasses.
+
+from lava_dispatcher.pipeline.action import (
+ Action,
+ Pipeline,
+)
+from lava_dispatcher.pipeline.logical import Boot
+from lava_dispatcher.pipeline.actions.boot import BootAction, AutoLoginAction
+from lava_dispatcher.pipeline.actions.boot.environment import ExportDeviceEnvironment
+from lava_dispatcher.pipeline.shell import ExpectShellSession
+from lava_dispatcher.pipeline.connections.serial import ConnectDevice
+from lava_dispatcher.pipeline.power import ResetDevice
+from lava_dispatcher.pipeline.utils.constants import (
+ UBOOT_AUTOBOOT_PROMPT,
+ BOOT_MESSAGE,
+)
+
+
+def default_accepts(device, parameters):
+ if 'method' not in parameters:
+ raise RuntimeError("method not specified in boot parameters")
+ if parameters['method'] != 'bootloader-defaults':
+ return False
+ if 'actions' not in device:
+ raise RuntimeError("Invalid device configuration")
+ if 'boot' not in device['actions']:
+ return False
+ if 'methods' not in device['actions']['boot']:
+ raise RuntimeError("Device misconfiguration")
+ return True
+
+
+class BootloaderDefaults(Boot):
+ """
+ """
+
+ compatibility = 1
+
+ def __init__(self, parent, parameters):
+ super(BootloaderDefaults, self).__init__(parent)
+ self.action = BootloaderDefaultsAction()
+ self.action.section = self.action_type
+ self.action.job = self.job
+ parent.add_action(self.action, parameters)
+
+ @classmethod
+ def accepts(cls, device, parameters):
+ if not default_accepts(device, parameters):
+ return False
+ return 'bootloader-defaults' in device['actions']['boot']['methods']
+
+
+class BootloaderDefaultsAction(BootAction):
+ """
+ Wraps the Retry Action to allow for actions which precede
+ the reset, e.g. Connect.
+ """
+ def __init__(self):
+ super(BootloaderDefaultsAction, self).__init__()
+ self.name = "bootloader-defaults-action"
+ self.description = "Autorun precanned bootloader entry"
+ self.summary = "allow bootloader to run"
+
+ def populate(self, parameters):
+ self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+ # customize the device configuration for this job
+ self.internal_pipeline.add_action(ConnectDevice())
+ self.internal_pipeline.add_action(BootloaderDefaultsRetry())
+
+
+class MonitorBootloaderAutoBoot(Action):
+ """
+ Waits for a shell connection to the device for the current job.
+ """
+
+ def __init__(self):
+ super(MonitorBootloaderAutoBoot, self).__init__()
+ self.name = "monitor-bootloader-autoboot"
+ self.summary = "Monitor that bootloder autoboot is taking place"
+ self.description = "Wait for autoboot to happen"
+
+ def run(self, connection, args=None):
+ if not connection:
+ raise RuntimeError("%s started without a connection already in use" % self.name)
+ connection = super(MonitorBootloaderAutoBoot, self).run(connection, args)
+ params = self.job.device['actions']['boot']['methods']['bootloader-defaults']['parameters']
+ connection.prompt_str = params.get('autoboot_prompt', UBOOT_AUTOBOOT_PROMPT)
+ self.logger.debug("Waiting for prompt: %s" % connection.prompt_str)
+ self.wait(connection)
+ # allow for auto_login
+ connection.prompt_str = params.get('boot_message', BOOT_MESSAGE)
+ self.logger.debug("Waiting for prompt: %s" % connection.prompt_str)
+ self.wait(connection)
+ return connection
+
+
+class BootloaderDefaultsRetry(BootAction):
+
+ def __init__(self):
+ super(BootloaderDefaultsRetry, self).__init__()
+ self.name = "uboot-retry"
+ self.description = "interactive uboot retry action"
+ self.summary = "uboot commands with retry"
+
+ def populate(self, parameters):
+ self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+ # establish a new connection before trying the reset
+ self.internal_pipeline.add_action(ResetDevice())
+ self.internal_pipeline.add_action(MonitorBootloaderAutoBoot()) # wait
+ # and set prompt to the uboot prompt
+ # Add AutoLoginAction unconditionally as this action does nothing if
+ # the configuration does not contain 'auto_login'
+ self.internal_pipeline.add_action(AutoLoginAction())
+ self.internal_pipeline.add_action(ExpectShellSession()) # wait
+ self.internal_pipeline.add_action(ExportDeviceEnvironment())
+
+ def validate(self):
+ super(BootloaderDefaultsRetry, self).validate()
+ self.set_common_data(
+ 'bootloader_prompt',
+ 'prompt',
+ self.job.device['actions']['boot']['methods']['bootloader-defaults']['parameters']['bootloader_prompt']
+ )
+
+ def run(self, connection, args=None):
+ connection = super(BootloaderDefaultsRetry, self).run(connection, args)
+ self.logger.debug("Setting default test shell prompt")
+ if not connection.prompt_str:
+ connection.prompt_str = self.parameters['prompts']
+ self.logger.debug(connection.prompt_str)
+ connection.timeout = self.connection_timeout
+ self.wait(connection)
+ self.logger.error(self.errors)
+ self.data['boot-result'] = 'failed' if self.errors else 'success'
+ return connection
diff --git a/lava_dispatcher/pipeline/actions/boot/fastboot.py b/lava_dispatcher/pipeline/actions/boot/fastboot.py
index 64f58e2..933ebe1 100644
--- a/lava_dispatcher/pipeline/actions/boot/fastboot.py
+++ b/lava_dispatcher/pipeline/actions/boot/fastboot.py
@@ -19,8 +19,6 @@
# with this program; if not, see <http://www.gnu.org/licenses>.
-import os
-import tarfile
from lava_dispatcher.pipeline.action import (
Pipeline,
Action,
@@ -28,11 +26,7 @@ from lava_dispatcher.pipeline.action import (
)
from lava_dispatcher.pipeline.logical import Boot
from lava_dispatcher.pipeline.actions.boot import BootAction
-from lava_dispatcher.pipeline.actions.boot import AutoLoginAction
-from lava_dispatcher.pipeline.shell import ExpectShellSession
from lava_dispatcher.pipeline.connections.lxc import ConnectLxc
-from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
-from lava_dispatcher.pipeline.utils.constants import ANDROID_TMP_DIR
class BootFastboot(Boot):
@@ -133,6 +127,9 @@ class WaitForAdbDevice(Action):
connection = super(WaitForAdbDevice, self).run(connection, args)
lxc_name = self.get_common_data('lxc', 'name')
serial_number = self.job.device['adb_serial_number']
+ adb_cmd = ['lxc-attach', '-n', lxc_name, '--', 'adb', 'start-server']
+ self.logger.debug("Starting adb daemon")
+ self.run_command(adb_cmd)
adb_cmd = ['lxc-attach', '-n', lxc_name, '--', 'adb',
'-s', serial_number, 'wait-for-device']
self.logger.debug("%s: Waiting for device", serial_number)
diff --git a/lava_dispatcher/pipeline/actions/boot/grub.py b/lava_dispatcher/pipeline/actions/boot/grub.py
index 76d2dc8..cd5ed0f 100644
--- a/lava_dispatcher/pipeline/actions/boot/grub.py
+++ b/lava_dispatcher/pipeline/actions/boot/grub.py
@@ -140,7 +140,7 @@ class BootloaderInterrupt(Action):
if self.job.device.connect_command is '':
self.errors = "Unable to connect to device %s" % hostname
else:
- self.logger.debug("%s may need manual intervention to reboot" % hostname)
+ self.logger.debug("%s may need manual intervention to reboot", hostname)
device_methods = self.job.device['actions']['boot']['methods']
if 'bootloader_prompt' not in device_methods[self.type]['parameters']:
self.errors = "Missing bootloader prompt for device"
@@ -176,7 +176,8 @@ class InstallerWait(Action):
def run(self, connection, args=None):
connection = super(InstallerWait, self).run(connection, args)
wait_string = self.parameters['boot_finished']
- self.logger.debug("Not expecting a shell, so waiting for boot_finished: %s", wait_string)
+ msg = wait_string if isinstance(wait_string, str) else ', '.join(wait_string)
+ self.logger.debug("Not expecting a shell, so waiting for boot_finished: %s", msg)
connection.prompt_str = wait_string
self.wait(connection)
self.data['boot-result'] = 'failed' if self.errors else 'success'
@@ -237,6 +238,7 @@ class BootloaderCommandOverlay(Action):
'{SERVER_IP}': ip_addr
}
substitutions['{PRESEED_CONFIG}'] = self.get_common_data('file', 'preseed')
+ substitutions['{PRESEED_LOCAL}'] = self.get_common_data('file', 'preseed_local')
substitutions['{DTB}'] = self.get_common_data('file', 'dtb')
substitutions['{RAMDISK}'] = self.get_common_data('file', 'ramdisk')
substitutions['{KERNEL}'] = self.get_common_data('file', 'kernel')
@@ -291,6 +293,6 @@ class BootloaderCommandsAction(Action):
i += 1
# allow for auto_login
connection.prompt_str = self.params.get('boot_message', BOOT_MESSAGE)
- self.logger.debug("Changing prompt to %s" % connection.prompt_str)
+ self.logger.debug("Changing prompt to %s", connection.prompt_str)
self.wait(connection)
return connection
diff --git a/lava_dispatcher/pipeline/actions/boot/ipxe.py b/lava_dispatcher/pipeline/actions/boot/ipxe.py
index 912517e..200dc15 100644
--- a/lava_dispatcher/pipeline/actions/boot/ipxe.py
+++ b/lava_dispatcher/pipeline/actions/boot/ipxe.py
@@ -169,7 +169,7 @@ class BootloaderInterrupt(Action):
if self.job.device.connect_command is '':
self.errors = "Unable to connect to device %s" % hostname
else:
- self.logger.debug("%s may need manual intervention to reboot" % hostname)
+ self.logger.debug("%s may need manual intervention to reboot", hostname)
device_methods = self.job.device['actions']['boot']['methods']
if 'bootloader_prompt' not in device_methods[self.type]['parameters']:
self.errors = "Missing bootloader prompt for device"
@@ -299,7 +299,7 @@ class BootloaderCommandsAction(Action):
self.errors = "%s started without a connection already in use" % self.name
connection = super(BootloaderCommandsAction, self).run(connection, args)
connection.prompt_str = self.params['bootloader_prompt']
- self.logger.debug("Changing prompt to %s" % connection.prompt_str)
+ self.logger.debug("Changing prompt to %s", connection.prompt_str)
self.wait(connection)
i = 1
for line in self.data[self.type]['commands']:
@@ -309,6 +309,6 @@ class BootloaderCommandsAction(Action):
i += 1
# allow for auto_login
connection.prompt_str = self.params.get('boot_message', BOOT_MESSAGE)
- self.logger.debug("Changing prompt to %s" % connection.prompt_str)
+ self.logger.debug("Changing prompt to %s", connection.prompt_str)
self.wait(connection)
return connection
diff --git a/lava_dispatcher/pipeline/actions/boot/iso.py b/lava_dispatcher/pipeline/actions/boot/iso.py
index 8958614..39e052b 100644
--- a/lava_dispatcher/pipeline/actions/boot/iso.py
+++ b/lava_dispatcher/pipeline/actions/boot/iso.py
@@ -202,5 +202,5 @@ class IsoRebootAction(Action):
shell_connection.prompt_str = [INSTALLER_QUIET_MSG]
self.wait(shell_connection)
self.data['boot-result'] = 'failed' if self.errors else 'success'
- self.logger.debug("boot-result: %s" % self.data['boot-result'])
+ self.logger.debug("boot-result: %s", self.data['boot-result'])
return shell_connection
diff --git a/lava_dispatcher/pipeline/actions/boot/kexec.py b/lava_dispatcher/pipeline/actions/boot/kexec.py
index deab0b5..2fadd81 100644
--- a/lava_dispatcher/pipeline/actions/boot/kexec.py
+++ b/lava_dispatcher/pipeline/actions/boot/kexec.py
@@ -112,7 +112,7 @@ class KexecAction(Action):
connection = super(KexecAction, self).run(connection, args)
if 'kernel-config' in self.parameters:
cmd = "zgrep -i kexec %s |grep -v '^#'" % self.parameters['kernel-config']
- self.logger.debug("Checking for kexec: %s" % cmd)
+ self.logger.debug("Checking for kexec: %s", cmd)
connection.sendline(cmd)
connection.sendline(self.load_command)
self.wait(connection)
diff --git a/lava_dispatcher/pipeline/actions/boot/qemu.py b/lava_dispatcher/pipeline/actions/boot/qemu.py
index 7ac4d17..1e80a24 100644
--- a/lava_dispatcher/pipeline/actions/boot/qemu.py
+++ b/lava_dispatcher/pipeline/actions/boot/qemu.py
@@ -117,11 +117,16 @@ class CallQemuAction(Action):
self.errors = "Unable to identify boot prompts from job definition."
try:
boot = self.job.device['actions']['boot']['methods']['qemu']
+ if 'parameters' not in boot or 'command' not in boot['parameters']:
+ self.errors = "Invalid device configuration - missing parameters"
+ elif not boot['parameters']['command']:
+ self.errors = "No QEMU binary command found - missing context."
qemu_binary = which(boot['parameters']['command'])
self.sub_command = [qemu_binary]
self.sub_command.extend(boot['parameters'].get('options', []))
except AttributeError as exc:
- raise InfrastructureError(exc)
+ self.errors = "Unable to parse device options: %s %s" % (
+ exc, self.job.device['actions']['boot']['methods']['qemu'])
except (KeyError, TypeError):
self.errors = "Invalid parameters for %s" % self.name
substitutions = {}
diff --git a/lava_dispatcher/pipeline/actions/boot/strategies.py b/lava_dispatcher/pipeline/actions/boot/strategies.py
index 0d709e2..79b5754 100644
--- a/lava_dispatcher/pipeline/actions/boot/strategies.py
+++ b/lava_dispatcher/pipeline/actions/boot/strategies.py
@@ -33,3 +33,4 @@ from lava_dispatcher.pipeline.actions.boot.lxc import BootLxc
from lava_dispatcher.pipeline.actions.boot.ipxe import IPXE
from lava_dispatcher.pipeline.actions.boot.grub import Grub
from lava_dispatcher.pipeline.actions.boot.iso import BootIsoInstaller
+from lava_dispatcher.pipeline.actions.boot.bootloader_defaults import BootloaderDefaults
diff --git a/lava_dispatcher/pipeline/actions/boot/u_boot.py b/lava_dispatcher/pipeline/actions/boot/u_boot.py
index 56836a8..629f017 100644
--- a/lava_dispatcher/pipeline/actions/boot/u_boot.py
+++ b/lava_dispatcher/pipeline/actions/boot/u_boot.py
@@ -21,6 +21,7 @@
# List just the subclasses supported for this base strategy
# imported by the parser to populate the list of subclasses.
+import os.path
from lava_dispatcher.pipeline.action import (
Action,
Pipeline,
@@ -96,6 +97,7 @@ class UBootAction(BootAction):
def populate(self, parameters):
self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
# customize the device configuration for this job
+ self.internal_pipeline.add_action(UBootPrepareKernelAction())
self.internal_pipeline.add_action(UBootSecondaryMedia())
self.internal_pipeline.add_action(UBootCommandOverlay())
self.internal_pipeline.add_action(ConnectDevice())
@@ -117,7 +119,7 @@ class ExpectBootloaderSession(Action):
connection = super(ExpectBootloaderSession, self).run(connection, args)
device_methods = self.job.device['actions']['boot']['methods']
connection.prompt_str = device_methods['u-boot']['parameters']['bootloader_prompt']
- self.logger.debug("%s: Waiting for prompt" % self.name)
+ self.logger.debug("%s: Waiting for prompt", self.name)
self.wait(connection)
return connection
@@ -161,8 +163,12 @@ class UBootRetry(BootAction):
self.logger.debug(connection.prompt_str)
connection.timeout = self.connection_timeout
self.wait(connection)
- self.logger.error(self.errors)
- self.data['boot-result'] = 'failed' if self.errors else 'success'
+ # Log an error only when needed
+ if self.errors:
+ self.logger.error(self.errors)
+ self.data['boot-result'] = 'failed'
+ else:
+ self.data['boot-result'] = 'success'
return connection
@@ -187,7 +193,7 @@ class UBootInterrupt(Action):
if self.job.device.connect_command is '':
self.errors = "Unable to connect to device %s" % hostname
else:
- self.logger.debug("%s may need manual intervention to reboot" % hostname)
+ self.logger.debug("%s may need manual intervention to reboot", hostname)
device_methods = self.job.device['actions']['boot']['methods']
if 'bootloader_prompt' not in device_methods['u-boot']['parameters']:
self.errors = "Missing bootloader prompt for device"
@@ -322,9 +328,15 @@ class UBootCommandOverlay(Action):
substitutions['{RAMDISK_ADDR}'] = ramdisk_addr
if not self.get_common_data('tftp', 'ramdisk') and not self.get_common_data('file', 'ramdisk'):
ramdisk_addr = '-'
-
+ bootcommand = self.parameters['type']
+ if self.parameters['type'] == 'uimage':
+ bootcommand = 'bootm'
+ elif self.parameters['type'] == 'zimage':
+ bootcommand = 'bootz'
+ elif self.parameters['type'] == 'image':
+ bootcommand = 'booti'
substitutions['{BOOTX}'] = "%s %s %s %s" % (
- self.parameters['type'], kernel_addr, ramdisk_addr, dtb_addr)
+ bootcommand, kernel_addr, ramdisk_addr, dtb_addr)
substitutions['{RAMDISK}'] = self.get_common_data('file', 'ramdisk')
substitutions['{KERNEL}'] = self.get_common_data('file', 'kernel')
@@ -342,8 +354,9 @@ class UBootCommandOverlay(Action):
substitutions['{ROOT}'] = self.get_common_data('uuid', 'root') # UUID label, not a file
substitutions['{ROOT_PART}'] = self.get_common_data('uuid', 'boot_part')
+ self.data.setdefault('u-boot', {})
self.data['u-boot']['commands'] = substitute(self.commands, substitutions)
- self.logger.debug("Parsed boot commands: %s" % '; '.join(self.data['u-boot']['commands']))
+ self.logger.debug("Parsed boot commands: %s", '; '.join(self.data['u-boot']['commands']))
return connection
@@ -371,13 +384,89 @@ class UBootCommandsAction(Action):
self.errors = "%s started without a connection already in use" % self.name
connection = super(UBootCommandsAction, self).run(connection, args)
connection.prompt_str = self.params['bootloader_prompt']
- self.logger.debug("Changing prompt to %s" % connection.prompt_str)
+ self.logger.debug("Changing prompt to %s", connection.prompt_str)
for line in self.data['u-boot']['commands']:
self.wait(connection)
connection.sendline(line, delay=self.character_delay)
# allow for auto_login
params = self.job.device['actions']['boot']['methods']['u-boot']['parameters']
connection.prompt_str = params.get('boot_message', BOOT_MESSAGE)
- self.logger.debug("Changing prompt to %s" % connection.prompt_str)
+ self.logger.debug("Changing prompt to %s", connection.prompt_str)
self.wait(connection)
return connection
+
+
+class UBootPrepareKernelAction(Action):
+ """
+ Convert kernels to uImage or append DTB, if needed
+ """
+ def __init__(self):
+ super(UBootPrepareKernelAction, self).__init__()
+ self.name = "uboot-prepare-kernel"
+ self.description = "convert kernel to uimage or append dtb"
+ self.summary = "prepare/convert kernel"
+ self.type = None
+ self.params = None
+ self.kernel_type = None
+
+ def create_uimage(self, kernel, load_addr, xip, arch, output): # pylint: disable=too-many-arguments
+ load_addr = int(load_addr, 16)
+ uimage_path = '%s/%s' % (os.path.dirname(kernel), output)
+ if xip:
+ entry_addr = load_addr + 64
+ else:
+ entry_addr = load_addr
+ cmd = "mkimage -A %s -O linux -T kernel" \
+ " -C none -a 0x%x -e 0x%x" \
+ " -d %s %s" % (arch, load_addr,
+ entry_addr, kernel,
+ uimage_path)
+ if self.run_command(cmd.split(' ')):
+ return uimage_path
+ else:
+ raise InfrastructureError("uImage creation failed")
+
+ def validate(self):
+ super(UBootPrepareKernelAction, self).validate()
+ self.params = self.job.device['actions']['boot']['methods']['u-boot']['parameters']
+ self.kernel_type = self.get_common_data('type', 'kernel')
+ if 'type' in self.parameters:
+ self.type = str(self.parameters['type']).lower()
+ if self.type:
+ if self.type not in self.job.device['parameters']:
+ self.errors = "Requested kernel boot type '%s' not supported by this device." % self.type
+ if self.type == "bootm" or self.type == "bootz" or self.type == "booti":
+ self.logger.info("booti, bootm and bootz are being deprecated soon, please use 'image','uimage' or 'zimage'")
+ if self.kernel_type:
+ self.kernel_type = str(self.kernel_type).lower()
+ if self.type != self.kernel_type:
+ if 'mkimage_arch' not in self.params:
+ self.errors = "Missing architecture for uboot mkimage support (mkimage_arch in u-boot parameters)"
+ if self.type == 'zimage' and self.kernel_type == 'uimage':
+ self.errors = "Can't convert a uimage to zimage"
+ elif self.type == 'zimage' and self.kernel_type == 'image':
+ self.errors = "Can't convert an image to zimage"
+ elif self.type == 'image' and self.kernel_type == 'zimage':
+ self.errors = "Can't convert a zimage to image"
+
+ def run(self, connection, args=None):
+ connection = super(UBootPrepareKernelAction, self).run(connection, args)
+ if not self.kernel_type:
+ return connection # idempotency
+ old_kernel = self.get_common_data('file', 'kernel')
+ filename = self.data['download_action']['kernel']['file']
+ load_addr = self.job.device['parameters'][self.type]['kernel']
+ if 'text_offset' in self.job.device['parameters']:
+ load_addr = self.job.device['parameters']['text_offset']
+ arch = self.params['mkimage_arch']
+ if (self.type == "uimage" or self.type == "bootm") and self.kernel_type == "image":
+ self.logger.debug("Converting image to uimage")
+ self.create_uimage(filename, load_addr, False, arch, 'uImage')
+ new_kernel = os.path.dirname(old_kernel) + '/uImage'
+ self.set_common_data('file', 'kernel', new_kernel)
+ elif (self.type == "uimage" or self.type == "bootm") and self.kernel_type == "zimage":
+ self.logger.debug("Converting zimage to uimage")
+ self.create_uimage(filename, load_addr, False, arch, 'uImage')
+ new_kernel = os.path.dirname(old_kernel) + '/uImage'
+ self.set_common_data('file', 'kernel', new_kernel)
+ return connection
diff --git a/lava_dispatcher/pipeline/actions/boot/uefi_menu.py b/lava_dispatcher/pipeline/actions/boot/uefi_menu.py
index c0de229..56901a0 100644
--- a/lava_dispatcher/pipeline/actions/boot/uefi_menu.py
+++ b/lava_dispatcher/pipeline/actions/boot/uefi_menu.py
@@ -32,7 +32,6 @@ from lava_dispatcher.pipeline.menus.menus import (
)
from lava_dispatcher.pipeline.logical import Boot
from lava_dispatcher.pipeline.power import ResetDevice
-from lava_dispatcher.pipeline.shell import ExpectShellSession
from lava_dispatcher.pipeline.utils.strings import substitute
from lava_dispatcher.pipeline.utils.network import dispatcher_ip
from lava_dispatcher.pipeline.actions.boot import BootAction, AutoLoginAction
diff --git a/lava_dispatcher/pipeline/actions/deploy/apply_overlay.py b/lava_dispatcher/pipeline/actions/deploy/apply_overlay.py
index 669327b..4a4e491 100644
--- a/lava_dispatcher/pipeline/actions/deploy/apply_overlay.py
+++ b/lava_dispatcher/pipeline/actions/deploy/apply_overlay.py
@@ -33,37 +33,23 @@ from lava_dispatcher.pipeline.utils.constants import (
RAMDISK_FNAME,
DISPATCHER_DOWNLOAD_DIR,
)
-from lava_dispatcher.pipeline.utils.installers import add_late_command
-from lava_dispatcher.pipeline.utils.filesystem import mkdtemp, prepare_guestfs
+from lava_dispatcher.pipeline.utils.installers import (
+ add_late_command,
+ add_to_kickstart
+)
+from lava_dispatcher.pipeline.utils.filesystem import (
+ mkdtemp,
+ prepare_guestfs,
+ copy_in_overlay
+)
from lava_dispatcher.pipeline.utils.shell import infrastructure_error
from lava_dispatcher.pipeline.utils.compression import (
compress_file,
decompress_file,
untar_file
)
-
-
-class ApplyOverlayImage(Action):
- """
- Applies the overlay to an image using mntdir
- * checks that the filesystem we need is actually mounted.
- """
- def __init__(self):
- super(ApplyOverlayImage, self).__init__()
- self.name = "apply-overlay-image"
- self.summary = "unpack overlay onto image"
- self.description = "unpack overlay onto image mountpoint"
-
- def run(self, connection, args=None):
- if not self.data['compress-overlay'].get('output'):
- raise RuntimeError("Unable to find the overlay")
- if not os.path.ismount(self.data['loop_mount']['mntdir']):
- raise RuntimeError("Image overlay requested to be applied but %s is not a mountpoint" %
- self.data['loop_mount']['mntdir'])
- connection = super(ApplyOverlayImage, self).run(connection, args)
- # use tarfile module - no SELinux support here yet
- untar_file(self.data['compress-overlay'].get('output'), self.data['loop_mount']['mntdir'])
- return connection
+from lava_dispatcher.pipeline.utils.strings import substitute
+from lava_dispatcher.pipeline.utils.network import dispatcher_ip
class ApplyOverlayGuest(Action):
@@ -96,6 +82,29 @@ class ApplyOverlayGuest(Action):
return connection
+class ApplyOverlayImage(Action):
+
+ def __init__(self):
+ super(ApplyOverlayImage, self).__init__()
+ self.name = "apply-overlay-image"
+ self.summary = "apply overlay via guestfs to test image"
+
+ def validate(self):
+ super(ApplyOverlayImage, self).validate()
+
+ def run(self, connection, args=None):
+ if not self.data['compress-overlay'].get('output'):
+ raise RuntimeError("Unable to find the overlay")
+ overlay = self.data['compress-overlay'].get('output')
+ self.logger.debug("Overlay: %s", overlay)
+ decompressed_image = self.data['download_action']['image']['file']
+ self.logger.debug("Image: %s", decompressed_image)
+ root_partition = self.parameters['image']['root_partition']
+ self.logger.debug("root_partition: %s", root_partition)
+ copy_in_overlay(decompressed_image, root_partition, overlay)
+ return connection
+
+
class PrepareOverlayTftp(Action):
"""
Extracts the ramdisk or nfsrootfs in preparation for the lava overlay
@@ -113,8 +122,8 @@ class PrepareOverlayTftp(Action):
self.internal_pipeline.add_action(ExtractRamdisk()) # idempotent, checks for a ramdisk parameter
self.internal_pipeline.add_action(ExtractModules()) # idempotent, checks for a modules parameter
self.internal_pipeline.add_action(ApplyOverlayTftp())
- self.internal_pipeline.add_action(CompressRamdisk()) # idempotent, checks for a ramdisk parameter
self.internal_pipeline.add_action(ConfigurePreseedFile()) # idempotent, checks for a preseed parameter
+ self.internal_pipeline.add_action(CompressRamdisk()) # idempotent, checks for a ramdisk parameter
def run(self, connection, args=None):
connection = super(PrepareOverlayTftp, self).run(connection, args)
@@ -167,10 +176,23 @@ class ApplyOverlayTftp(Action):
else:
self.logger.debug("No overlay directory")
self.logger.debug(self.parameters)
+ if self.parameters.get('os', None) == "centos_installer":
+            # centos installer ramdisk doesn't like having anything other
+ # than the kickstart config being inserted. Instead, make the
+ # overlay accessible through tftp. Yuck.
+ tftp_dir = os.path.dirname(self.data['download_action']['ramdisk']['file'])
+ shutil.copy(overlay_file, tftp_dir)
+ suffix = self.data['tftp-deploy'].get('suffix', '')
+ self.set_common_data('file', 'overlay', os.path.join(suffix, os.path.basename(overlay_file)))
untar_file(overlay_file, directory)
if nfs_url:
subprocess.check_output(['umount', directory])
os.rmdir(directory) # fails if the umount fails
+ if overlay_file:
+ untar_file(overlay_file, directory)
+ if nfs_url:
+ subprocess.check_output(['umount', directory])
+ os.rmdir(directory) # fails if the umount fails
return connection
@@ -217,6 +239,7 @@ class ExtractNfsRootfs(ExtractRootfs):
self.summary = "unpack nfsrootfs, ready to apply lava overlay"
self.param_key = 'nfsrootfs'
self.file_key = "nfsroot"
+ self.rootdir = DISPATCHER_DOWNLOAD_DIR
def validate(self):
super(ExtractNfsRootfs, self).validate()
@@ -228,6 +251,31 @@ class ExtractNfsRootfs(ExtractRootfs):
self.errors = "no file specified extract as %s" % self.param_key
if not os.path.exists('/usr/sbin/exportfs'):
raise InfrastructureError("NFS job requested but nfs-kernel-server not installed.")
+ if 'prefix' in self.parameters[self.param_key]:
+ prefix = self.parameters[self.param_key]['prefix']
+ if prefix.startswith('/'):
+ self.errors = 'prefix must not be an absolute path'
+ if not prefix.endswith('/'):
+ self.errors = 'prefix must be a directory and end with /'
+
+ def run(self, connection, args=None):
+ if not self.parameters.get(self.param_key, None): # idempotency
+ return connection
+ connection = super(ExtractNfsRootfs, self).run(connection, args)
+ root = self.data['download_action'][self.param_key]['file']
+ root_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
+ untar_file(root, root_dir)
+ if 'prefix' in self.parameters[self.param_key]:
+ prefix = self.parameters[self.param_key]['prefix']
+ self.logger.warning("Adding '%s' prefix, any other content will not be visible." % prefix)
+ self.rootdir = os.path.join(root_dir, prefix)
+ else:
+ self.rootdir = root_dir
+        # Record the directory into which the overlay is unpacked; it is
+        # also used in the substitutions for the bootloader command string.
+ self.set_common_data('file', self.file_key, self.rootdir)
+ self.logger.debug("Extracted %s to %s", self.file_key, self.rootdir)
+ return connection
class ExtractModules(Action):
@@ -366,6 +414,16 @@ class CompressRamdisk(Action):
raise RuntimeError("Unable to find ramdisk directory")
ramdisk_dir = self.data['extract-overlay-ramdisk']['extracted_ramdisk']
ramdisk_data = self.data['extract-overlay-ramdisk']['ramdisk_file']
+ if self.parameters.get('preseed', None):
+ if self.parameters["deployment_data"].get("preseed_to_ramdisk", None):
+ # download action must have completed to get this far
+ # some installers (centos) cannot fetch the preseed file via tftp.
+ # Instead, put the preseed file into the ramdisk using a given name
+ # from deployment_data which we can use in the boot commands.
+ filename = self.parameters["deployment_data"]["preseed_to_ramdisk"]
+ self.logger.info("Copying preseed file into ramdisk: %s", filename)
+ shutil.copy(self.data['download_action']['preseed']['file'], os.path.join(ramdisk_dir, filename))
+ self.set_common_data('file', 'preseed_local', filename)
pwd = os.getcwd()
os.chdir(ramdisk_dir)
self.logger.debug("Building ramdisk %s containing %s",
@@ -467,3 +525,8 @@ class ConfigurePreseedFile(Action):
if self.parameters["deployment_data"].get('installer_extra_cmd', None):
if self.parameters.get('os', None) == "debian_installer":
add_late_command(self.data['download_action']['preseed']['file'], self.parameters["deployment_data"]["installer_extra_cmd"])
+ if self.parameters.get('os', None) == "centos_installer":
+ substitutions = {}
+ substitutions['{OVERLAY_URL}'] = 'tftp://' + dispatcher_ip() + '/' + self.get_common_data('file', 'overlay')
+ post_command = substitute([self.parameters["deployment_data"]["installer_extra_cmd"]], substitutions)
+ add_to_kickstart(self.data['download_action']['preseed']['file'], post_command[0])
diff --git a/lava_dispatcher/pipeline/actions/deploy/download.py b/lava_dispatcher/pipeline/actions/deploy/download.py
index b52cbab..a67c9f7 100644
--- a/lava_dispatcher/pipeline/actions/deploy/download.py
+++ b/lava_dispatcher/pipeline/actions/deploy/download.py
@@ -123,6 +123,7 @@ class DownloadHandler(Action): # pylint: disable=too-many-instance-attributes
self.logger.debug("Cleaning up temporary tree.")
shutil.rmtree(nested_tmp_dir)
self.data['download_action'][self.key]['file'] = ''
+ super(DownloadHandler, self).cleanup()
def _url_to_fname_suffix(self, path, modify):
filename = os.path.basename(self.url.path)
@@ -138,13 +139,6 @@ class DownloadHandler(Action): # pylint: disable=too-many-instance-attributes
filename = os.path.join(path, '.'.join(parts[:-1]))
return filename, suffix
- def cleanup(self):
- nested_tmp_dir = os.path.join(self.path, self.key)
- if os.path.exists(nested_tmp_dir):
- self.logger.info("%s %s cleanup", self.name, nested_tmp_dir)
- shutil.rmtree(nested_tmp_dir)
- super(DownloadHandler, self).cleanup()
-
@contextlib.contextmanager
def _decompressor_stream(self): # pylint: disable=too-many-branches
dwnld_file = None
@@ -211,7 +205,7 @@ class DownloadHandler(Action): # pylint: disable=too-many-instance-attributes
self.data['download_action'][self.key]['image_arg'] = image_arg
else:
self.url = lavaurl.urlparse(self.parameters[self.key]['url'])
- compression = self.parameters[self.key].get('compression', False)
+ compression = self.parameters[self.key].get('compression', None)
overlay = self.parameters.get('overlay', False)
fname, _ = self._url_to_fname_suffix(self.path, compression)
self.data['download_action'][self.key] = {'file': fname}
@@ -221,6 +215,9 @@ class DownloadHandler(Action): # pylint: disable=too-many-instance-attributes
if compression:
if compression not in ['gz', 'bz2', 'xz']:
self.errors = "Unknown 'compression' format '%s'" % compression
+ # pass kernel type to boot Action
+ if self.key == 'kernel':
+ self.set_common_data('type', self.key, self.parameters[self.key].get('type', None))
def run(self, connection, args=None): # pylint: disable=too-many-locals,too-many-branches,too-many-statements
def progress_unknown_total(downloaded_size, last_value):
@@ -249,7 +246,7 @@ class DownloadHandler(Action): # pylint: disable=too-many-instance-attributes
md5sum = remote.get('md5sum', None)
sha256sum = remote.get('sha256sum', None)
- self.logger.info("downloading %s as %s" % (remote, fname))
+ self.logger.info("downloading %s as %s" % (remote['url'], fname))
downloaded_size = 0
beginning = time.time()
diff --git a/lava_dispatcher/pipeline/actions/deploy/environment.py b/lava_dispatcher/pipeline/actions/deploy/environment.py
index ce81dd7..af6cd8e 100644
--- a/lava_dispatcher/pipeline/actions/deploy/environment.py
+++ b/lava_dispatcher/pipeline/actions/deploy/environment.py
@@ -40,15 +40,15 @@ class DeployDeviceEnvironment(Action):
if 'lava_test_shell_file' not in \
self.parameters['deployment_data'].keys():
self.errors = "Invalid deployment data - missing lava_test_shell_file"
- if self.job.parameters['env_dut']:
+
+ if 'env_dut' in self.job.parameters and self.job.parameters['env_dut']:
+ # Check that the file is valid yaml
try:
yaml.load(self.job.parameters['env_dut'])
except (TypeError, yaml.scanner.ScannerError) as exc:
self.errors = exc
return
- if 'env_dut' in self.job.parameters and self.job.parameters['env_dut']:
-
self.env = self.job.parameters['env_dut']
environment = self._create_environment()
diff --git a/lava_dispatcher/pipeline/actions/deploy/fastboot.py b/lava_dispatcher/pipeline/actions/deploy/fastboot.py
index a9b852e..07fdcb4 100644
--- a/lava_dispatcher/pipeline/actions/deploy/fastboot.py
+++ b/lava_dispatcher/pipeline/actions/deploy/fastboot.py
@@ -18,7 +18,6 @@
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
-import os
from lava_dispatcher.pipeline.logical import Deployment
from lava_dispatcher.pipeline.connections.serial import ConnectDevice
from lava_dispatcher.pipeline.power import ResetDevice
@@ -154,6 +153,11 @@ class FastbootAction(DeployAction): # pylint:disable=too-many-instance-attribut
download.max_retries = 3 # overridden by failure_retry in the parameters, if set.
self.internal_pipeline.add_action(download)
self.internal_pipeline.add_action(ApplySystemAction())
+ if 'vendor' in image_keys:
+ download = DownloaderAction('vendor', self.fastboot_dir)
+ download.max_retries = 3 # overridden by failure_retry in the parameters, if set.
+ self.internal_pipeline.add_action(download)
+ self.internal_pipeline.add_action(ApplyVendorAction())
class EnterFastbootAction(DeployAction):
@@ -191,7 +195,7 @@ class EnterFastbootAction(DeployAction):
adb_serial_number, 'devices']
command_output = self.run_command(adb_cmd)
if command_output and adb_serial_number in command_output:
- self.logger.debug("Device is in adb: %s" % command_output)
+ self.logger.debug("Device is in adb: %s", command_output)
adb_cmd = ['lxc-attach', '-n', lxc_name, '--', 'adb',
'-s', adb_serial_number, 'reboot-bootloader']
command_output = self.run_command(adb_cmd)
@@ -205,7 +209,7 @@ class EnterFastbootAction(DeployAction):
fastboot_serial_number, 'devices']
command_output = self.run_command(fastboot_cmd)
if command_output and fastboot_serial_number in command_output:
- self.logger.debug("Device is in fastboot: %s" % command_output)
+ self.logger.debug("Device is in fastboot: %s", command_output)
fastboot_cmd = ['lxc-attach', '-n', lxc_name, '--', 'fastboot',
'-s', fastboot_serial_number, 'reboot-bootloader']
command_output = self.run_command(fastboot_cmd)
@@ -484,3 +488,42 @@ class ApplySystemAction(DeployAction):
raise JobError("Unable to apply system image using fastboot: %s" %
command_output) # FIXME: JobError needs a unit test
return connection
+
+
+class ApplyVendorAction(DeployAction):
+ """
+ Fastboot deploy vendor image.
+ """
+
+ def __init__(self):
+ super(ApplyVendorAction, self).__init__()
+ self.name = "fastboot_apply_vendor_action"
+ self.description = "fastboot apply vendor image"
+ self.summary = "fastboot apply vendor"
+ self.retries = 3
+ self.sleep = 10
+
+ def validate(self):
+ super(ApplyVendorAction, self).validate()
+ if 'download_action' not in self.data:
+ raise RuntimeError("download-action missing: %s" % self.name)
+ if 'file' not in self.data['download_action']['vendor']:
+ self.errors = "no file specified for fastboot vendor image"
+ if 'fastboot_serial_number' not in self.job.device:
+ self.errors = "device fastboot serial number missing"
+ if self.job.device['fastboot_serial_number'] == '0000000000':
+ self.errors = "device fastboot serial number unset"
+
+ def run(self, connection, args=None):
+ connection = super(ApplyVendorAction, self).run(connection, args)
+ lxc_name = self.get_common_data('lxc', 'name')
+ src = self.data['download_action']['vendor']['file']
+ dst = copy_to_lxc(lxc_name, src)
+ serial_number = self.job.device['fastboot_serial_number']
+ fastboot_cmd = ['lxc-attach', '-n', lxc_name, '--', 'fastboot',
+ '-s', serial_number, 'flash', 'vendor', dst]
+ command_output = self.run_command(fastboot_cmd)
+ if command_output and 'error' in command_output:
+ raise JobError("Unable to apply vendor image using fastboot: %s" %
+ command_output) # FIXME: JobError needs a unit test
+ return connection
diff --git a/lava_dispatcher/pipeline/actions/deploy/lxc.py b/lava_dispatcher/pipeline/actions/deploy/lxc.py
index 6957f67..cdff1cc 100644
--- a/lava_dispatcher/pipeline/actions/deploy/lxc.py
+++ b/lava_dispatcher/pipeline/actions/deploy/lxc.py
@@ -199,20 +199,28 @@ class LxcAddDeviceAction(Action):
connection = super(LxcAddDeviceAction, self).run(connection, args)
lxc_name = self.get_common_data('lxc', 'name')
if 'device_path' in list(self.job.device.keys()):
- # Wait USB_SHOW_UP_TIMEOUT seconds for the usb device to show up
- self.logger.info("Waiting %d seconds for usb device to show up" %
- USB_SHOW_UP_TIMEOUT)
- sleep(USB_SHOW_UP_TIMEOUT)
-
- device_path = os.path.realpath(self.job.device['device_path'])
- if os.path.isdir(device_path):
- devices = os.listdir(device_path)
+ device_path = self.job.device['device_path']
+ if not isinstance(device_path, list):
+ raise JobError("device_path should be a list")
+
+ if device_path:
+ # Wait USB_SHOW_UP_TIMEOUT seconds for usb device to show up
+ self.logger.info("Wait %d seconds for usb device to show up",
+ USB_SHOW_UP_TIMEOUT)
+ sleep(USB_SHOW_UP_TIMEOUT)
+
+ for path in device_path:
+ path = os.path.realpath(path)
+ if os.path.isdir(path):
+ devices = os.listdir(path)
+ else:
+ devices = [path]
+
+ for device in devices:
+ device = os.path.join(path, device)
+ lxc_cmd = ['lxc-device', '-n', lxc_name, 'add', device]
+ self.run_command(lxc_cmd)
+ self.logger.debug("%s: devices added from %s", lxc_name,
+ path)
else:
- devices = [device_path]
-
- for device in devices:
- device = os.path.join(device_path, device)
- lxc_cmd = ['lxc-device', '-n', lxc_name, 'add', device]
- self.run_command(lxc_cmd)
- self.logger.debug("%s: devices added from %s", lxc_name,
- device_path)
+ self.logger.debug("device_path is None")
diff --git a/lava_dispatcher/pipeline/actions/deploy/mount.py b/lava_dispatcher/pipeline/actions/deploy/mount.py
index ba86ea5..0214c70 100644
--- a/lava_dispatcher/pipeline/actions/deploy/mount.py
+++ b/lava_dispatcher/pipeline/actions/deploy/mount.py
@@ -121,8 +121,8 @@ class LoopCheckAction(DeployAction):
# when one is unmounted
if mounted_loops >= available_loops:
raise InfrastructureError("Insufficient loopback devices?")
- self.logger.debug("available loops: %s" % available_loops)
- self.logger.debug("mounted_loops: %s" % mounted_loops)
+ self.logger.debug("available loops: %s", available_loops)
+ self.logger.debug("mounted_loops: %s", mounted_loops)
return connection
@@ -175,7 +175,7 @@ class LoopMountAction(RetryAction):
return connection
def cleanup(self):
- self.logger.debug("%s cleanup" % self.name)
+ self.logger.debug("%s cleanup", self.name)
if self.mntdir:
if os.path.ismount(self.mntdir):
self.run_command(['umount', self.mntdir])
@@ -245,7 +245,7 @@ class Unmount(Action):
"""
connection = super(Unmount, self).run(connection, args)
mntdir = self.data['loop_mount']['mntdir']
- self.logger.debug("umounting %s" % mntdir)
+ self.logger.debug("umounting %s", mntdir)
if os.path.ismount(mntdir):
self.run_command(['umount', mntdir])
if os.path.isdir(mntdir):
diff --git a/lava_dispatcher/pipeline/actions/deploy/nfs.py b/lava_dispatcher/pipeline/actions/deploy/nfs.py
new file mode 100644
index 0000000..f82d0ee
--- /dev/null
+++ b/lava_dispatcher/pipeline/actions/deploy/nfs.py
@@ -0,0 +1,122 @@
+# Copyright (C) 2016 Linaro Limited
+#
+# Author: Dean Arnold <dean.arnold@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along
+# with this program; if not, see <http://www.gnu.org/licenses>.
+
+# List just the subclasses supported for this base strategy
+# imported by the parser to populate the list of subclasses.
+
+from lava_dispatcher.pipeline.action import Pipeline
+from lava_dispatcher.pipeline.logical import Deployment
+from lava_dispatcher.pipeline.actions.deploy import DeployAction
+from lava_dispatcher.pipeline.actions.deploy.download import DownloaderAction
+from lava_dispatcher.pipeline.actions.deploy.apply_overlay import (
+ PrepareOverlayTftp,
+ ExtractNfsRootfs,
+ OverlayAction,
+ ExtractModules,
+ ApplyOverlayTftp,
+)
+from lava_dispatcher.pipeline.actions.deploy.environment import DeployDeviceEnvironment
+from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
+from lava_dispatcher.pipeline.utils.constants import DISPATCHER_DOWNLOAD_DIR
+
+
+def nfs_accept(device, parameters):
+ """
+ Each NFS deployment strategy uses these checks
+ as a base
+ """
+ if 'to' not in parameters:
+ return False
+ if parameters['to'] != 'nfs':
+ return False
+ if not device:
+ return False
+ if 'actions' not in device:
+ raise RuntimeError("Invalid device configuration")
+ if 'deploy' not in device['actions']:
+ return False
+ if 'methods' not in device['actions']['deploy']:
+ raise RuntimeError("Device misconfiguration")
+ return True
+
+
+class Nfs(Deployment):
+ """
+ Strategy class for a NFS deployment.
+ Downloads rootfs and deploys to NFS server on dispatcher
+ """
+
+ compatibility = 1
+
+ def __init__(self, parent, parameters):
+ super(Nfs, self).__init__(parent)
+ self.action = NfsAction()
+ self.action.section = self.action_type
+ self.action.job = self.job
+ parent.add_action(self.action, parameters)
+
+ @classmethod
+ def accepts(cls, device, parameters):
+ if not nfs_accept(device, parameters):
+ return False
+ if 'nfs' in device['actions']['deploy']['methods']:
+ return True
+ return False
+
+
+class NfsAction(DeployAction): # pylint:disable=too-many-instance-attributes
+
+ def __init__(self):
+ super(NfsAction, self).__init__()
+ self.name = "nfs-deploy"
+ self.description = "deploy nfsrootfs"
+ self.summary = "NFS deployment"
+ self.download_dir = DISPATCHER_DOWNLOAD_DIR
+ try:
+ self.download_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
+ except OSError:
+            # allows for unit tests to operate as a normal user.
+ self.suffix = '/'
+
+ def validate(self):
+ super(NfsAction, self).validate()
+ if not self.valid:
+ return
+ if 'nfsrootfs' in self.parameters and 'nfs_url' in self.parameters:
+ self.errors = "Only one of nfsrootfs or nfs_url can be specified"
+ lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
+ self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id
+
+ def populate(self, parameters):
+ self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+ if 'nfsrootfs' in parameters:
+ download = DownloaderAction('nfsrootfs', path=self.download_dir)
+ download.max_retries = 3
+ self.internal_pipeline.add_action(download)
+ if 'modules' in parameters:
+ download = DownloaderAction('modules', path=self.download_dir)
+ download.max_retries = 3
+ self.internal_pipeline.add_action(download)
+ # NfsAction is a deployment, so once the nfsrootfs has been deployed, just do the overlay
+ self.internal_pipeline.add_action(ExtractNfsRootfs())
+ self.internal_pipeline.add_action(OverlayAction())
+ self.internal_pipeline.add_action(ExtractModules())
+ self.internal_pipeline.add_action(ApplyOverlayTftp())
+ self.internal_pipeline.add_action(DeployDeviceEnvironment())
diff --git a/lava_dispatcher/pipeline/actions/deploy/overlay.py b/lava_dispatcher/pipeline/actions/deploy/overlay.py
index 4cdbba5..3dfbb84 100644
--- a/lava_dispatcher/pipeline/actions/deploy/overlay.py
+++ b/lava_dispatcher/pipeline/actions/deploy/overlay.py
@@ -81,6 +81,9 @@ class OverlayAction(DeployAction):
self.lava_test_dir = os.path.realpath(
'%s/../../../lava_test_shell' % os.path.dirname(__file__))
self.scripts_to_copy = []
+ self.lava_v2_test_dir = os.path.realpath(
+ '%s/../../../pipeline/lava_test_shell' % os.path.dirname(__file__))
+ self.v2_scripts_to_copy = []
# 755 file permissions
self.xmod = stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IXOTH | stat.S_IROTH
@@ -90,10 +93,24 @@ class OverlayAction(DeployAction):
# Distro-specific scripts override the generic ones
distro = self.parameters['deployment_data']['distro']
distro_support_dir = '%s/distro/%s' % (self.lava_test_dir, distro)
+ lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
+ lava_test_results_dir = lava_test_results_dir % self.job.job_id
+ namespace = self.parameters.get('namespace', None)
+ if namespace:
+ self.action_namespaces.append(namespace)
+ self.set_common_data(namespace, 'lava_test_results_dir',
+ lava_test_results_dir)
+ lava_test_sh_cmd = self.parameters['deployment_data']['lava_test_sh_cmd']
+ self.set_common_data(namespace, 'lava_test_sh_cmd',
+ lava_test_sh_cmd)
for script in glob.glob(os.path.join(distro_support_dir, 'lava-*')):
self.scripts_to_copy.append(script)
+ for script in glob.glob(os.path.join(self.lava_v2_test_dir, 'lava-*')):
+ self.v2_scripts_to_copy.append(script)
if not self.scripts_to_copy:
self.errors = "Unable to locate lava_test_shell support scripts."
+ if not self.v2_scripts_to_copy:
+ self.errors = "Unable to update lava_test_shell support scripts."
if self.job.parameters.get('output_dir', None) is None:
self.errors = "Unable to use output directory."
@@ -147,6 +164,24 @@ class OverlayAction(DeployAction):
fout.write("#!%s\n\n" % shell)
fout.write(fin.read())
os.fchmod(fout.fileno(), self.xmod)
+ for fname in self.v2_scripts_to_copy:
+ with open(fname, 'r') as fin:
+ output_file = '%s/bin/%s' % (lava_path, os.path.basename(fname))
+ self.logger.debug("Updating %s", output_file)
+ with open(output_file, 'w') as fout:
+ fout.write("#!%s\n\n" % shell)
+ fout.write(fin.read())
+ os.fchmod(fout.fileno(), self.xmod)
+
+ # Generate the file containing the secrets
+ if 'secrets' in self.job.parameters:
+ self.logger.debug("Creating %s/secrets", lava_path)
+ with open(os.path.join(lava_path, 'secrets'), 'w') as fout:
+ for key, value in self.job.parameters['secrets'].items():
+ if key == 'yaml_line':
+ continue
+ fout.write("%s=%s\n" % (key, value))
+
connection = super(OverlayAction, self).run(connection, args)
return connection
@@ -163,6 +198,8 @@ class MultinodeOverlayAction(OverlayAction):
self.lava_multi_node_test_dir = os.path.realpath(
'%s/../../../lava_test_shell/multi_node' % os.path.dirname(__file__))
self.lava_multi_node_cache_file = '/tmp/lava_multi_node_cache.txt'
+ self.lava_v2_multi_node_test_dir = os.path.realpath(
+ '%s/../../../pipeline/lava_test_shell/multi_node/' % os.path.dirname(__file__))
self.role = None
self.protocol = MultinodeProtocol.name
@@ -184,8 +221,11 @@ class MultinodeOverlayAction(OverlayAction):
self.errors = "multinode job without a specified role"
else:
self.role = self.job.parameters['protocols'][self.protocol]['role']
+ # FIXME: rationalise all this when the V1 code is removed.
+ for script in glob.glob(os.path.join(self.lava_v2_multi_node_test_dir, 'lava-*')):
+ self.v2_scripts_to_copy.append(script)
- def run(self, connection, args=None):
+ def run(self, connection, args=None): # pylint: disable=too-many-locals,too-many-branches,too-many-statements
if self.role is None:
self.logger.debug("skipped %s", self.name)
return connection
@@ -213,7 +253,8 @@ class MultinodeOverlayAction(OverlayAction):
lava_path = os.path.abspath("%s/%s" % (location, lava_test_results_dir))
scripts_to_copy = glob.glob(os.path.join(self.lava_multi_node_test_dir, 'lava-*'))
self.logger.debug(self.lava_multi_node_test_dir)
- self.logger.debug({"lava_path": lava_path, "scripts": scripts_to_copy})
+ self.logger.debug("lava_path: %s", lava_path)
+ self.logger.debug("scripts to copy %s", scripts_to_copy)
for fname in scripts_to_copy:
with open(fname, 'r') as fin:
@@ -243,6 +284,19 @@ class MultinodeOverlayAction(OverlayAction):
fout.write("LAVA_MULTI_NODE_DEBUG='yes'\n")
fout.write(fin.read())
os.fchmod(fout.fileno(), self.xmod)
+ for fname in self.v2_scripts_to_copy:
+ with open(fname, 'r') as fin:
+ foutname = os.path.basename(fname)
+ output_file = '%s/bin/%s' % (lava_path, foutname)
+ self.logger.debug("Updating %s", output_file)
+ with open(output_file, 'w') as fout:
+ fout.write("#!%s\n\n" % shell)
+ fout.write("LAVA_TEST_BIN='%s/bin'\n" % lava_test_results_dir)
+ fout.write("LAVA_MULTI_NODE_CACHE='%s'\n" % self.lava_multi_node_cache_file)
+ # always write out full debug logs
+ fout.write("LAVA_MULTI_NODE_DEBUG='yes'\n")
+ fout.write(fin.read())
+ os.fchmod(fout.fileno(), self.xmod)
self.call_protocols()
return connection
diff --git a/lava_dispatcher/pipeline/actions/deploy/removable.py b/lava_dispatcher/pipeline/actions/deploy/removable.py
index 1d0d650..f692891 100644
--- a/lava_dispatcher/pipeline/actions/deploy/removable.py
+++ b/lava_dispatcher/pipeline/actions/deploy/removable.py
@@ -34,10 +34,20 @@ from lava_dispatcher.pipeline.actions.deploy.overlay import (
CustomisationAction,
OverlayAction,
)
+from lava_dispatcher.pipeline.actions.deploy.apply_overlay import (
+ ApplyOverlayImage,
+)
from lava_dispatcher.pipeline.actions.deploy import DeployAction
from lava_dispatcher.pipeline.actions.deploy.environment import DeployDeviceEnvironment
from lava_dispatcher.pipeline.utils.network import dispatcher_ip
-from lava_dispatcher.pipeline.utils.constants import DISPATCHER_DOWNLOAD_DIR
+from lava_dispatcher.pipeline.utils.filesystem import (
+ mkdtemp,
+ tftpd_dir,
+)
+from lava_dispatcher.pipeline.utils.strings import substitute
+from lava_dispatcher.pipeline.utils.constants import (
+ SECONDARY_DEPLOYMENT_MSG,
+)
class Removable(Deployment):
@@ -69,11 +79,11 @@ class Removable(Deployment):
if 'to' in parameters:
# connection support
# Which deployment method to use?
- if 'usb' == parameters['to']:
+ if parameters['to'] == 'usb':
if 'device' in parameters:
job_device = parameters['device']
media = 'usb'
- if 'sata' == parameters['to']:
+ if parameters['to'] == 'sata':
if 'device' in parameters:
job_device = parameters['device']
media = 'sata'
@@ -107,10 +117,12 @@ class DDAction(Action):
super(DDAction, self).validate()
if 'device' not in self.parameters:
self.errors = "missing device for deployment"
- if 'download' not in self.parameters:
+ if 'tool' not in self.parameters['download']:
self.errors = "missing download tool for deployment"
- if not os.path.isabs(self.parameters['download']):
- self.errors = "download parameter needs to be an absolute path"
+ if 'options' not in self.parameters['download']:
+ self.errors = "missing options for download tool"
+ if not os.path.isabs(self.parameters['download']['tool']):
+ self.errors = "download tool parameter needs to be an absolute path"
uuid_required = False
self.boot_params = self.job.device['parameters']['media'][self.parameters['to']]
if 'media' in self.job.device:
@@ -151,30 +163,55 @@ class DDAction(Action):
raise JobError("Unable to find disk by id %s" %
self.boot_params[self.parameters['device']]['uuid'])
- suffix = "tmp"
- wget_options = ''
- if self.parameters.get('download', '/usr/bin/wget') == '/usr/bin/wget':
- wget_options = "--no-check-certificate --no-proxy --connect-timeout=30 -S --progress=dot:giga -O -"
- wget_cmd = "%s %s http://%s/%s/%s" % (
- self.parameters['download'], wget_options, dispatcher_ip(), suffix, decompressed_image
+ suffix = "%s/%s" % ("tmp", self.data['storage-deploy'].get('suffix', ''))
+
+ # As the test writer can use any tool we cannot predict where the
+ # download URL will be positioned in the download command.
+ # Providing the download URL as a substitution option gets round this
+ download_url = "http://%s/%s/%s" % (
+ dispatcher_ip(), suffix, decompressed_image
)
+ substitutions = {
+ '{DOWNLOAD_URL}': download_url
+ }
+ download_options = substitute([self.parameters['download']['options']], substitutions)[0]
+ download_cmd = "%s %s" % (
+ self.parameters['download']['tool'], download_options
+ )
+
dd_cmd = "dd of='%s' bs=4M" % device_path # busybox dd does not support other flags
- connection.sendline("%s | %s" % (wget_cmd, dd_cmd))
- # connection.wait() # long command lines can echo back
- # connection.wait()
+
+ # We must ensure that the secondary media deployment has completed before handing over
+ # the connection. Echoing the SECONDARY_DEPLOYMENT_MSG after the deployment means we
+ # always have a constant string to match against
+ prompt_string = connection.prompt_str
+ connection.prompt_str = SECONDARY_DEPLOYMENT_MSG
+ self.logger.debug("Changing prompt to %s" % connection.prompt_str)
+ connection.sendline("%s | %s ; echo %s" % (download_cmd, dd_cmd, SECONDARY_DEPLOYMENT_MSG))
+ self.wait(connection)
if not self.valid:
self.logger.error(self.errors)
+ # set prompt back
+ connection.prompt_str = prompt_string
+ self.logger.debug("Changing prompt to %s" % connection.prompt_str)
return connection
-class MassStorage(DeployAction):
+class MassStorage(DeployAction): # pylint: disable=too-many-instance-attributes
def __init__(self):
super(MassStorage, self).__init__()
self.name = "storage-deploy"
self.description = "Deploy image to mass storage"
self.summary = "write image to storage"
+ self.suffix = None
+ try:
+ self.image_path = mkdtemp(basedir=tftpd_dir())
+ except OSError:
+ self.suffix = '/'
+ self.image_path = mkdtemp() # unit test support
+ self.suffix = os.path.basename(self.image_path)
def validate(self):
super(MassStorage, self).validate()
@@ -188,6 +225,9 @@ class MassStorage(DeployAction):
self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id
if 'device' in self.parameters:
self.set_common_data('u-boot', 'device', self.parameters['device'])
+ if self.suffix:
+ self.data[self.name].setdefault('suffix', self.suffix)
+ self.data[self.name].setdefault('suffix', os.path.basename(self.image_path))
def populate(self, parameters):
"""
@@ -199,12 +239,13 @@ class MassStorage(DeployAction):
but not the device.
"""
self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
+ self.internal_pipeline.add_action(CustomisationAction())
+ self.internal_pipeline.add_action(OverlayAction()) # idempotent, includes testdef
if 'image' in parameters:
- download = DownloaderAction('image', path=DISPATCHER_DOWNLOAD_DIR)
+ download = DownloaderAction('image', path=self.image_path)
download.max_retries = 3
self.internal_pipeline.add_action(download)
+ self.internal_pipeline.add_action(ApplyOverlayImage())
self.internal_pipeline.add_action(DDAction())
# FIXME: could support tarballs too
- self.internal_pipeline.add_action(CustomisationAction())
- self.internal_pipeline.add_action(OverlayAction()) # idempotent, includes testdef
self.internal_pipeline.add_action(DeployDeviceEnvironment())
diff --git a/lava_dispatcher/pipeline/actions/deploy/strategies.py b/lava_dispatcher/pipeline/actions/deploy/strategies.py
index 69a2370..49e2b4c 100644
--- a/lava_dispatcher/pipeline/actions/deploy/strategies.py
+++ b/lava_dispatcher/pipeline/actions/deploy/strategies.py
@@ -30,3 +30,4 @@ from lava_dispatcher.pipeline.actions.deploy.ssh import Ssh
from lava_dispatcher.pipeline.actions.deploy.fastboot import Fastboot
from lava_dispatcher.pipeline.actions.deploy.lxc import Lxc
from lava_dispatcher.pipeline.actions.deploy.iso import DeployIso
+from lava_dispatcher.pipeline.actions.deploy.nfs import Nfs
diff --git a/lava_dispatcher/pipeline/actions/deploy/testdef.py b/lava_dispatcher/pipeline/actions/deploy/testdef.py
index aeb80d4..7bde413 100644
--- a/lava_dispatcher/pipeline/actions/deploy/testdef.py
+++ b/lava_dispatcher/pipeline/actions/deploy/testdef.py
@@ -26,7 +26,6 @@ import base64
import hashlib
import tarfile
import shutil
-from uuid import uuid4
from collections import OrderedDict
from nose.tools import nottest
from lava_dispatcher.pipeline.action import (
@@ -39,6 +38,7 @@ from lava_dispatcher.pipeline.action import (
from lava_dispatcher.pipeline.actions.test import TestAction
from lava_dispatcher.pipeline.utils.strings import indices
from lava_dispatcher.pipeline.utils.vcs import BzrHelper, GitHelper
+from lava_dispatcher.pipeline.utils.constants import DEFAULT_V1_FIXUP, DEFAULT_V1_PATTERN
def identify_test_definitions(parameters):
@@ -133,13 +133,9 @@ class RepoAction(Action):
self.summary = "repo base class"
self.vcs = None
self.runner = None
- self.default_pattern = "(?P<test_case_id>.*-*)\\s+:\\s+(?P<result>(PASS|pass|FAIL|fail|SKIP|skip|UNKNOWN|unknown))"
- self.default_fixupdict = {'PASS': 'pass', 'FAIL': 'fail', 'SKIP': 'skip', 'UNKNOWN': 'unknown'}
- # FIXME: sort out a genuinely unique ID based on the *database* JobID and pipeline level for reproducibility
- # {DB-JobID}-{PipelineLevel}, e.g. 15432.0-3.5.4
- # delay until jobs can be scheduled from the UI.
- # allows individual testcase results to be at a predictable URL
- self.uuid = str(uuid4())
+ self.default_pattern = DEFAULT_V1_PATTERN
+ self.default_fixupdict = DEFAULT_V1_FIXUP
+ self.uuid = None
@classmethod
def select(cls, repo_type):
@@ -168,7 +164,18 @@ class RepoAction(Action):
raise RuntimeError("RepoAction validate called super without setting the vcs")
if not os.path.exists(self.vcs.binary):
self.errors = "%s is not installed on the dispatcher." % self.vcs.binary
+ # a genuinely unique ID based on the *database* JobID and pipeline level for reproducibility
+ # and tracking - {DB-JobID}_{PipelineLevel}, e.g. 15432.0_3.5.4
+ self.uuid = "%s_%s" % (self.job.job_id, self.level)
super(RepoAction, self).validate()
+ # list of levels involved in the repo actions for this overlay
+ uuid_list = self.get_common_data('repo-action', 'uuid-list')
+ if uuid_list:
+ if self.uuid not in uuid_list:
+ uuid_list.append(self.uuid)
+ else:
+ uuid_list = [self.uuid]
+ self.set_common_data('repo-action', 'uuid-list', uuid_list)
def run(self, connection, args=None):
"""
@@ -178,6 +185,7 @@ class RepoAction(Action):
unpack an overlay.tgz after mounting.
"""
connection = super(RepoAction, self).run(connection, args)
+ # FIXME: standardise on set_common_data and get_common_data
self.data.setdefault('test', {})
self.data['test'].setdefault(self.uuid, {})
self.data['test'][self.uuid].setdefault('runner_path', {})
@@ -242,9 +250,14 @@ class RepoAction(Action):
if 'parse' in testdef:
pattern = testdef['parse'].get('pattern', '')
+ fixup = testdef['parse'].get('fixupdict', '')
else:
pattern = self.default_pattern
- self.data['test'][self.uuid]['testdef_pattern'] = {'pattern': pattern}
+ fixup = self.default_fixupdict
+ self.data['test'][self.uuid].setdefault('testdef_pattern', {})
+ self.data['test'][self.uuid]['testdef_pattern'].update({'pattern': pattern})
+ self.data['test'][self.uuid]['testdef_pattern'].update({'fixupdict': fixup})
+ self.logger.debug("uuid=%s testdef=%s" % (self.uuid, self.data['test'][self.uuid]['testdef_pattern']))
class GitRepoAction(RepoAction): # pylint: disable=too-many-public-methods
@@ -298,6 +311,7 @@ class GitRepoAction(RepoAction): # pylint: disable=too-many-public-methods
if os.path.exists(runner_path):
shutil.rmtree(runner_path)
+ self.logger.info("Fetching tests from %s", self.parameters['repository'])
commit_id = self.vcs.clone(runner_path, self.parameters.get('revision', None))
if commit_id is None:
raise RuntimeError("Unable to get test definition from %s (%s)" % (self.vcs.binary, self.parameters))
@@ -306,8 +320,8 @@ class GitRepoAction(RepoAction): # pylint: disable=too-many-public-methods
'repository': self.parameters['repository'], 'path': self.parameters['path']}
# now read the YAML to create a testdef dict to retrieve metadata
- self.logger.debug(os.path.join(runner_path, self.parameters['path']))
yaml_file = os.path.join(runner_path, self.parameters['path'])
+ self.logger.debug("Tests stored (tmp) in %s", yaml_file)
if not os.path.exists(yaml_file):
raise JobError("Unable to find test definition YAML: %s" % yaml_file)
with open(yaml_file, 'r') as test_file:
@@ -567,16 +581,16 @@ class TestDefinitionAction(TestAction):
files are created by TestOverlayAction. More complex scripts like the
install:deps script and the main run script have custom Actions.
"""
- index = {}
+ index = OrderedDict()
self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
self.test_list = identify_test_definitions(self.job.parameters)
if not self.test_list:
return
+ self.set_common_data(self.name, 'test_list', self.test_list[0])
for testdefs in self.test_list:
for testdef in testdefs:
- # FIXME: only run the tests defined for this test action, not all the jobs for this deployment/job
- # This includes only running the install steps for the relevant deployment as the next deployment
- # could be a different OS.
+ # namespace support allows only running the install steps for the relevant
+ # deployment as the next deployment could be a different OS.
handler = RepoAction.select(testdef['from'])()
# set the full set of job YAML parameters for this handler as handler parameters.
@@ -584,6 +598,8 @@ class TestDefinitionAction(TestAction):
handler.parameters = testdef
# store the correct test_name before incrementing the local index dict
handler.parameters['test_name'] = "%s_%s" % (len(list(index.keys())), handler.parameters['name'])
+ self.internal_pipeline.add_action(handler)
+ handler.uuid = "%s_%s" % (self.job.job_id, handler.level)
# copy details into the overlay, one per handler but the same class each time.
overlay = TestOverlayAction()
@@ -607,12 +623,12 @@ class TestDefinitionAction(TestAction):
runsh.test_uuid = handler.uuid
index[len(list(index.keys()))] = handler.parameters['name']
- self.internal_pipeline.add_action(handler)
# add overlay handlers to the pipeline
self.internal_pipeline.add_action(overlay)
self.internal_pipeline.add_action(installer)
self.internal_pipeline.add_action(runsh)
+ self.set_common_data(self.name, 'testdef_index', index)
def validate(self):
"""
@@ -693,30 +709,42 @@ class TestOverlayAction(TestAction): # pylint: disable=too-many-instance-attrib
def validate(self):
if 'path' not in self.parameters:
self.errors = "Missing path in parameters"
- test_list = identify_test_definitions(self.job.parameters)
- for testdef in test_list[0]:
+ test_list = self.get_common_data('test-definition', 'test_list')
+ for testdef in test_list:
if 'parameters' in testdef: # optional
if not isinstance(testdef['parameters'], dict):
self.errors = "Invalid test definition parameters"
def handle_parameters(self, testdef):
- ret_val = ['###default parameters from yaml###\n']
+ ret_val = ['###default parameters from test definition###\n']
if 'params' in testdef:
for def_param_name, def_param_value in list(testdef['params'].items()):
if def_param_name is 'yaml_line':
continue
ret_val.append('%s=\'%s\'\n' % (def_param_name, def_param_value))
+ if 'parameters' in testdef:
+ for def_param_name, def_param_value in list(testdef['parameters'].items()):
+ if def_param_name is 'yaml_line':
+ continue
+ ret_val.append('%s=\'%s\'\n' % (def_param_name, def_param_value))
ret_val.append('######\n')
# inject the parameters that were set in job submission.
- ret_val.append('###test parameters from json###\n')
+ ret_val.append('###test parameters from job submission###\n')
if 'parameters' in self.parameters and self.parameters['parameters'] != '':
# turn a string into a local variable.
for param_name, param_value in list(self.parameters['parameters'].items()):
if param_name is 'yaml_line':
continue
ret_val.append('%s=\'%s\'\n' % (param_name, param_value))
- self.logger.debug('%s=\'%s\'' % (param_name, param_value))
+ self.logger.debug("%s='%s'", param_name, param_value)
+ if 'params' in self.parameters and self.parameters['params'] != '':
+ # turn a string into a local variable.
+ for param_name, param_value in list(self.parameters['params'].items()):
+ if param_name is 'yaml_line':
+ continue
+ ret_val.append('%s=\'%s\'\n' % (param_name, param_value))
+ self.logger.debug("%s='%s'", param_name, param_value)
ret_val.append('######\n')
return ret_val
@@ -751,7 +779,7 @@ class TestOverlayAction(TestAction): # pylint: disable=too-many-instance-attrib
self.results = {
'success': self.test_uuid,
- 'name': testdef['metadata']['name'],
+ 'name': self.parameters['name'],
'path': self.parameters['path'],
'from': self.parameters['from'],
}
@@ -807,7 +835,6 @@ class TestInstallAction(TestOverlayAction):
self.results = {'skipped %s' % self.name: self.test_uuid}
return
- # hostdir = self.data['test'][self.test_uuid]['overlay_path'][self.parameters['test_name']]
filename = '%s/install.sh' % runner_path
content = self.handle_parameters(testdef)
@@ -860,6 +887,25 @@ class TestRunnerAction(TestOverlayAction):
self.name = "test-runscript-overlay"
self.description = "overlay run script onto image"
self.summary = "applying LAVA test run script"
+ self.testdef_levels = {} # allow looking up the testname from the level of this action
+
+ def validate(self):
+ testdef_index = self.get_common_data('test-definition', 'testdef_index')
+ if not testdef_index:
+ self.errors = "Unable to identify test definition index"
+ # convert from testdef_index {0: 'smoke-tests', 1: 'singlenode-advanced'}
+ # to self.testdef_levels {'1.3,4,1': '0_smoke-tests', ...}
+ for count, name in testdef_index.items():
+ if self.parameters['name'] == name:
+ self.testdef_levels[self.level] = "%s_%s" % (count, name)
+ if not self.testdef_levels:
+ self.errors = "Unable to identify test definition names"
+ current = self.get_common_data(self.name, 'testdef_levels')
+ if current:
+ current.update(self.testdef_levels)
+ else:
+ current = self.testdef_levels
+ self.set_common_data(self.name, 'testdef_levels', current)
def run(self, connection, args=None):
connection = super(TestRunnerAction, self).run(connection, args)
@@ -869,6 +915,7 @@ class TestRunnerAction(TestOverlayAction):
if not os.path.exists(yaml_file):
raise JobError("Unable to find test definition YAML: %s" % yaml_file)
+ testdef_levels = self.get_common_data('test-runscript-overlay', 'testdef_levels')
with open(yaml_file, 'r') as test_file:
testdef = yaml.safe_load(test_file)
@@ -876,19 +923,18 @@ class TestRunnerAction(TestOverlayAction):
content = self.handle_parameters(testdef)
# the 'lava' testdef name is reserved
- if testdef['metadata']['name'] == 'lava':
+ if self.parameters['name'] == 'lava':
raise TestError('The "lava" test definition name is reserved.')
with open(filename, 'a') as runsh:
for line in content:
runsh.write(line)
runsh.write('set -e\n')
- runsh.write('export TESTRUN_ID=%s\n' % testdef['metadata']['name'])
+ # use the testdef_index value for the testrun name to handle repeats at source
+ runsh.write('export TESTRUN_ID=%s\n' % testdef_levels[self.level])
runsh.write('cd %s\n' % self.data['test'][self.test_uuid]['runner_path'][self.parameters['test_name']])
runsh.write('UUID=`cat uuid`\n')
runsh.write('echo "<LAVA_SIGNAL_STARTRUN $TESTRUN_ID $UUID>"\n')
- runsh.write('#wait for an ack from the dispatcher\n')
- runsh.write('read\n')
steps = testdef['run'].get('steps', [])
if steps:
for cmd in steps:
@@ -896,13 +942,11 @@ class TestRunnerAction(TestOverlayAction):
cmd = re.sub(r'\$(\d+)\b', r'\\$\1', cmd)
runsh.write('%s\n' % cmd)
runsh.write('echo "<LAVA_SIGNAL_ENDRUN $TESTRUN_ID $UUID>"\n')
- runsh.write('#wait for an ack from the dispatcher\n')
- runsh.write('read\n')
self.results = {
'success': self.test_uuid,
"filename": filename,
- 'name': testdef['metadata']['name'],
+ 'name': self.parameters['name'],
'path': self.parameters['path'],
'from': self.parameters['from'],
}
diff --git a/lava_dispatcher/pipeline/actions/test/multinode.py b/lava_dispatcher/pipeline/actions/test/multinode.py
index fa9bff1..ffdb906 100644
--- a/lava_dispatcher/pipeline/actions/test/multinode.py
+++ b/lava_dispatcher/pipeline/actions/test/multinode.py
@@ -55,6 +55,9 @@ class MultinodeTestAction(TestShellAction):
self.name = "multinode-test"
self.description = "Executing lava-test-runner"
self.summary = "Multinode Lava Test Shell"
+ self.multinode_dict = {
+ 'multinode': r'<LAVA_MULTI_NODE> <LAVA_(\S+) ([^>]+)>',
+ }
def validate(self):
super(MultinodeTestAction, self).validate()
@@ -66,11 +69,13 @@ class MultinodeTestAction(TestShellAction):
if not self.valid:
self.errors = "Invalid base class TestAction"
return
- self.patterns.update({
- 'multinode': r'<LAVA_MULTI_NODE> <LAVA_(\S+) ([^>]+)>',
- })
+ self.patterns.update(self.multinode_dict)
self.signal_director.setup(self.parameters)
+ def _reset_patterns(self):
+ super(MultinodeTestAction, self)._reset_patterns()
+ self.patterns.update(self.multinode_dict)
+
def populate(self, parameters):
"""
Select the appropriate protocol supported by this action from the list available from the job
@@ -145,6 +150,7 @@ class MultinodeTestAction(TestShellAction):
else:
message_str = ""
self.connection.sendline("<LAVA_SYNC_COMPLETE%s>" % message_str)
+ self.connection.sendline('\n')
def _on_wait(self, message_id):
self.logger.debug("Handling signal <LAVA_WAIT %s>" % message_id)
@@ -160,6 +166,7 @@ class MultinodeTestAction(TestShellAction):
for key, value in messages.items():
message_str += " %s:%s=%s" % (target, key, value)
self.connection.sendline("<LAVA_WAIT_COMPLETE%s>" % message_str)
+ self.connection.sendline('\n')
def _on_wait_all(self, message_id, role=None):
self.logger.debug("Handling signal <LAVA_WAIT_ALL %s>" % message_id)
@@ -177,3 +184,4 @@ class MultinodeTestAction(TestShellAction):
for key, value in messages.items():
message_str += " %s:%s=%s" % (target, key, value)
self.connection.sendline("<LAVA_WAIT_ALL_COMPLETE%s>" % message_str)
+ self.connection.sendline('\n')
diff --git a/lava_dispatcher/pipeline/actions/test/shell.py b/lava_dispatcher/pipeline/actions/test/shell.py
index 2cdc429..9506ab9 100644
--- a/lava_dispatcher/pipeline/actions/test/shell.py
+++ b/lava_dispatcher/pipeline/actions/test/shell.py
@@ -18,6 +18,8 @@
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
+import re
+import sys
import time
import logging
import pexpect
@@ -31,6 +33,7 @@ from lava_dispatcher.pipeline.action import (
InfrastructureError,
Pipeline,
JobError,
+ TestError
)
from lava_dispatcher.pipeline.logical import (
LavaTest,
@@ -40,9 +43,15 @@ from lava_dispatcher.pipeline.connection import (
BaseSignalHandler,
SignalMatch
)
-from lava_dispatcher.pipeline.utils.constants import DEFAULT_SHELL_PROMPT
+from lava_dispatcher.pipeline.utils.constants import (
+ DEFAULT_SHELL_PROMPT,
+ DEFAULT_V1_PATTERN,
+ DEFAULT_V1_FIXUP,
+)
+if sys.version > '3':
+ from functools import reduce # pylint: disable=redefined-builtin
-# pylint: disable=too-many-branches,too-many-statements,too-many-instance-attributes
+# pylint: disable=too-many-branches,too-many-statements,too-many-instance-attributes,logging-not-lazy
class TestShell(LavaTest):
@@ -74,6 +83,54 @@ class TestShellRetry(RetryAction):
self.internal_pipeline.add_action(TestShellAction())
+# FIXME: move to utils and call inside the overlay
+class PatternFixup(object):
+
+ def __init__(self, testdef, count):
+ """
+ Like all good arrays, the count is expected to start at zero.
+ Avoid calling from validate() or populate() - this needs the
+ RepoAction to be running.
+ """
+ super(PatternFixup, self).__init__()
+ self.pat = DEFAULT_V1_PATTERN
+ self.fixup = DEFAULT_V1_FIXUP
+ if isinstance(testdef, dict) and 'metadata' in testdef:
+ self.testdef = testdef
+ self.name = "%d_%s" % (count, reduce(dict.get, ['metadata', 'name'], testdef))
+ else:
+ self.testdef = {}
+ self.name = None
+
+ def valid(self):
+ return self.fixupdict() and self.pattern() and self.name
+
+ def update(self, pattern, fixupdict):
+ if not isinstance(pattern, str):
+ raise TestError("Unrecognised test parse pattern type: %s" % type(pattern))
+ try:
+ self.pat = re.compile(pattern, re.M)
+ except re.error as exc:
+ raise TestError("Error parsing regular expression %r: %s" % (self.pat, exc.message))
+ self.fixup = fixupdict
+
+ def fixupdict(self):
+ if 'parse' in self.testdef and 'fixupdict' in self.testdef['parse']:
+ self.fixup = self.testdef['parse']['fixupdict']
+ return self.fixup
+
+ def pattern(self):
+ if 'parse' in self.testdef and 'pattern' in self.testdef['parse']:
+ self.pat = self.testdef['parse']['pattern']
+ if not isinstance(self.pat, str):
+ raise TestError("Unrecognised test parse pattern type: %s" % type(self.pat))
+ try:
+ self.pat = re.compile(self.pat, re.M)
+ except re.error as exc:
+ raise TestError("Error parsing regular expression %r: %s" % (self.pat, exc.message))
+ return self.pat
+
+
class TestShellAction(TestAction):
"""
Sets up and runs the LAVA Test Shell Definition scripts.
@@ -88,24 +145,32 @@ class TestShellAction(TestAction):
self.name = "lava-test-shell"
self.signal_director = self.SignalDirector(None) # no default protocol
self.patterns = {}
- self.match = SignalMatch()
+ self.signal_match = SignalMatch()
self.definition = None
self.testset_name = None # FIXME
self.report = {}
self.start = None
+ self.testdef_dict = {}
+ # noinspection PyTypeChecker
+ self.pattern = PatternFixup(testdef=None, count=0)
- def validate(self):
- if "definitions" in self.parameters:
- for testdef in self.parameters["definitions"]:
- if "repository" not in testdef:
- self.errors = "Repository missing from test definition"
+ def _reset_patterns(self):
# Extend the list of patterns when creating subclasses.
- self.patterns.update({
+ self.patterns = {
"exit": "<LAVA_TEST_RUNNER>: exiting",
"eof": pexpect.EOF,
"timeout": pexpect.TIMEOUT,
"signal": r"<LAVA_SIGNAL_(\S+) ([^>]+)>",
- })
+ }
+ # noinspection PyTypeChecker
+ self.pattern = PatternFixup(testdef=None, count=0)
+
+ def validate(self):
+ if "definitions" in self.parameters:
+ for testdef in self.parameters["definitions"]:
+ if "repository" not in testdef:
+ self.errors = "Repository missing from test definition"
+ self._reset_patterns()
super(TestShellAction, self).validate()
def run(self, connection, args=None):
@@ -130,31 +195,25 @@ class TestShellAction(TestAction):
self.signal_director.connection = connection
+ pattern_dict = {self.pattern.name: self.pattern}
+ # pattern dictionary is the lookup from the STARTRUN to the parse pattern.
+ self.set_common_data(self.name, 'pattern_dictionary', pattern_dict)
+
self.logger.info("Executing test definitions using %s" % connection.name)
if not connection.prompt_str:
connection.prompt_str = [DEFAULT_SHELL_PROMPT]
# FIXME: This should be logged whenever prompt_str is changed, by the connection object.
self.logger.debug("Setting default test shell prompt %s", connection.prompt_str)
- self.logger.debug("Setting default timeout: %s" % self.timeout.duration)
connection.timeout = self.connection_timeout
self.wait(connection)
- pre_command_list = self.get_common_data(self.name, 'pre-command-list')
+ # use the string instead of self.name so that inheriting classes (like multinode)
+ # still pick up the correct command.
+ pre_command_list = self.get_common_data("lava-test-shell", 'pre-command-list')
if pre_command_list:
for command in pre_command_list:
connection.sendline(command)
- # FIXME: a predictable UID could be calculated from existing data here.
- # instead, uuid is read from the params to set _current_handler
- # FIXME: can only be run once per TestAction, so collate all patterns for all test definitions.
- # (or work out the uuid from the signal params?)
-
- # FIXME: not being set
- if self.signal_director.test_uuid:
- self.patterns.update({
- "test_case": self.data["test"][self.signal_director.test_uuid]["testdef_pattern"]["pattern"],
- })
-
with connection.test_connection() as test_connection:
# the structure of lava-test-runner means that there is just one TestAction and it must run all definitions
test_connection.sendline(
@@ -163,8 +222,13 @@ class TestShellAction(TestAction):
self.data["lava_test_results_dir"]),
delay=self.character_delay)
- if self.timeout:
+ self.logger.info("Test shell will use the higher of the action timeout and connection timeout.")
+ if self.timeout.duration > self.connection_timeout.duration:
+ self.logger.info("Setting action timeout: %.0f seconds" % self.timeout.duration)
test_connection.timeout = self.timeout.duration
+ else:
+ self.logger.info("Setting connection timeout: %.0f seconds" % self.connection_timeout.duration)
+ test_connection.timeout = self.connection_timeout.duration
while self._keep_running(test_connection, test_connection.timeout, connection.check_char):
pass
@@ -172,7 +236,42 @@ class TestShellAction(TestAction):
self.logger.debug(self.report)
return connection
- def check_patterns(self, event, test_connection, check_char):
+ def parse_v2_case_result(self, data, fixupdict=None):
+ # FIXME: Ported from V1 - still needs integration
+ if not fixupdict:
+ fixupdict = {}
+ res = {}
+ for key in data:
+ res[key] = data[key]
+
+ if key == 'measurement':
+ # Measurement accepts non-numeric values, but be careful with
+ # special characters including space, which may distrupt the
+ # parsing.
+ res[key] = res[key]
+
+ elif key == 'result':
+ if res['result'] in fixupdict:
+ res['result'] = fixupdict[res['result']]
+ if res['result'] not in ('pass', 'fail', 'skip', 'unknown'):
+ logging.error('Bad test result: %s', res['result'])
+ res['result'] = 'unknown'
+
+ if 'test_case_id' not in res:
+ self.logger.warning(
+ """Test case results without test_case_id (probably a sign of an """
+ """incorrect parsing pattern being used): %s""", res)
+
+ if 'result' not in res:
+ self.logger.warning(
+ """Test case results without result (probably a sign of an """
+ """incorrect parsing pattern being used): %s""", res)
+ self.logger.warning('Setting result to "unknown"')
+ res['result'] = 'unknown'
+
+ return res
+
+ def check_patterns(self, event, test_connection, check_char): # pylint: disable=too-many-locals
"""
Defines the base set of pattern responses.
Stores the results of testcases inside the TestAction
@@ -189,8 +288,6 @@ class TestShellAction(TestAction):
self.testset_name = None
elif event == "timeout":
- # if target.is_booted():
- # target.reset_boot()
self.logger.warning("err: lava_test_shell has timed out")
self.errors = "lava_test_shell has timed out"
self.testset_name = None
@@ -203,32 +300,49 @@ class TestShellAction(TestAction):
self.signal_director.test_uuid = params[1]
self.definition = params[0]
uuid = params[1]
+ self.start = time.time()
self.logger.debug("Starting test definition: %s" % self.definition)
- # self._handle_testrun(params)
+ self.logger.info("Starting test lava.%s (%s)", self.definition, uuid)
self.start = time.time()
+ # set the pattern for this run from pattern_dict
+ testdef_index = self.get_common_data('test-definition', 'testdef_index')
+ uuid_list = self.get_common_data('repo-action', 'uuid-list')
+ for key, value in testdef_index.items():
+ if self.definition == "%s_%s" % (key, value):
+ pattern = self.job.context['test'][uuid_list[key]]['testdef_pattern']['pattern']
+ fixup = self.job.context['test'][uuid_list[key]]['testdef_pattern']['fixupdict']
+ self.patterns.update({'test_case_result': re.compile(pattern, re.M)})
+ self.pattern.update(pattern, fixup)
+ self.logger.info("Enabling test definition pattern %r" % pattern)
self.logger.results({
- 'test_definition': 'lava',
- self.definition: {
- 'test_definition_start': self.definition,
- 'success': uuid
- }
+ "definition": "lava",
+ "case": self.definition,
+ "uuid": uuid,
+ # The test is marked as failed and updated to "pass" when finished.
+ # If something goes wrong then it will stay as "fail".
+ "result": "fail"
})
elif name == "ENDRUN":
self.definition = params[0]
uuid = params[1]
- self.logger.debug("Ending test definition: %s" % self.definition)
+ # remove the pattern for this run from pattern_dict
+ self._reset_patterns()
+ self.logger.info("Ending use of test pattern.")
+ self.logger.info("Ending test lava.%s (%s), duration %.02f",
+ self.definition, uuid,
+ time.time() - self.start)
self.logger.results({
- 'test_definition': 'lava',
- self.definition: {
- 'success': uuid,
- "duration": "%.02f" % (time.time() - self.start)
- }
+ "definition": "lava",
+ "case": self.definition,
+ "uuid": uuid,
+ "duration": "%.02f" % (time.time() - self.start),
+ "result": "pass"
})
self.start = None
elif name == "TESTCASE":
data = handle_testcase(params)
- res = self.match.match(data) # FIXME: rename!
- self.logger.debug("res: %s data: %s" % (res, data))
+ # get the fixup from the pattern_dict
+ res = self.signal_match.match(data, fixupdict=self.pattern.fixupdict())
p_res = self.data["test"][
self.signal_director.test_uuid
].setdefault("results", OrderedDict())
@@ -239,24 +353,34 @@ class TestShellAction(TestAction):
raise JobError(
"Duplicate test_case_id in results: %s",
res["test_case_id"])
-
+ # check for measurements
+ calc = {}
+ if 'measurement' in res:
+ calc['measurement'] = res['measurement']
+ if 'measurement' in res and 'units' in res:
+ calc['units'] = res['units']
# turn the result dict inside out to get the unique
# test_case_id/testset_name as key and result as value
if self.testset_name:
- self.logger.debug("result: %s" % res)
- self.logger.results({
- 'test_definition': self.definition,
- 'test_set': self.testset_name,
- res["test_case_id"]: res["result"]})
+ res_data = {
+ 'definition': self.definition,
+ 'case': res["test_case_id"],
+ 'set': self.testset_name,
+ 'result': res["result"]
+ }
+ res_data.update(calc)
+ self.logger.results(res_data)
self.report.update({
- 'test_set': self.testset_name,
- res["test_case_id"]: res["result"]
- })
+ "set": self.testset_name,
+ "case": res["test_case_id"],
+ "result": res["result"]})
else:
- self.logger.debug("result: %s" % res)
- self.logger.results({
- 'test_definition': self.definition,
- res["test_case_id"]: res["result"]})
+ res_data = {
+ 'definition': self.definition,
+ 'case': res["test_case_id"],
+ 'result': res["result"]}
+ res_data.update(calc)
+ self.logger.results(res_data)
self.report.update({
res["test_case_id"]: res["result"]
})
@@ -278,29 +402,42 @@ class TestShellAction(TestAction):
self.signal_director.signal(name, params)
except KeyboardInterrupt:
raise KeyboardInterrupt
- # force output in case there was none but minimal content to increase speed.
- test_connection.sendline(check_char)
ret_val = True
elif event == "test_case":
match = test_connection.match
if match is pexpect.TIMEOUT:
- # if target.is_booted():
- # target.reset_boot()
self.logger.warning("err: lava_test_shell has timed out (test_case)")
else:
- res = self.match.match(match.groupdict()) # FIXME: rename!
+ res = self.signal_match.match(match.groupdict())
self.logger.debug("outer_loop_result: %s" % res)
- # self.data["test"][self.signal_director.test_uuid].setdefault("results", {})
- # self.data["test"][self.signal_director.test_uuid]["results"].update({
- # {res["test_case_id"]: res}
- # })
ret_val = True
+ elif event == 'test_case_result':
+ res = test_connection.match.groupdict()
+ if res:
+ # FIXME: make this a function
+ # check for measurements
+ calc = {}
+ if 'measurement' in res:
+ calc['measurement'] = res['measurement']
+ if 'measurement' in res and 'units' in res:
+ calc['units'] = res['units']
+ res_data = {
+ 'definition': self.definition,
+ 'case': res["test_case_id"],
+ 'result': res["result"]}
+ res_data.update(calc)
+ self.logger.results(res_data)
+ self.report.update({
+ res["test_case_id"]: res["result"]
+ })
+ ret_val = True
return ret_val
def _keep_running(self, test_connection, timeout, check_char):
- self.logger.debug("test shell timeout: %d seconds" % timeout)
+ if 'test_case_result' in self.patterns:
+ self.logger.info("Test case result pattern: %r" % self.patterns['test_case_result'])
retval = test_connection.expect(list(self.patterns.values()), timeout=timeout)
return self.check_patterns(list(self.patterns.keys())[retval], test_connection, check_char)
diff --git a/lava_dispatcher/pipeline/connection.py b/lava_dispatcher/pipeline/connection.py
index 265406a4..d3fad30 100644
--- a/lava_dispatcher/pipeline/connection.py
+++ b/lava_dispatcher/pipeline/connection.py
@@ -235,7 +235,7 @@ class CommandRunner(object):
return return_code
-class Protocol(object): # pylint: disable=abstract-class-not-used
+class Protocol(object):
"""
Similar to a Connection object, provides a transport layer for the dispatcher.
Uses a pre-defined API instead of pexpect using Shell.
diff --git a/lava_dispatcher/pipeline/connections/lxc.py b/lava_dispatcher/pipeline/connections/lxc.py
index 958465e..3e38204 100644
--- a/lava_dispatcher/pipeline/connections/lxc.py
+++ b/lava_dispatcher/pipeline/connections/lxc.py
@@ -55,23 +55,31 @@ class ConnectLxc(Action):
# Attach usb device to lxc
if 'device_path' in list(self.job.device.keys()):
- # Wait USB_SHOW_UP_TIMEOUT seconds for the usb device to show up
- self.logger.info("Waiting %d seconds for usb device to show up" %
- USB_SHOW_UP_TIMEOUT)
- sleep(USB_SHOW_UP_TIMEOUT)
+ device_path = self.job.device['device_path']
+ if not isinstance(device_path, list):
+ raise JobError("device_path should be a list")
- device_path = os.path.realpath(self.job.device['device_path'])
- if os.path.isdir(device_path):
- devices = os.listdir(device_path)
- else:
- devices = [device_path]
+ if device_path:
+ # Wait USB_SHOW_UP_TIMEOUT seconds for usb device to show up
+ self.logger.info("Wait %d seconds for usb device to show up",
+ USB_SHOW_UP_TIMEOUT)
+ sleep(USB_SHOW_UP_TIMEOUT)
+
+ for path in device_path:
+ path = os.path.realpath(path)
+ if os.path.isdir(path):
+ devices = os.listdir(path)
+ else:
+ devices = [path]
- for device in devices:
- device = os.path.join(device_path, device)
- lxc_cmd = ['lxc-device', '-n', lxc_name, 'add', device]
- self.run_command(lxc_cmd)
- self.logger.debug("%s: devices added from %s", lxc_name,
- device_path)
+ for device in devices:
+ device = os.path.join(path, device)
+ lxc_cmd = ['lxc-device', '-n', lxc_name, 'add', device]
+ self.run_command(lxc_cmd)
+ self.logger.debug("%s: devices added from %s", lxc_name,
+ path)
+ else:
+ self.logger.debug("device_path is None")
cmd = "lxc-attach -n {0}".format(lxc_name)
self.logger.info("%s Connecting to device using '%s'", self.name, cmd)
diff --git a/lava_dispatcher/pipeline/connections/ssh.py b/lava_dispatcher/pipeline/connections/ssh.py
index ceb0d8e..0aeef1a 100644
--- a/lava_dispatcher/pipeline/connections/ssh.py
+++ b/lava_dispatcher/pipeline/connections/ssh.py
@@ -19,15 +19,12 @@
# with this program; if not, see <http://www.gnu.org/licenses>.
-import os
-import json
import signal
from lava_dispatcher.pipeline.action import JobError
from lava_dispatcher.pipeline.utils.filesystem import check_ssh_identity_file
from lava_dispatcher.pipeline.utils.shell import infrastructure_error
from lava_dispatcher.pipeline.action import Action
from lava_dispatcher.pipeline.shell import ShellCommand, ShellSession
-from lava_dispatcher.pipeline.protocols.multinode import MultinodeProtocol
from lava_dispatcher.pipeline.utils.constants import DEFAULT_SHELL_PROMPT
diff --git a/lava_dispatcher/pipeline/deployment_data.py b/lava_dispatcher/pipeline/deployment_data.py
index cafb7ad..5825053 100644
--- a/lava_dispatcher/pipeline/deployment_data.py
+++ b/lava_dispatcher/pipeline/deployment_data.py
@@ -151,6 +151,22 @@ fedora = deployment_data_dict({ # pylint: disable=invalid-name
'lava_test_shell_file': None,
})
+centos = deployment_data_dict({ # pylint: disable=invalid-name
+ 'TESTER_PS1': r"linaro-test [rc=$(echo \$?)]# ",
+ 'TESTER_PS1_PATTERN': r"linaro-test \[rc=(\d+)\]# ",
+ 'TESTER_PS1_INCLUDES_RC': True,
+ 'boot_cmds': 'boot_cmds',
+
+ # for lava-test-shell
+ 'distro': 'centos',
+ 'tar_flags': '--warning no-timestamp',
+ 'lava_test_sh_cmd': '/bin/bash',
+ 'lava_test_dir': '/lava-%s',
+ 'lava_test_results_part_attr': 'root_part',
+ 'lava_test_results_dir': '/lava-%s',
+ 'lava_test_shell_file': None,
+})
+
debian_installer = deployment_data_dict({ # pylint: disable=invalid-name
'TESTER_PS1': r"linaro-test [rc=$(echo \$?)]# ",
'TESTER_PS1_PATTERN': r"linaro-test \[rc=(\d+)\]# ",
@@ -178,3 +194,20 @@ debian_installer = deployment_data_dict({ # pylint: disable=invalid-name
'lava_test_results_dir': '/lava-%s',
'lava_test_shell_file': None,
})
+
+centos_installer = deployment_data_dict({ # pylint: disable=invalid-name
+ 'TESTER_PS1': r"linaro-test [rc=$(echo \$?)]# ",
+ 'TESTER_PS1_PATTERN': r"linaro-test \[rc=(\d+)\]# ",
+ 'TESTER_PS1_INCLUDES_RC': True,
+ 'boot_cmds': 'boot_cmds',
+ 'installer_extra_cmd': 'curl {OVERLAY_URL} > /lava-overlay.tar.gz\ntar -zxvf /lava-overlay.tar.gz -C /',
+ 'preseed_to_ramdisk': "preseed.cfg",
+
+ # for lava-test-shell
+ 'distro': 'centos',
+ 'lava_test_sh_cmd': '/bin/bash',
+ 'lava_test_dir': '/lava-%s',
+ 'lava_test_results_part_attr': 'root_part',
+ 'lava_test_results_dir': '/lava-%s',
+ 'lava_test_shell_file': None,
+})
diff --git a/lava_dispatcher/pipeline/device_types/beaglebone-black.conf b/lava_dispatcher/pipeline/device_types/beaglebone-black.conf
index 27aff5e..e65410b 100644
--- a/lava_dispatcher/pipeline/device_types/beaglebone-black.conf
+++ b/lava_dispatcher/pipeline/device_types/beaglebone-black.conf
@@ -6,9 +6,9 @@ parameters:
ramdisk: '0x81600000'
dtb: '0x815f0000'
bootz:
- kernel: '0x81000000'
- ramdisk: '0x82000000'
- dtb: '0x81f00000'
+ kernel: '0x82000000'
+ ramdisk: '0x83000000'
+ dtb: '0x88000000'
actions:
deploy:
diff --git a/lava_dispatcher/pipeline/devices/d02-01.yaml b/lava_dispatcher/pipeline/devices/d02-01.yaml
index ae99712..6b89d8c 100644
--- a/lava_dispatcher/pipeline/devices/d02-01.yaml
+++ b/lava_dispatcher/pipeline/devices/d02-01.yaml
@@ -1,16 +1,13 @@
device_type: d02
-parameters:
- stuff:
- - junk
commands:
connect: telnet ratchet 7003
hard_reset: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 02 --command reboot --delay 5
power_off: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 02 --command off
power_on: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 02 --command on
-character-delays:
- boot: 300
- test: 100
+character_delays:
+ boot: 30
+ test: 30
actions:
deploy:
@@ -42,27 +39,34 @@ actions:
expect_shell: False
commands:
- net_bootp
- - linux (tftp,{SERVER_IP})/{KERNEL} auto=true interface=eth0 priority=critical noshell BOOT_DEBUG=1 DEBIAN_FRONTEND=text url=tftp://{SERVER_IP}/{PRESEED_CONFIG} efi=noruntime ---
+ - linux (tftp,{SERVER_IP})/{KERNEL} auto=true interface=eth0 priority=critical noshell BOOT_DEBUG=1 DEBIAN_FRONTEND=text url=tftp://{SERVER_IP}/{PRESEED_CONFIG} efi=noruntime --- console=ttyS0,115200 debug verbose
- initrd (tftp,{SERVER_IP})/{RAMDISK}
- devicetree (tftp,{SERVER_IP})/{DTB}
- boot
debian-installed:
commands:
+ - insmod part_gpt
+ - insmod ext2
+ - insmod part_msdos
- insmod chain
- ls
- ls (hd1,gpt1)
+ - sleep 10
- set
- set root=(hd1,gpt1)
+ - sleep 10
- ls (hd1,gpt1)
+ - sleep 10
- chainloader (hd1,gpt1)/efi/debian/grubaa64.efi
+ - sleep 10
- boot
timeouts:
actions:
grub-main:
- seconds: 30000
+ seconds: 5400
bootloader-action:
- seconds: 30000
+ seconds: 5400
bootloader-interrupt:
seconds: 300
bootloader-commands:
@@ -72,18 +76,18 @@ timeouts:
auto-login-action:
seconds: 600
installer-wait:
- seconds: 30000
+ seconds: 5400
pdu_reboot:
seconds: 3000
connections:
pdu_reboot:
seconds: 3000
grub-main:
- seconds: 30000
+ seconds: 5400
bootloader-action:
- seconds: 30000
+ seconds: 5400
installer-wait:
- seconds: 30000
+ seconds: 5400
bootloader-retry:
seconds: 3000
bootloader-interrupt:
diff --git a/lava_dispatcher/pipeline/devices/juno-uboot.yaml b/lava_dispatcher/pipeline/devices/juno-uboot.yaml
new file mode 100644
index 0000000..b1ee508
--- /dev/null
+++ b/lava_dispatcher/pipeline/devices/juno-uboot.yaml
@@ -0,0 +1,106 @@
+device_type: juno
+
+parameters:
+ booti:
+ kernel: '0x80080000'
+ ramdisk: '0x84000000'
+ dtb: '0x83000000'
+ media: # four USB slots.
+ usb:
+ UUID-required: True
+ SanDisk_Ultra:
+ uuid: "usb-SanDisk_Ultra_20051536410F43008552-0:0"
+ device_id: 0 # the bootloader device id for this media on the 'usb' interface
+
+commands:
+ connect: telnet localhost 7002
+ hard_reset: /usr/local/lab-scripts/pduclient --daemon services --hostname pdu14 --command off --port 02; sleep 10; /usr/local/lab-scripts/pduclient --daemon services --hostname pdu14 --command on --port 02
+ power_off: /usr/local/lab-scripts/pduclient --daemon services --hostname pdu14 --command off --port 02
+ power_on: /usr/local/lab-scripts/pduclient --daemon services --hostname pdu14 --command on --port 02
+
+actions:
+ deploy:
+ # list of deployment methods which this device supports
+ methods:
+ lxc:
+ nfs:
+ tftp:
+ usb:
+ connections:
+ adb:
+ lxc:
+ serial:
+ boot:
+ # list of connection methods which this device supports
+ connections:
+ adb:
+ lxc:
+ serial:
+ # list of boot methods which this device supports.
+ methods:
+ lxc:
+ bootloader-defaults:
+ parameters:
+ bootloader_prompt:
+ boot_message: Booting Linux
+ u-boot:
+ parameters:
+ bootloader_prompt: VExpress64
+ boot_message: Booting Linux
+ send_char: False
+ ramdisk:
+ commands:
+ - setenv autoload no
+ - setenv bootdelay 1
+ - setenv ethact smc911x-0
+ - setenv fdt_high 0xffffffffffffffff
+ - setenv fdtfile board.dtb
+ - setenv initrd_high 0xffffffffffffffff
+ - setenv initrd_name ramdisk.img
+ - setenv kernel_name norkern
+ - setenv bootargs 'console=ttyAMA0,115200n8 root=/dev/sda2 rw rootwait earlycon=pl011,0x7ff80000 debug systemd.log_target=null user_debug=31 androidboot.hardware=juno loglevel=9'
+ - setenv bootcmd 'afs load ${kernel_name} {KERNEL_ADDR} ; afs load ${fdtfile} {DTB_ADDR} ; fdt addr {DTB_ADDR}; fdt resize; if afs load ${initrd_name} {RAMDISK_ADDR} ; then setenv initrd_param {RAMDISK_ADDR}; else setenv initrd_param -; fi ; setenv bootargs ${bootargs} ${bootargs_sky2}; booti {KERNEL_ADDR} ${initrd_param} {DTB_ADDR}'
+ - boot
+ nfs:
+ commands:
+ - setenv autoload no
+ - setenv bootdelay 1
+ - setenv ethact smc911x-0
+ - setenv fdt_high 0xffffffffffffffff
+ - setenv fdtfile board.dtb
+ - setenv initrd_high 0xffffffffffffffff
+ - setenv initrd_name ramdisk.img
+ - setenv kernel_name norkern
+ - setenv bootargs 'console=ttyAMA0,115200n8 root=/dev/nfs nfsroot={SERVER_IP}:{NFSROOTFS} ip=dhcp rw rootwait earlycon=pl011,0x7ff80000 debug systemd.log_target=null user_debug=31 androidboot.hardware=juno loglevel=9'
+ - setenv bootcmd 'afs load ${kernel_name} {KERNEL_ADDR} ; afs load ${fdtfile} {DTB_ADDR} ; fdt addr {DTB_ADDR}; fdt resize; if afs load ${initrd_name} {RAMDISK_ADDR} ; then setenv initrd_param {RAMDISK_ADDR}; else setenv initrd_param -; fi ; setenv bootargs ${bootargs} ${bootargs_sky2}; booti {KERNEL_ADDR} ${initrd_param} {DTB_ADDR}'
+ - boot
+ ramdisk-tftp:
+ commands:
+ - setenv autoload no
+ - setenv bootdelay 1
+ - setenv ethact smc911x-0
+ - setenv fdt_high 0xffffffffffffffff
+ - setenv initrd_high 0xffffffffffffffff
+ - setenv initrd_name ramdisk.cpio
+ - setenv loadfdt 'tftp {DTB_ADDR} {DTB}'
+ - setenv loadkernel 'tftp {KERNEL_ADDR} {KERNEL}'
+ - setenv loadinitrd 'tftp {RAMDISK_ADDR} {RAMDISK}'
+ - setenv bootargs 'console=ttyAMA0,115200n8 root=/dev/sda2 rw rootwait earlycon=pl011,0x7ff80000 debug systemd.log_target=null user_debug=31 androidboot.hardware=juno loglevel=9'
+ - setenv bootcmd 'dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadfdt; fdt addr {DTB_ADDR}; fdt resize; if run loadinitrd; then setenv initrd_param {RAMDISK_ADDR}; else setenv initrd_param -; fi ; setenv bootargs ${bootargs} ${bootargs_sky2}; booti {KERNEL_ADDR} ${initrd_param} {DTB_ADDR}'
+ - boot
+ tftp:
+ commands:
+ - setenv autoload no
+ - setenv bootdelay 1
+ - setenv ethact smc911x-0
+ - setenv fdt_high 0xffffffffffffffff
+ - setenv initrd_high 0xffffffffffffffff
+ - setenv initrd_name ramdisk.img
+ - setenv loadfdt 'tftp {DTB_ADDR} {DTB}'
+ - setenv loadkernel 'tftp {KERNEL_ADDR} {KERNEL}'
+ - setenv bootargs 'console=ttyAMA0,115200n8 root=/dev/nfs nfsroot={SERVER_IP}:{NFSROOTFS} ip=dhcp rw rootwait earlycon=pl011,0x7ff80000 debug systemd.log_target=null user_debug=31 androidboot.hardware=juno loglevel=9'
+ - setenv bootcmd 'dhcp; setenv serverip {SERVER_IP}; run loadkernel; run loadfdt; fdt addr {DTB_ADDR}; fdt resize; if afs load ${initrd_name} {RAMDISK_ADDR} ; then setenv initrd_param {RAMDISK_ADDR}; else setenv initrd_param -; fi ; setenv bootargs ${bootargs} ${bootargs_sky2}; booti {KERNEL_ADDR} ${initrd_param} {DTB_ADDR}'
+ - boot
+ use-defaults:
+ commands:
+ - boot
diff --git a/lava_dispatcher/pipeline/devices/juno-uefi.yaml b/lava_dispatcher/pipeline/devices/juno-uefi.yaml
new file mode 100644
index 0000000..998c660
--- /dev/null
+++ b/lava_dispatcher/pipeline/devices/juno-uefi.yaml
@@ -0,0 +1,155 @@
+device_type: juno
+
+parameters:
+ media: # four USB slots.
+ usb:
+ UUID-required: True
+ SanDisk_Ultra:
+ uuid: "usb-SanDisk_Ultra_20051536410F43008552-0:0"
+ device_id: 0 # the bootloader device id for this media on the 'usb' interface
+
+commands:
+ connect: telnet localhost 7002
+ hard_reset: /usr/local/lab-scripts/pduclient --daemon services --hostname pdu14 --command off --port 02; sleep 10; /usr/local/lab-scripts/pduclient --daemon services --hostname pdu14 --command on --port 02
+ power_off: /usr/local/lab-scripts/pduclient --daemon services --hostname pdu14 --command off --port 02
+ power_on: /usr/local/lab-scripts/pduclient --daemon services --hostname pdu14 --command on --port 02
+
+character-delays:
+ boot: 50
+
+actions:
+ deploy:
+ # list of deployment methods which this device supports
+ methods:
+ tftp:
+ boot:
+ # list of connection methods which this device supports
+ connections:
+ serial:
+ # list of boot methods which this device supports.
+ methods:
+ uefi-menu:
+ parameters:
+ character_delay: 300
+ interrupt_prompt: The default boot selection will start in
+ interrupt_string: ' '
+ item_markup:
+ - "["
+ - "]"
+ item_class: '0-9'
+ separator: ' '
+ label_class: 'a-zA-Z0-9\s\:'
+ bootloader_prompt: 'Start:'
+ boot_message: "Loaded: LinuxImage"
+ # interrupt: # character needed to interrupt u-boot, single whitespace by default
+ # method specific stanza
+ nor_flash:
+ - select:
+ items:
+ - 'Boot Manager'
+ wait: "Choice:"
+ - select:
+ items:
+ - 'Add Boot Device Entry'
+ wait: "Select the Boot Device:"
+ - select:
+ items:
+ - 'NOR Flash'
+ wait: "File path of the EFI Application or the kernel:"
+ - select:
+ enter: norkern
+ wait: "Is an EFI Application?"
+ - select:
+ enter: n
+ wait: "Has FDT support?"
+ - select:
+ enter: y
+ wait: "Add an initrd:"
+ - select:
+ enter: n
+ wait: 'Arguments to pass to the binary:'
+ - select:
+ enter: "dtb=board.dtb initrd=ramdisk.img console=ttyAMA0,115200 androidboot.hardware=juno systemd.log_target=null rootwait root=/dev/sda1"
+ wait: 'Description for this new Entry:'
+ - select:
+ enter: '{TEST_MENU_NAME}'
+ wait: "Choice:"
+ - select:
+ items:
+ - 'Update FDT path'
+ wait: "Select the Boot Device:"
+ - select:
+ items:
+ - 'TFTP on MAC Address: 00:02:F7:00:58:EB' # substitute the MAC in the template
+ wait: "File path of the FDT blob:"
+ - select:
+ enter: 'board.dtb'
+ wait: "Choice:"
+ - select:
+ items:
+ - 'Return to main menu'
+ wait: "Start:"
+ - select:
+ items:
+ - '{TEST_MENU_NAME}'
+ tftp:
+ - select:
+ items:
+ - 'Boot Manager'
+ wait: "Choice:"
+ - select:
+ items:
+ - 'Add Boot Device Entry'
+ wait: "Select the Boot Device:"
+ - select:
+ items:
+ - 'TFTP on MAC Address: 00:02:F7:00:58:EB' # substitute the MAC in the template
+ wait: "Get the IP address from DHCP:"
+ - select:
+ enter: y
+ wait: "Get the TFTP server IP address:"
+ - select:
+ enter: '{SERVER_IP}'
+ wait: "File path of the EFI Application or the kernel :"
+ - select:
+ enter: '{KERNEL}'
+ wait: "Is an EFI Application?"
+ - select:
+ enter: n
+ wait: "Has FDT support?"
+ - select:
+ enter: y
+ wait: "Add an initrd:"
+ - select:
+ enter: n
+ wait: 'Arguments to pass to the binary:'
+ - select:
+ enter: "console=ttyS0,115200 earlyprintk=uart8250-32bit,0x1c020000 debug root=/dev/nfs rw nfsroot={SERVER_IP}:{NFSROOTFS},tcp,hard,intr ip=dhcp"
+ wait: 'Description for this new Entry:'
+ - select:
+ enter: '{TEST_MENU_NAME}'
+ wait: "Choice:"
+ - select:
+ items:
+ - 'Update FDT path'
+ wait: "Select the Boot Device:"
+ - select:
+ items:
+ - 'TFTP on MAC Address: 00:02:F7:00:58:EB' # substitute the MAC in the template
+ wait: "Get the IP address from DHCP:"
+ - select:
+ enter: y
+ wait: "Get the TFTP server IP address:"
+ - select:
+ enter: '{SERVER_IP}'
+ wait: "File path of the FDT blob :"
+ - select:
+ enter: '{DTB}'
+ wait: "Choice:"
+ - select:
+ items:
+ - 'Return to main menu'
+ wait: "Start:"
+ - select:
+ items:
+ - '{TEST_MENU_NAME}'
diff --git a/lava_dispatcher/pipeline/devices/mustang-uefi.yaml b/lava_dispatcher/pipeline/devices/mustang-uefi.yaml
index 1db931e..2ae7e6a 100644
--- a/lava_dispatcher/pipeline/devices/mustang-uefi.yaml
+++ b/lava_dispatcher/pipeline/devices/mustang-uefi.yaml
@@ -6,7 +6,7 @@ commands:
#power_off: /usr/bin/pduclient --daemon services --hostname pdu09 --command off --port 05
#power_on: /usr/bin/pduclient --daemon services --hostname pdu09 --command on --port 05
-character-delays:
+character_delays:
boot: 10
actions:
diff --git a/lava_dispatcher/pipeline/devices/nexus10-01.yaml b/lava_dispatcher/pipeline/devices/nexus10-01.yaml
index 1b352f6..9535e38 100644
--- a/lava_dispatcher/pipeline/devices/nexus10-01.yaml
+++ b/lava_dispatcher/pipeline/devices/nexus10-01.yaml
@@ -1,7 +1,7 @@
device_type: nexus10
adb_serial_number: R32D300FRYP
fastboot_serial_number: R32D300FRYP
-device_path: /dev/bus/usb/004
+device_path: ['/dev/bus/usb/004']
actions:
deploy:
diff --git a/lava_dispatcher/pipeline/devices/nexus4-01.yaml b/lava_dispatcher/pipeline/devices/nexus4-01.yaml
index b76761b..629c34c 100644
--- a/lava_dispatcher/pipeline/devices/nexus4-01.yaml
+++ b/lava_dispatcher/pipeline/devices/nexus4-01.yaml
@@ -1,7 +1,7 @@
device_type: nexus4
adb_serial_number: 04f228d1d9c76f39
fastboot_serial_number: 04f228d1d9c76f39
-device_path: /dev/bus/usb/001
+device_path: ['/dev/bus/usb/001']
actions:
deploy:
diff --git a/lava_dispatcher/pipeline/devices/nexus9-01.yaml b/lava_dispatcher/pipeline/devices/nexus9-01.yaml
new file mode 100644
index 0000000..43cc843
--- /dev/null
+++ b/lava_dispatcher/pipeline/devices/nexus9-01.yaml
@@ -0,0 +1,33 @@
+device_type: nexus9
+adb_serial_number: HT4B7JT01005
+fastboot_serial_number: HT4B7JT01005
+device_path: ['/dev/bus/usb/004']
+
+actions:
+ deploy:
+ methods:
+ lxc:
+ fastboot:
+ connections:
+ lxc:
+ serial:
+ boot:
+ connections:
+ lxc:
+ methods:
+ lxc:
+ fastboot:
+
+timeouts:
+ actions:
+ apply-overlay-image:
+ seconds: 120
+ umount-retry:
+ seconds: 45
+ lava-test-shell:
+ seconds: 30
+ power_off:
+ seconds: 5
+ connections:
+ uboot-retry:
+ seconds: 60
diff --git a/lava_dispatcher/pipeline/devices/x86-01.yaml b/lava_dispatcher/pipeline/devices/x86-01.yaml
index e31bf10..6668e1e 100644
--- a/lava_dispatcher/pipeline/devices/x86-01.yaml
+++ b/lava_dispatcher/pipeline/devices/x86-01.yaml
@@ -8,7 +8,7 @@ commands:
power_off: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 08 --command off
power_on: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 08 --command on
-character-delays:
+character_delays:
boot: 250
test: 100
diff --git a/lava_dispatcher/pipeline/devices/x86-02.yaml b/lava_dispatcher/pipeline/devices/x86-02.yaml
index 051bd1a..e6afc89 100644
--- a/lava_dispatcher/pipeline/devices/x86-02.yaml
+++ b/lava_dispatcher/pipeline/devices/x86-02.yaml
@@ -6,7 +6,7 @@ commands:
power_off: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 01 --command off
power_on: /usr/bin/pduclient --hostname pdu02 --daemon ironhide --port 01 --command on
-character-delays:
+character_delays:
boot: 100
test: 100
@@ -64,6 +64,30 @@ actions:
- ls (ata0,msdos1)/
- chainloader (ata0,msdos1)/boot/grub/i386-pc/boot.img
- boot
+ centos-installer:
+ expect_shell: False
+ commands:
+ - insmod part_gpt
+ - insmod ext2
+ - insmod linux
+ - insmod tftp
+ - insmod pata
+ - insmod part_msdos
+ - linux (tftp,{SERVER_IP})/{KERNEL} noshell inst.text inst.cmdline RUNKS=1 inst.ks=file://{PRESEED_LOCAL} efi=noruntime --- console=ttyS0,115200 debug verbose
+ - initrd (tftp,{SERVER_IP})/{RAMDISK}
+ - boot
+ centos-installed:
+ commands:
+ - insmod part_gpt
+ - insmod ext2
+ - insmod linux
+ - insmod tftp
+ - insmod pata
+ - insmod part_msdos
+ - insmod chain
+ - ls (ata0,msdos1)/
+ - chainloader (ata0,msdos1)/grub2/i386-pc/boot.img
+ - boot
timeouts:
actions:
diff --git a/lava_dispatcher/pipeline/lava_test_shell/lava-test-case b/lava_dispatcher/pipeline/lava_test_shell/lava-test-case
new file mode 100644
index 0000000..8405c37
--- /dev/null
+++ b/lava_dispatcher/pipeline/lava_test_shell/lava-test-case
@@ -0,0 +1,88 @@
+#NOTE the lava_test_shell_action fills in the proper interpreter path
+# above during target deployment
+
+usage () {
+ echo "Usage: lava-test-case TEST_CASE_ID --shell cmds ..."
+ echo " or: lava-test-case TEST_CASE_ID --result RESULT [--units UNITS] "
+ echo " [--measurement MEASUREMENT]"
+ echo ""
+ echo "Either run or record the results of a particular test case"
+}
+
+rc=0
+
+TEST_CASE_ID="$1"
+shift
+if [ -z "$TEST_CASE_ID" ]; then
+ usage
+ exit 1
+fi
+if [ "$1" = "--shell" ]; then
+ shift
+ echo "<LAVA_SIGNAL_STARTTC $TEST_CASE_ID>"
+ eval "$*"
+ rc=$?
+ echo "<LAVA_SIGNAL_ENDTC $TEST_CASE_ID>"
+ if [ $rc -eq 0 ]; then
+ RESULT=pass
+ else
+ RESULT=fail
+ fi
+else
+ while [ $# -gt 0 ]; do
+ case $1 in
+ --result)
+ shift
+ RESULT=$1
+ shift
+ ;;
+ --units)
+ shift
+ UNITS=$1
+ shift
+ ;;
+ --measurement)
+ shift
+ MEASUREMENT=$1
+ shift
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
+ done
+fi
+
+# $LAVA_RESULT_DIR is set by lava-test-shell
+result_dir="$LAVA_RESULT_DIR/results/$TEST_CASE_ID"
+mkdir -p "$result_dir"
+
+# signal the test case results
+TCDATA=""
+
+if [ -z "${RESULT+x}" ]; then
+ echo "--result must be specified"
+ exit 1
+else
+ echo $RESULT > "$result_dir/result"
+ TCDATA="$TCDATA RESULT=$RESULT"
+fi
+
+if [ -n "${UNITS+x}" ]; then
+ echo $UNITS > "$result_dir/units"
+ TCDATA="$TCDATA UNITS=$UNITS"
+fi
+
+if [ -n "${MEASUREMENT+x}" ]; then
+ echo $MEASUREMENT > "$result_dir/measurement"
+ TCDATA="$TCDATA MEASUREMENT=$MEASUREMENT"
+fi
+
+# signal the test case results
+echo "<LAVA_SIGNAL_TESTCASE TEST_CASE_ID=$TEST_CASE_ID$TCDATA>"
+
+# lava-test-case testname --shell false should report a fail as test result
+# but not fail itself; hence don't honor 'rc' if we reach this, but exit 0
+exit 0
+
diff --git a/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-network b/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-network
new file mode 100644
index 0000000..b5a102a
--- /dev/null
+++ b/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-network
@@ -0,0 +1,110 @@
+#!/bin/sh
+#
+#This file is for Multi-Node test
+#lava-network
+#-----------------
+#Helper script to broadcast IP data from the test image, wait for data
+#to be received by the rest of the group (or one role within the group)
+#and then provide an interface to retrieve IP data about the group on
+#the command line.
+#
+#Raising a suitable network interface is a job left for the designer of
+#the test definition / image but once a network interface is available,
+#lava-network can be asked to broadcast this information to the rest of
+#the group. At a later stage of the test, before the IP details of the
+#group need to be used, call lava-network collect to receive the same
+#information about the rest of the group.
+#
+#All usage of lava-network needs to use a broadcast (which wraps a call
+#to lava-send) and a collect (which wraps a call to lava-wait-all). As
+#a wrapper around lava-wait-all, collect will block until the rest of
+#the group (or devices in the group with the specified role) has made a
+#broadcast.
+#
+#After the data has been collected, it can be queried for any board
+#specified in the output of lava-group:
+#
+#lava-network query server
+#192.168.3.56
+#
+#Usage:
+# broadcast network info:
+# lava-network broadcast [interface]
+# collect network info:
+# lava-network collect [interface] <role>
+# query specific host info:
+# lava-network query [hostname] [info]
+# export hosts file:
+# lava-network hosts [path of hosts]
+#
+#So interface would be mandatory for broadcast and collect, hostname
+#would be mandatory for query, "path of hosts" would be mandatory for
+#hosts, role is optional for collect.
+
+
+LAVA_MULTI_NODE_API="LAVA_NETWORK"
+#MESSAGE_TIMEOUT=5
+MESSAGE_NEED_ACK=
+
+_LAVA_NETWORK_ID="network_info"
+_LAVA_NETWORK_ARG_MIN=2
+
+. $LAVA_TEST_BIN/lava-multi-node.lib
+
+LAVA_MULTI_NODE_NETWORK_CACHE="/tmp/lava_multi_node_network_cache.txt"
+
+_lava_multi_node_debug "$LAVA_MULTI_NODE_API checking arguments..."
+if [ $# -lt $_LAVA_NETWORK_ARG_MIN ]; then
+ _lava_multi_node_debug "$FUNCNAME Not enough arguments."
+ exit $LAVA_MULTI_NODE_EXIT_ERROR
+fi
+
+_lava_multi_node_debug "$LAVA_MULTI_NODE_API handle sub-command..."
+case "$1" in
+ "broadcast")
+ _lava_multi_node_debug "$LAVA_MULTI_NODE_API handle broadcast command..."
+ LAVA_MULTI_NODE_API="LAVA_SEND"
+ MESSAGE_COMMAND="<${LAVA_MULTI_NODE_API}"
+ export MESSAGE_ACK="<${LAVA_MULTI_NODE_API}_ACK>"
+ export MESSAGE_REPLY="<${LAVA_MULTI_NODE_API}_COMPLETE"
+ export MESSAGE_REPLY_ACK="<${LAVA_MULTI_NODE_API}_COMPLETE_ACK>"
+ export MESSAGE_HEAD="$MESSAGE_PREFIX $MESSAGE_COMMAND"
+ NETWORK_INFO_STREAM=`lava_multi_node_get_network_info $2`
+ lava_multi_node_send $_LAVA_NETWORK_ID $NETWORK_INFO_STREAM
+ ;;
+
+ "collect")
+ _lava_multi_node_debug "$LAVA_MULTI_NODE_API handle collect command..."
+ LAVA_MULTI_NODE_API="LAVA_WAIT_ALL"
+ MESSAGE_COMMAND="<${LAVA_MULTI_NODE_API}"
+ export MESSAGE_ACK="<${LAVA_MULTI_NODE_API}_ACK>"
+ export MESSAGE_REPLY="<${LAVA_MULTI_NODE_API}_COMPLETE"
+ export MESSAGE_REPLY_ACK="<${LAVA_MULTI_NODE_API}_COMPLETE_ACK>"
+ export MESSAGE_HEAD="$MESSAGE_PREFIX $MESSAGE_COMMAND"
+ lava_multi_node_send $_LAVA_NETWORK_ID $3
+ lava_multi_node_wait_for_message $LAVA_MULTI_NODE_NETWORK_CACHE
+ ;;
+
+ "query")
+ _lava_multi_node_debug "$LAVA_MULTI_NODE_API handle query command..."
+ lava_multi_node_check_cache $LAVA_MULTI_NODE_NETWORK_CACHE
+ lava_multi_node_print_host_info $2 $3
+ ;;
+
+ "hosts")
+ _lava_multi_node_debug "$LAVA_MULTI_NODE_API handle hosts command..."
+ lava_multi_node_check_cache $LAVA_MULTI_NODE_NETWORK_CACHE
+ lava_multi_node_make_hosts $2
+ ;;
+
+ "alias-hosts")
+ _lava_multi_node_debug "$LAVA_MULTI_NODE_API handle aliased hosts command..."
+ lava_multi_node_check_cache $LAVA_MULTI_NODE_NETWORK_CACHE
+ lava_multi_node_make_aliased_hosts $2
+ ;;
+
+ *)
+ _lava_multi_node_debug "$LAVA_MULTI_NODE_API command $1 is not supported."
+ exit $LAVA_MULTI_NODE_EXIT_ERROR
+ ;;
+esac
diff --git a/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-send b/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-send
new file mode 100644
index 0000000..eb4b46a
--- /dev/null
+++ b/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-send
@@ -0,0 +1,17 @@
+#!/bin/sh
+#
+#This file is for Multi-Node test
+#
+#Sends a message to the group, optionally passing associated key-value
+#data pairs. Sending a message is a non-blocking operation. The message
+#is guaranteed to be available to all members of the group, but some of
+#them might never retrieve it.
+#
+#Usage: ``lava-send <message-id> [key1=val1 [key2=val2] ...]``
+LAVA_MULTI_NODE_API="LAVA_SEND"
+#MESSAGE_TIMEOUT=5
+MESSAGE_NEED_ACK=
+
+. $LAVA_TEST_BIN/lava-multi-node.lib
+
+lava_multi_node_send $1 "$(_get_key_value_pattern $@)"
diff --git a/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-sync b/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-sync
new file mode 100644
index 0000000..426075f
--- /dev/null
+++ b/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-sync
@@ -0,0 +1,20 @@
+#!/bin/sh
+#
+#This file is for Multi-Node test
+#
+#Global synchronization primitive. Sends a message, and waits for the
+#same message from all of the other devices.
+#
+#Usage: ``lava-sync <message>``
+#
+#``lava-sync foo`` is effectively the same as ``lava-send foo`` followed
+#by ``lava-wait-all foo``.
+LAVA_MULTI_NODE_API="LAVA_SYNC"
+#MESSAGE_TIMEOUT=5
+MESSAGE_NEED_ACK=
+
+. $LAVA_TEST_BIN/lava-multi-node.lib
+
+lava_multi_node_send $1
+
+lava_multi_node_wait_for_message
diff --git a/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-wait b/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-wait
new file mode 100644
index 0000000..c6697fc
--- /dev/null
+++ b/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-wait
@@ -0,0 +1,21 @@
+#!/bin/sh
+#
+#This file is for Multi-Node test
+#
+#Waits until any other device in the group sends a message with the given
+#ID. This call will block until such message is sent.
+#
+#Usage: ``lava-wait <message-id>``
+#
+#If there was data passed in the message, the key-value pairs will be
+#printed in the standard output, each in one line. If no key values were
+#passed, nothing is printed.
+LAVA_MULTI_NODE_API="LAVA_WAIT"
+#MESSAGE_TIMEOUT=5
+MESSAGE_NEED_ACK=
+
+. $LAVA_TEST_BIN/lava-multi-node.lib
+
+lava_multi_node_send $1
+
+lava_multi_node_wait_for_message
diff --git a/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-wait-all b/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-wait-all
new file mode 100644
index 0000000..bf8ac5a
--- /dev/null
+++ b/lava_dispatcher/pipeline/lava_test_shell/multi_node/lava-wait-all
@@ -0,0 +1,23 @@
+#!/bin/sh
+#
+#This file is for Multi-Node test
+#
+#Waits until **all** other devices in the group send a message with the
+#given message ID. If ``<role>`` is passed, only wait until all devices
+#with that given role send a message.
+#
+#Usage: ``lava-wait-all <message-id> [<role>]``
+#
+#If data was sent by the other devices with the message, the key-value
+#pairs will be printed one per line, prefixed with the device name and
+#whitespace.
+LAVA_MULTI_NODE_API="LAVA_WAIT_ALL"
+#MESSAGE_TIMEOUT=5
+MESSAGE_NEED_ACK=
+
+. $LAVA_TEST_BIN/lava-multi-node.lib
+
+lava_multi_node_send $1 $2
+
+lava_multi_node_wait_for_message
+
diff --git a/lava_dispatcher/pipeline/log.py b/lava_dispatcher/pipeline/log.py
index b786de6..709832f 100644
--- a/lava_dispatcher/pipeline/log.py
+++ b/lava_dispatcher/pipeline/log.py
@@ -19,7 +19,9 @@
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
+import datetime
import logging
+import sys
import yaml
import zmq
import zmq.auth
@@ -82,16 +84,18 @@ class YAMLLogger(logging.Logger):
self.handler.setMetadata(level, name)
def log_message(self, level, level_name, message, *args, **kwargs):
- # If the received message is a dictionary then we look for specific log
- # parameters such as timestamp, else we assume the log message is a
- # string and dump the message.
- if isinstance(message, dict) and 'ts' in message:
- self._log(level, yaml.dump([{'ts': message['ts'],
- level_name: message['msg']}])[:-1],
- args, kwargs)
+ # Build the dictionary
+ data = {'dt': datetime.datetime.utcnow().isoformat(),
+ 'lvl': level_name}
+
+ if isinstance(message, str) and args:
+ data['msg'] = message % args
else:
- self._log(level, yaml.dump([{level_name: message}])[:-1], args,
- kwargs)
+ data['msg'] = message
+ # Set width to a really large value in order to always get one line.
+ self._log(level, yaml.dump(data, default_flow_style=True,
+ default_style='"',
+ width=sys.maxint)[:-1], ())
def exception(self, exc, *args, **kwargs):
self.log_message(logging.ERROR, 'exception', exc, *args, **kwargs)
@@ -113,25 +117,3 @@ class YAMLLogger(logging.Logger):
def results(self, results, *args, **kwargs):
self.log_message(logging.INFO, 'results', results, *args, **kwargs)
-
-
-class StdLogger(object): # pylint: disable=too-few-public-methods
-
- def __init__(self, name, filename):
- """
- Output for stdout (which is redirected to the oob_file by the
- scheduler) should use the ASCII logger.
- """
- self.name = name
- self.description = "std logger"
- self.log = logging.getLogger("%s" % name)
- self.log.setLevel(logging.INFO)
- self.handler = logging.StreamHandler(filename)
- self.formatter = logging.Formatter('"%(asctime)s":\n - %(message)s')
- self.handler.setFormatter(self.formatter)
-
- def info(self, message):
- self.log.info(message)
-
- def debug(self, message):
- self.log.debug(message)
diff --git a/lava_dispatcher/pipeline/logical.py b/lava_dispatcher/pipeline/logical.py
index 196709e..8a45d0f 100644
--- a/lava_dispatcher/pipeline/logical.py
+++ b/lava_dispatcher/pipeline/logical.py
@@ -76,7 +76,7 @@ class RetryAction(Action):
self.run(connection)
-class DiagnosticAction(Action): # pylint: disable=abstract-class-not-used
+class DiagnosticAction(Action):
def __init__(self):
"""
@@ -98,11 +98,11 @@ class DiagnosticAction(Action): # pylint: disable=abstract-class-not-used
Log the requested diagnostic.
Raises NotImplementedError if subclass has omitted a trigger classmethod.
"""
- self.logger.debug("%s diagnostic triggered." % self.trigger())
+ self.logger.debug("%s diagnostic triggered.", self.trigger())
return connection
-class AdjuvantAction(Action): # pylint: disable=abstract-class-not-used
+class AdjuvantAction(Action):
"""
Adjuvants are associative actions - partners and helpers which can be executed if
the initial Action determines a particular state.
@@ -135,13 +135,13 @@ class AdjuvantAction(Action): # pylint: disable=abstract-class-not-used
return connection
if self.data[self.key()]:
self.adjuvant = True
- self.logger.warning("Adjuvant %s required" % self.name)
+ self.logger.warning("Adjuvant %s required", self.name)
else:
- self.logger.debug("Adjuvant %s skipped" % self.name)
+ self.logger.debug("Adjuvant %s skipped", self.name)
return connection
-class Deployment(object): # pylint: disable=abstract-class-not-used
+class Deployment(object):
"""
Deployment is a strategy class which aggregates Actions
until the request from the YAML can be validated or rejected.
@@ -252,7 +252,7 @@ class Boot(object):
return willing[0]
-class LavaTest(object): # pylint: disable=abstract-class-not-used
+class LavaTest(object):
"""
Allows selection of the LAVA test method for this job within the parser.
"""
diff --git a/lava_dispatcher/pipeline/power.py b/lava_dispatcher/pipeline/power.py
index b92b72d..ff06170 100644
--- a/lava_dispatcher/pipeline/power.py
+++ b/lava_dispatcher/pipeline/power.py
@@ -270,7 +270,7 @@ class FinalizeAction(Action):
protocol.finalise_protocol(self.job.device)
if self.errors:
self.results = {'status': self.errors}
- self.logger.debug('status: %s' % self.errors)
+ self.logger.error('status: %s', self.errors)
elif self.job.pipeline.errors:
self.results = {'status': "Incomplete"}
self.errors = "Incomplete"
@@ -279,7 +279,7 @@ class FinalizeAction(Action):
'Errors': self.job.pipeline.errors})
else:
self.results = {'success': "Complete"}
- self.logger.debug("Status: Complete")
+ self.logger.info("Status: Complete")
with open("%s/results.yaml" % self.job.parameters['output_dir'], 'w') as results:
results.write(yaml.dump(self.job.pipeline.describe()))
# from meliae import scanner
diff --git a/lava_dispatcher/pipeline/protocols/lxc.py b/lava_dispatcher/pipeline/protocols/lxc.py
index ad4e02a..f4a9576 100644
--- a/lava_dispatcher/pipeline/protocols/lxc.py
+++ b/lava_dispatcher/pipeline/protocols/lxc.py
@@ -89,9 +89,9 @@ class LxcProtocol(Protocol):
# execute the command.
shell.expect(pexpect.EOF)
if shell.exitstatus:
- self.logger.debug("%s command exited %d: %s" %
- (reboot_cmd, shell.exitstatus,
- shell.readlines()))
+ self.logger.debug("%s command exited %d: %s",
+ reboot_cmd, shell.exitstatus,
+ shell.readlines())
# ShellCommand executes the destroy command
cmd = "lxc-destroy -n {0} -f".format(self.lxc_name)
@@ -103,4 +103,4 @@ class LxcProtocol(Protocol):
if shell.exitstatus:
raise JobError("%s command exited %d: %s" % (cmd, shell.exitstatus,
shell.readlines()))
- self.logger.debug("%s protocol finalised." % self.name)
+ self.logger.debug("%s protocol finalised.", self.name)
diff --git a/lava_dispatcher/pipeline/protocols/multinode.py b/lava_dispatcher/pipeline/protocols/multinode.py
index 93f966f..186a55b 100644
--- a/lava_dispatcher/pipeline/protocols/multinode.py
+++ b/lava_dispatcher/pipeline/protocols/multinode.py
@@ -115,9 +115,10 @@ class MultinodeProtocol(Protocol):
self.sock.connect((self.settings['coordinator_hostname'], self.settings['port']))
return True
except socket.error as exc:
- self.logger.exception(
- "socket error on connect: %d %s %s" % (
- exc.errno, self.settings['coordinator_hostname'], self.settings['port']))
+ self.logger.exception("socket error on connect: %d %s %s",
+ exc.errno,
+ self.settings['coordinator_hostname'],
+ self.settings['port'])
time.sleep(delay)
self.sock.close()
return False
@@ -135,7 +136,7 @@ class MultinodeProtocol(Protocol):
self.logger.debug("zero bytes sent for message - connection closed?")
return False
except socket.error as exc:
- self.logger.exception("socket error '%s' on send" % exc.message)
+ self.logger.exception("socket error '%s' on send", exc.message)
self.sock.close()
return False
return True
@@ -153,7 +154,7 @@ class MultinodeProtocol(Protocol):
response += self.sock.recv(self.blocks)
recv_count += self.blocks
except socket.error as exc:
- self.logger.exception("socket error '%d' on response" % exc.errno)
+ self.logger.exception("socket error '%d' on response", exc.errno)
self.sock.close()
return json.dumps({"response": "wait"})
return response
@@ -177,8 +178,9 @@ class MultinodeProtocol(Protocol):
c_iter = 0
response = None
delay = self.settings['poll_delay']
- self.logger.debug("Connecting to LAVA Coordinator on %s:%s timeout=%d seconds." % (
- self.settings['coordinator_hostname'], self.settings['port'], timeout))
+ self.logger.debug("Connecting to LAVA Coordinator on %s:%s timeout=%d seconds.",
+ self.settings['coordinator_hostname'],
+ self.settings['port'], timeout)
while True:
c_iter += self.settings['poll_delay']
if self._connect(delay):
@@ -187,8 +189,9 @@ class MultinodeProtocol(Protocol):
delay += 2
continue
if not c_iter % int(10 * self.settings['poll_delay']):
- self.logger.debug("sending message: %s waited %s of %s seconds" % (
- json.loads(message)['request'], c_iter, int(timeout)))
+ self.logger.debug("sending message: %s waited %s of %s seconds",
+ json.loads(message)['request'], c_iter,
+ timeout)
# blocking synchronous call
if not self._send_message(message):
continue
@@ -198,7 +201,7 @@ class MultinodeProtocol(Protocol):
try:
json_data = json.loads(response)
except ValueError:
- self.logger.debug("response starting '%s' was not JSON" % response[:42])
+ self.logger.debug("response starting '%s' was not JSON", response[:42])
self.finalise_protocol()
break
if json_data['response'] != 'wait':
@@ -235,20 +238,20 @@ class MultinodeProtocol(Protocol):
self.initialise_group()
if self.delayed_start:
# delayed start needs to pull the sync timeout from the job parameters.
- self.logger.info("%s protocol initialised - start is delayed by up to %s seconds" % (
- self.name, self.system_timeout.duration))
+ self.logger.info("%s protocol initialised - start is delayed by up to %s seconds",
+ self.name, self.system_timeout.duration)
expect_role = self.parameters['protocols'][self.name]['expect_role']
- self.logger.debug("Delaying start for %s seconds, lava_wait_all for role %s" % (
- self.system_timeout.duration, expect_role))
+ self.logger.debug("Delaying start for %s seconds, lava_wait_all for role %s",
+ self.system_timeout.duration, expect_role)
# send using the system timeout
sync_msg = {
"request": "lava_wait_all",
"waitrole": expect_role,
"messageID": 'lava_start'}
self._send(sync_msg, True)
- self.logger.debug("sent %s" % json.dumps(sync_msg))
+ self.logger.debug("sent %s", json.dumps(sync_msg))
else:
- self.logger.debug("%s protocol initialised" % self.name)
+ self.logger.debug("%s protocol initialised", self.name)
def debug_setup(self):
self.settings = {
@@ -270,9 +273,9 @@ class MultinodeProtocol(Protocol):
"role": self.parameters['protocols'][self.name]['role'],
}
if self.delayed_start:
- self.logger.debug("Debug: delayed start activated, waiting for %s" %
+ self.logger.debug("Debug: delayed start activated, waiting for %s",
self.parameters['protocols'][self.name]['expect_role'])
- self.logger.debug("%s protocol initialised in debug mode" % self.name)
+ self.logger.debug("%s protocol initialised in debug mode", self.name)
def initialise_group(self):
"""
@@ -283,7 +286,8 @@ class MultinodeProtocol(Protocol):
"request": "group_data",
"group_size": self.parameters['protocols'][self.name]['group_size']
}
- self.logger.debug("Initialising group %s" % self.parameters['protocols'][self.name]['target_group'])
+ self.logger.debug("Initialising group %s",
+ self.parameters['protocols'][self.name]['target_group'])
self._send(init_msg, True)
def finalise_protocol(self, device=None):
@@ -292,7 +296,7 @@ class MultinodeProtocol(Protocol):
"group_size": self.parameters['protocols'][self.name]['group_size']
}
self._send(fin_msg, True)
- self.logger.debug("%s protocol finalised." % self.name)
+ self.logger.debug("%s protocol finalised.", self.name)
def _check_data(self, data):
try:
@@ -337,23 +341,24 @@ class MultinodeProtocol(Protocol):
message_id = json_data['messageID']
if json_data['request'] == "lava_sync":
- self.logger.debug("requesting lava_sync '%s'" % message_id)
+ self.logger.debug("requesting lava_sync '%s'", message_id)
reply_str = self.request_sync(message_id)
elif json_data['request'] == 'lava_wait':
- self.logger.debug("requesting lava_wait '%s'" % message_id)
+ self.logger.debug("requesting lava_wait '%s'", message_id)
reply_str = self.request_wait(message_id)
elif json_data['request'] == 'lava_wait_all':
if 'role' in json_data and json_data['role'] is not None:
reply_str = self.request_wait_all(message_id, json_data['role'])
- self.logger.debug("requesting lava_wait_all '%s' '%s'" % (message_id, json_data['role']))
+ self.logger.debug("requesting lava_wait_all '%s' '%s'",
+ message_id, json_data['role'])
else:
- self.logger.debug("requesting lava_wait_all '%s'" % message_id)
+ self.logger.debug("requesting lava_wait_all '%s'", message_id)
reply_str = self.request_wait_all(message_id)
elif json_data['request'] == "lava_send":
- self.logger.debug("requesting lava_send %s" % message_id)
+ self.logger.debug("requesting lava_send %s", message_id)
if 'message' in json_data and json_data['message'] is not None:
send_msg = json_data['message']
if not isinstance(send_msg, dict):
@@ -361,10 +366,11 @@ class MultinodeProtocol(Protocol):
self.logger.debug("message: %s", json.dumps(send_msg))
if 'yaml_line' in send_msg:
del send_msg['yaml_line']
- self.logger.debug("requesting lava_send %s with args %s" % (message_id, json.dumps(send_msg)))
+ self.logger.debug("requesting lava_send %s with args %s",
+ message_id, json.dumps(send_msg))
reply_str = self.request_send(message_id, send_msg)
else:
- self.logger.debug("requesting lava_send %s without args" % message_id)
+ self.logger.debug("requesting lava_send %s without args", message_id)
reply_str = self.request_send(message_id)
if reply_str == '':
@@ -434,7 +440,7 @@ class MultinodeProtocol(Protocol):
new_msg.update(msg)
if system:
return self.poll(json.dumps(new_msg), timeout=self.system_timeout.duration)
- self.logger.debug("final message: %s" % json.dumps(new_msg))
+ self.logger.debug("final message: %s", json.dumps(new_msg))
return self.poll(json.dumps(new_msg))
def request_wait_all(self, message_id, role=None):
@@ -476,13 +482,13 @@ class MultinodeProtocol(Protocol):
The message can consist of just the messageID:
{ "messageID": "string" }
"""
- self.logger.debug("request_send %s %s" % (message_id, message))
+ self.logger.debug("request_send %s %s", message_id, message)
if not message:
message = {}
send_msg = {"request": "lava_send",
"messageID": message_id,
"message": message}
- self.logger.debug("Sending %s" % send_msg)
+ self.logger.debug("Sending %s", send_msg)
return self._send(send_msg)
def request_sync(self, msg):
diff --git a/lava_dispatcher/pipeline/shell.py b/lava_dispatcher/pipeline/shell.py
index c0ea641..eb4e67e 100644
--- a/lava_dispatcher/pipeline/shell.py
+++ b/lava_dispatcher/pipeline/shell.py
@@ -20,11 +20,9 @@
import contextlib
import os
-import yaml
import pexpect
import sys
import time
-import logging
from lava_dispatcher.pipeline.action import (
Action,
JobError,
@@ -115,12 +113,9 @@ class ShellCommand(pexpect.spawn): # pylint: disable=too-many-public-methods
:param send_char: send one character or entire string
"""
if delay:
- self.logger.debug({
- "sending with %s millisecond delay" % delay: yaml.dump(
- s, default_style='"', width=1000)})
+ self.logger.debug({"sending": s, "delay": "%s millisecond" % delay})
else:
- self.logger.debug({
- "sending": "%s" % yaml.dump(s, default_style='"', width=1000)})
+ self.logger.debug({"sending": s})
self.send(s, delay, send_char)
self.send(os.linesep, delay)
@@ -152,6 +147,8 @@ class ShellCommand(pexpect.spawn): # pylint: disable=too-many-public-methods
proc = super(ShellCommand, self).expect(*args, **kw)
except pexpect.TIMEOUT:
raise TestError("ShellCommand command timed out.")
+ except ValueError as exc:
+ raise TestError(exc)
except pexpect.EOF:
# FIXME: deliberately closing the connection (and starting a new one) needs to be supported.
raise InfrastructureError("Connection closed")
diff --git a/lava_dispatcher/pipeline/test/pipeline_refs/bbb-group-vland-alpha.yaml b/lava_dispatcher/pipeline/test/pipeline_refs/bbb-group-vland-alpha.yaml
index a76421e..ce2840a 100644
--- a/lava_dispatcher/pipeline/test/pipeline_refs/bbb-group-vland-alpha.yaml
+++ b/lava_dispatcher/pipeline/test/pipeline_refs/bbb-group-vland-alpha.yaml
@@ -39,8 +39,8 @@
- {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
- {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
- {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
- - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.apply_overlay.ConfigurePreseedFile, name: configure-preseed-file}
+ - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
- class: actions.test.shell.TestShellRetry
name: lava-test-retry
diff --git a/lava_dispatcher/pipeline/test/pipeline_refs/bbb-nfs-url.yaml b/lava_dispatcher/pipeline/test/pipeline_refs/bbb-nfs-url.yaml
index f6b9c0a..8b6b190 100644
--- a/lava_dispatcher/pipeline/test/pipeline_refs/bbb-nfs-url.yaml
+++ b/lava_dispatcher/pipeline/test/pipeline_refs/bbb-nfs-url.yaml
@@ -34,12 +34,13 @@
- {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
- {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
- {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
- - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.apply_overlay.ConfigurePreseedFile, name: configure-preseed-file}
+ - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
- class: actions.boot.u_boot.UBootAction
name: uboot-action
pipeline:
+ - {class: actions.boot.u_boot.UBootPrepareKernelAction, name: uboot-prepare-kernel}
- {class: actions.boot.u_boot.UBootSecondaryMedia, name: uboot-from-media}
- {class: actions.boot.u_boot.UBootCommandOverlay, name: uboot-overlay}
- {class: connections.serial.ConnectDevice, name: connect-device}
diff --git a/lava_dispatcher/pipeline/test/pipeline_refs/cubietruck-removable.yaml b/lava_dispatcher/pipeline/test/pipeline_refs/cubietruck-removable.yaml
index 17af63a..201364c 100644
--- a/lava_dispatcher/pipeline/test/pipeline_refs/cubietruck-removable.yaml
+++ b/lava_dispatcher/pipeline/test/pipeline_refs/cubietruck-removable.yaml
@@ -42,12 +42,13 @@
- {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
- {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
- {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
- - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.apply_overlay.ConfigurePreseedFile, name: configure-preseed-file}
+ - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
- class: actions.boot.u_boot.UBootAction
name: uboot-action
pipeline:
+ - {class: actions.boot.u_boot.UBootPrepareKernelAction, name: uboot-prepare-kernel}
- {class: actions.boot.u_boot.UBootSecondaryMedia, name: uboot-from-media}
- {class: actions.boot.u_boot.UBootCommandOverlay, name: uboot-overlay}
- {class: connections.serial.ConnectDevice, name: connect-device}
@@ -73,11 +74,6 @@
- class: actions.deploy.removable.MassStorage
name: storage-deploy
pipeline:
- - class: actions.deploy.download.DownloaderAction
- name: download_retry
- pipeline:
- - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
- - {class: actions.deploy.removable.DDAction, name: dd-image}
- {class: actions.deploy.overlay.CustomisationAction, name: customise}
- class: actions.deploy.overlay.OverlayAction
name: lava-overlay
@@ -93,10 +89,17 @@
- {class: actions.deploy.testdef.TestRunnerAction, name: test-runscript-overlay}
- {class: actions.deploy.overlay.CompressOverlay, name: compress-overlay}
- {class: actions.deploy.overlay.PersistentNFSOverlay, name: persistent-nfs-overlay}
+ - class: actions.deploy.download.DownloaderAction
+ name: download_retry
+ pipeline:
+ - {class: actions.deploy.download.HttpDownloadAction, name: http_download}
+ - {class: actions.deploy.apply_overlay.ApplyOverlayImage, name: apply-overlay-image}
+ - {class: actions.deploy.removable.DDAction, name: dd-image}
- {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
- class: actions.boot.u_boot.UBootAction
name: uboot-action
pipeline:
+ - {class: actions.boot.u_boot.UBootPrepareKernelAction, name: uboot-prepare-kernel}
- {class: actions.boot.u_boot.UBootSecondaryMedia, name: uboot-from-media}
- {class: actions.boot.u_boot.UBootCommandOverlay, name: uboot-overlay}
- {class: connections.serial.ConnectDevice, name: connect-device}
diff --git a/lava_dispatcher/pipeline/test/pipeline_refs/grub.yaml b/lava_dispatcher/pipeline/test/pipeline_refs/grub.yaml
index d831330..0bd6e6f 100644
--- a/lava_dispatcher/pipeline/test/pipeline_refs/grub.yaml
+++ b/lava_dispatcher/pipeline/test/pipeline_refs/grub.yaml
@@ -38,8 +38,8 @@
- {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
- {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
- {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
- - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.apply_overlay.ConfigurePreseedFile, name: configure-preseed-file}
+ - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
- class: actions.boot.grub.GrubMainAction
name: grub-main-action
diff --git a/lava_dispatcher/pipeline/test/pipeline_refs/ipxe.yaml b/lava_dispatcher/pipeline/test/pipeline_refs/ipxe.yaml
index 749a034..09c0ae5 100644
--- a/lava_dispatcher/pipeline/test/pipeline_refs/ipxe.yaml
+++ b/lava_dispatcher/pipeline/test/pipeline_refs/ipxe.yaml
@@ -38,8 +38,8 @@
- {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
- {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
- {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
- - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.apply_overlay.ConfigurePreseedFile, name: configure-preseed-file}
+ - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
- class: actions.boot.ipxe.BootloaderAction
name: bootloader-action
diff --git a/lava_dispatcher/pipeline/test/pipeline_refs/kexec.yaml b/lava_dispatcher/pipeline/test/pipeline_refs/kexec.yaml
index 4889708..511a924 100644
--- a/lava_dispatcher/pipeline/test/pipeline_refs/kexec.yaml
+++ b/lava_dispatcher/pipeline/test/pipeline_refs/kexec.yaml
@@ -35,12 +35,13 @@
- {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
- {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
- {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
- - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.apply_overlay.ConfigurePreseedFile, name: configure-preseed-file}
+ - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
- class: actions.boot.u_boot.UBootAction
name: uboot-action
pipeline:
+ - {class: actions.boot.u_boot.UBootPrepareKernelAction, name: uboot-prepare-kernel}
- {class: actions.boot.u_boot.UBootSecondaryMedia, name: uboot-from-media}
- {class: actions.boot.u_boot.UBootCommandOverlay, name: uboot-overlay}
- {class: connections.serial.ConnectDevice, name: connect-device}
diff --git a/lava_dispatcher/pipeline/test/pipeline_refs/mustang-uefi.yaml b/lava_dispatcher/pipeline/test/pipeline_refs/mustang-uefi.yaml
index 3fcef16..61f05d9 100644
--- a/lava_dispatcher/pipeline/test/pipeline_refs/mustang-uefi.yaml
+++ b/lava_dispatcher/pipeline/test/pipeline_refs/mustang-uefi.yaml
@@ -39,8 +39,8 @@
- {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
- {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
- {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
- - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.apply_overlay.ConfigurePreseedFile, name: configure-preseed-file}
+ - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
- class: actions.boot.uefi_menu.UefiMenuAction
name: uefi-menu-action
diff --git a/lava_dispatcher/pipeline/test/pipeline_refs/uboot-multiple.yaml b/lava_dispatcher/pipeline/test/pipeline_refs/uboot-multiple.yaml
index 7f4499e..d14726b 100644
--- a/lava_dispatcher/pipeline/test/pipeline_refs/uboot-multiple.yaml
+++ b/lava_dispatcher/pipeline/test/pipeline_refs/uboot-multiple.yaml
@@ -29,12 +29,13 @@
- {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
- {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
- {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
- - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.apply_overlay.ConfigurePreseedFile, name: configure-preseed-file}
+ - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
- class: actions.boot.u_boot.UBootAction
name: uboot-action
pipeline:
+ - {class: actions.boot.u_boot.UBootPrepareKernelAction, name: uboot-prepare-kernel}
- {class: actions.boot.u_boot.UBootSecondaryMedia, name: uboot-from-media}
- {class: actions.boot.u_boot.UBootCommandOverlay, name: uboot-overlay}
- {class: connections.serial.ConnectDevice, name: connect-device}
@@ -84,12 +85,13 @@
- {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
- {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
- {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
- - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.apply_overlay.ConfigurePreseedFile, name: configure-preseed-file}
+ - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
- class: actions.boot.u_boot.UBootAction
name: uboot-action
pipeline:
+ - {class: actions.boot.u_boot.UBootPrepareKernelAction, name: uboot-prepare-kernel}
- {class: actions.boot.u_boot.UBootSecondaryMedia, name: uboot-from-media}
- {class: actions.boot.u_boot.UBootCommandOverlay, name: uboot-overlay}
- {class: connections.serial.ConnectDevice, name: connect-device}
diff --git a/lava_dispatcher/pipeline/test/pipeline_refs/uboot.yaml b/lava_dispatcher/pipeline/test/pipeline_refs/uboot.yaml
index 929d835..436c825 100644
--- a/lava_dispatcher/pipeline/test/pipeline_refs/uboot.yaml
+++ b/lava_dispatcher/pipeline/test/pipeline_refs/uboot.yaml
@@ -39,12 +39,13 @@
- {class: actions.deploy.apply_overlay.ExtractRamdisk, name: extract-overlay-ramdisk}
- {class: actions.deploy.apply_overlay.ExtractModules, name: extract-modules}
- {class: actions.deploy.apply_overlay.ApplyOverlayTftp, name: apply-overlay-tftp}
- - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.apply_overlay.ConfigurePreseedFile, name: configure-preseed-file}
+ - {class: actions.deploy.apply_overlay.CompressRamdisk, name: compress-ramdisk}
- {class: actions.deploy.environment.DeployDeviceEnvironment, name: deploy-device-env}
- class: actions.boot.u_boot.UBootAction
name: uboot-action
pipeline:
+ - {class: actions.boot.u_boot.UBootPrepareKernelAction, name: uboot-prepare-kernel}
- {class: actions.boot.u_boot.UBootSecondaryMedia, name: uboot-from-media}
- {class: actions.boot.u_boot.UBootCommandOverlay, name: uboot-overlay}
- {class: connections.serial.ConnectDevice, name: connect-device}
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/bbb-skip-install.yaml b/lava_dispatcher/pipeline/test/sample_jobs/bbb-skip-install.yaml
index a274668..8e161e2 100644
--- a/lava_dispatcher/pipeline/test/sample_jobs/bbb-skip-install.yaml
+++ b/lava_dispatcher/pipeline/test/sample_jobs/bbb-skip-install.yaml
@@ -28,6 +28,7 @@ actions:
nfsrootfs:
url: http://snapshots.linaro.org/components/lava/standard/debian/jessie/armhf/3/jessie-armhf-nfs.tar.gz
compression: gz
+ prefix: jessie/
os: oe
dtb:
url: http://snapshots.linaro.org/components/lava/standard/debian/jessie/armhf/3/dtbs/am335x-boneblack.dtb
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/cubietruck-removable.yaml b/lava_dispatcher/pipeline/test/sample_jobs/cubietruck-removable.yaml
index 05568f8..9f93417 100644
--- a/lava_dispatcher/pipeline/test/sample_jobs/cubietruck-removable.yaml
+++ b/lava_dispatcher/pipeline/test/sample_jobs/cubietruck-removable.yaml
@@ -63,7 +63,9 @@ actions:
url: http://images.validation.linaro.org/functional-test-images/panda/panda-raring_developer_20130723-408.img.gz
compression: gz
device: SanDisk_Ultra # needs to be exposed in the device-specific UI
- download: /usr/bin/wget
+ download:
+ tool: /usr/bin/wget
+ options: --no-check-certificate --no-proxy --connect-timeout=30 -S --progress=dot:giga -O - {DOWNLOAD_URL}
- boot:
method: u-boot
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/grub-centos-installed-x86.yaml b/lava_dispatcher/pipeline/test/sample_jobs/grub-centos-installed-x86.yaml
new file mode 100644
index 0000000..d3e6456
--- /dev/null
+++ b/lava_dispatcher/pipeline/test/sample_jobs/grub-centos-installed-x86.yaml
@@ -0,0 +1,24 @@
+device_type: grub-x86
+
+job_name: grub-x86-centos-installed
+timeouts:
+ job:
+ minutes: 300
+ action:
+ minutes: 300
+priority: medium
+
+actions:
+ - boot:
+ method: grub
+ os: centos
+ commands: centos-installed
+ auto_login:
+ login_prompt: 'login:'
+ username: root
+ password_prompt: 'Password:'
+ password: linaro
+ timeout:
+ minutes: 5
+ prompts:
+ - 'root@centos ~'
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/grub-centos-installer-x86.yaml b/lava_dispatcher/pipeline/test/sample_jobs/grub-centos-installer-x86.yaml
new file mode 100644
index 0000000..5479c7e
--- /dev/null
+++ b/lava_dispatcher/pipeline/test/sample_jobs/grub-centos-installer-x86.yaml
@@ -0,0 +1,62 @@
+device_type: grub-x86
+
+job_name: grub-x86-centos-installer
+timeouts:
+ job:
+ minutes: 300
+ action:
+ minutes: 300
+priority: medium
+
+actions:
+ - deploy:
+ timeout:
+ minutes: 10
+ to: tftp
+ kernel:
+ url: http://mirror.centos.org/altarch/7/os/i386/images/pxeboot/vmlinuz
+ ramdisk:
+ url: http://mirror.centos.org/altarch/7/os/i386/images/pxeboot/initrd.img
+ compression: xz
+ preseed:
+ url: http://ironhide.bounceme.net/centos/installer/centos-ks-lab-i386.cfg
+ os: centos_installer
+
+ - boot:
+ method: grub
+ commands: centos-installer
+ expect_shell: False
+ timeout:
+ minutes: 50
+ boot_finished:
+ - 'Restarting system.'
+ - 'dracut Warning: Killing all remaining processes'
+
+ - boot:
+ method: grub
+ os: centos
+ commands: centos-installed
+ auto_login:
+ login_prompt: 'login:'
+ username: root
+ password_prompt: 'Password:'
+ password: linaro
+ timeout:
+ minutes: 5
+ prompts:
+ - 'root@centos ~'
+
+ - test:
+ failure_retry: 3
+ name: kvm-basic-singlenode
+ timeout:
+ minutes: 5
+ definitions:
+ - repository: git://git.linaro.org/qa/test-definitions.git
+ from: git
+ path: ubuntu/smoke-tests-basic.yaml
+ name: smoke-tests
+ - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+ from: git
+ path: lava-test-shell/single-node/singlenode03.yaml
+ name: singlenode-advanced
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/grub-installer.yaml b/lava_dispatcher/pipeline/test/sample_jobs/grub-installer.yaml
index 00d9782..c0b10be 100644
--- a/lava_dispatcher/pipeline/test/sample_jobs/grub-installer.yaml
+++ b/lava_dispatcher/pipeline/test/sample_jobs/grub-installer.yaml
@@ -25,19 +25,28 @@ actions:
- boot:
method: grub
- expect-shell: false
+ expect_shell: False
commands: debian-installer
timeout:
minutes: 50
- boot-finished: 'reboot: Restarting system'
+ boot_finished:
+ - 'reboot: Restarting system'
+ - 'reboot: System halted'
+ - 'Requesting system halt'
- boot:
method: grub
commands: debian-installed
+ auto_login:
+ login_prompt: 'login:'
+ username: root
+ password_prompt: 'Password:'
+ password: linaro123
timeout:
minutes: 5
prompts:
- 'root@debian:~#'
+ - 'root@d02:~#'
- test:
failure_retry: 3
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/juno-uboot-nfs.yaml b/lava_dispatcher/pipeline/test/sample_jobs/juno-uboot-nfs.yaml
new file mode 100644
index 0000000..0621e79
--- /dev/null
+++ b/lava_dispatcher/pipeline/test/sample_jobs/juno-uboot-nfs.yaml
@@ -0,0 +1,46 @@
+# Sample JOB definition for a juno u-boot NFS job
+
+device_type: juno
+
+job_name: juno-uboot-nfs
+timeouts:
+ job:
+ minutes: 15
+ action:
+ minutes: 5
+priority: medium
+visibility: public
+
+actions:
+
+ - deploy:
+ timeout:
+ minutes: 2
+ to: nfs
+ os: oe
+ nfsrootfs:
+ url: http://releases.linaro.org/openembedded/juno-lsk/16.02/linaro-image-minimal-genericarmv8-20160222-790.rootfs.tar.gz
+ compression: gz
+
+ - boot:
+ method: u-boot
+ commands: nfs
+ type: booti
+ parameters:
+ shutdown-message: "reboot: Restarting system"
+ prompts:
+ - '/ #'
+ - 'linaro-test'
+ - 'root@debian:~#'
+ - 'root@genericarmv8:~#'
+
+ - test:
+ failure_retry: 3
+ name: juno-basics
+ timeout:
+ minutes: 5
+ definitions:
+ - repository: git://git.linaro.org/qa/test-definitions.git
+ from: git
+ path: openembedded/smoke-tests-basic.yaml
+ name: smoke-tests
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/juno-uboot-removable.yaml b/lava_dispatcher/pipeline/test/sample_jobs/juno-uboot-removable.yaml
new file mode 100644
index 0000000..c47a676
--- /dev/null
+++ b/lava_dispatcher/pipeline/test/sample_jobs/juno-uboot-removable.yaml
@@ -0,0 +1,81 @@
+# Sample JOB definition for a juno u-boot removable media job
+
+device_type: juno
+
+job_name: juno-uboot-removable
+timeouts:
+ job:
+ minutes: 15
+ action:
+ minutes: 5
+priority: medium
+visibility: public
+
+actions:
+
+ - deploy:
+ timeout:
+ minutes: 2
+ to: tftp
+ namespace: master-image
+ kernel:
+ url: http://images.validation.linaro.org/juno/kernel/Image
+ os: oe
+ dtb:
+ url: http://images.validation.linaro.org/juno/kernel/juno.dtb
+ nfsrootfs:
+ url: http://releases.linaro.org/openembedded/juno-lsk/16.02/linaro-image-minimal-genericarmv8-20160222-790.rootfs.tar.gz
+ compression: gz
+
+ - boot:
+ method: u-boot
+ namespace: master-image
+ commands: tftp
+ type: booti
+ parameters:
+ shutdown-message: "reboot: Restarting system"
+ prompts:
+ - '/ #'
+ - 'linaro-test'
+ - 'root@debian:~#'
+ - 'root@genericarmv8:~#'
+
+ - deploy:
+ timeout:
+ minutes: 10
+ to: usb
+ namespace: test-image
+ os: oe
+ # do NOT use the name image as this breaks during download action
+ image:
+ url: http://releases.linaro.org/openembedded/juno-lsk/16.02/lt-vexpress64-openembedded_minimal-armv8-gcc-5.2_20160121-736.img.gz
+ compression: gz
+ device: SanDisk_Ultra # needs to be exposed in the device-specific UI
+ download:
+ tool: /usr/bin/wget
+ options: --no-check-certificate --no-proxy --connect-timeout=30 -S --progress=dot:giga -O - {DOWNLOAD_URL}
+
+ - boot:
+ method: u-boot
+ namespace: test-image
+ commands: use-defaults
+ type: booti
+ parameters:
+ shutdown-message: "reboot: Restarting system"
+ prompts:
+ - '/ #'
+ - 'linaro-test'
+ - 'root@debian:~#'
+ - 'root@genericarmv8:~#'
+
+ - test:
+ namespace: test-image
+ failure_retry: 3
+ name: juno-basics
+ timeout:
+ minutes: 5
+ definitions:
+ - repository: git://git.linaro.org/qa/test-definitions.git
+ from: git
+ path: openembedded/smoke-tests-basic.yaml
+ name: smoke-tests
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/juno-uboot-tftp.yaml b/lava_dispatcher/pipeline/test/sample_jobs/juno-uboot-tftp.yaml
new file mode 100644
index 0000000..8217297
--- /dev/null
+++ b/lava_dispatcher/pipeline/test/sample_jobs/juno-uboot-tftp.yaml
@@ -0,0 +1,51 @@
+# Sample JOB definition for a juno u-boot tftp job
+
+device_type: juno
+
+job_name: juno-uboot-tftp
+timeouts:
+ job:
+ minutes: 15
+ action:
+ minutes: 5
+priority: medium
+visibility: public
+
+actions:
+
+ - deploy:
+ timeout:
+ minutes: 2
+ to: tftp
+ kernel:
+ url: http://images.validation.linaro.org/juno/kernel/Image
+ os: oe
+ dtb:
+ url: http://images.validation.linaro.org/juno/kernel/juno.dtb
+ nfsrootfs:
+ url: http://releases.linaro.org/openembedded/juno-lsk/16.02/linaro-image-minimal-genericarmv8-20160222-790.rootfs.tar.gz
+ compression: gz
+
+ - boot:
+ method: u-boot
+ commands: tftp
+ type: booti
+ parameters:
+ shutdown-message: "reboot: Restarting system"
+ prompts:
+ - '/ #'
+ - 'linaro-test'
+ - 'root@debian:~#'
+ - 'root@genericarmv8:~#'
+
+ - test:
+ failure_retry: 3
+ name: juno-basics
+ timeout:
+ minutes: 5
+ definitions:
+ - repository: git://git.linaro.org/qa/test-definitions.git
+ from: git
+ path: openembedded/smoke-tests-basic.yaml
+ name: smoke-tests
+
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/juno-uefi-tftp.yaml b/lava_dispatcher/pipeline/test/sample_jobs/juno-uefi-tftp.yaml
new file mode 100644
index 0000000..554bcdc
--- /dev/null
+++ b/lava_dispatcher/pipeline/test/sample_jobs/juno-uefi-tftp.yaml
@@ -0,0 +1,39 @@
+# Sample JOB definition for a juno uefi tftp job
+
+device_type: juno
+
+job_name: juno-uefi-tftp
+timeouts:
+ job:
+ minutes: 15
+ action:
+ minutes: 5
+ connection:
+ minutes: 3
+ uefi-menu-selector:
+ minutes: 5
+priority: medium
+visibility: public
+
+actions:
+
+ - deploy:
+ timeout:
+ minutes: 2
+ to: tftp
+ kernel:
+ url: http://images.validation.linaro.org/juno/kernel/Image
+ dtb:
+ url: http://images.validation.linaro.org/juno/kernel/juno.dtb
+ nfsrootfs:
+ url: http://releases.linaro.org/openembedded/juno-lsk/15.09/linaro-image-minimal-genericarmv8-20150921-770.rootfs.tar.gz
+ compression: gz
+ os: oe
+
+ - boot:
+ method: uefi-menu
+ commands: tftp
+ prompts:
+ - '/ #'
+ - 'linaro-test'
+ - 'root@debian:~#'
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/kvm-params.yaml b/lava_dispatcher/pipeline/test/sample_jobs/kvm-params.yaml
index cecd967..59381dc 100644
--- a/lava_dispatcher/pipeline/test/sample_jobs/kvm-params.yaml
+++ b/lava_dispatcher/pipeline/test/sample_jobs/kvm-params.yaml
@@ -45,6 +45,13 @@ actions:
VARIABLE_NAME_1: "eth2"
VARIABLE_NAME_2: "wlan0"
name: smoke-tests
+ - repository: http://git.linaro.org/people/neil.williams/temp-functional-tests.git
+ from: git
+ path: params.yaml
+ params:
+ VARIABLE_NAME_1: "eth2"
+ VARIABLE_NAME_2: "wlan0"
+ name: smoke-tests
- repository: http://git.linaro.org/lava-team/lava-functional-tests.git
from: git
path: lava-test-shell/single-node/singlenode03.yaml
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/kvm.yaml b/lava_dispatcher/pipeline/test/sample_jobs/kvm.yaml
index d234cd5..a021040 100644
--- a/lava_dispatcher/pipeline/test/sample_jobs/kvm.yaml
+++ b/lava_dispatcher/pipeline/test/sample_jobs/kvm.yaml
@@ -54,6 +54,7 @@ actions:
from: git
path: lava-test-shell/single-node/singlenode03.yaml
name: singlenode-advanced
+ revision: 441b61
context:
arch: amd64
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/mustang-image.yaml b/lava_dispatcher/pipeline/test/sample_jobs/mustang-image.yaml
new file mode 100644
index 0000000..a956bed
--- /dev/null
+++ b/lava_dispatcher/pipeline/test/sample_jobs/mustang-image.yaml
@@ -0,0 +1,58 @@
+# Sample JOB definition for a u-boot job
+
+device_type: mustang
+
+job_name: uboot-pipeline-mustang-image
+timeouts:
+ job:
+ minutes: 15 # timeout for the whole job (default: ??h)
+ action:
+    minutes: 5 # default timeout applied for each action; can be overridden in the action itself (default: ?h)
+priority: medium
+visibility: public
+
+# example old-style job: https://staging.validation.linaro.org/scheduler/job/113682/definition
+
+actions:
+
+ # needs to be a list of hashes to retain the order
+ - deploy:
+ timeout:
+ minutes: 2
+ to: tftp
+ kernel:
+ url: http://storage.kernelci.org/stable-rc/v4.6.5-34-gbbfa6a5ac148/arm64-defconfig+CONFIG_RANDOMIZE_BASE=y/Image
+ type: image
+ ramdisk:
+ url: http://images.validation.linaro.org/functional-test-images/common/linaro-image-minimal-initramfs-genericarmv7a.cpio.gz.u-boot
+ header: u-boot
+ compression: gz
+ os: oe
+ dtb:
+ url: http://storage.kernelci.org/stable-rc/v4.6.5-34-gbbfa6a5ac148/arm64-defconfig+CONFIG_RANDOMIZE_BASE=y/dtbs/apm/apm-mustang.dtb
+
+ - boot:
+ method: u-boot
+ commands: ramdisk
+ type: uimage
+ parameters:
+ shutdown-message: "reboot: Restarting system"
+ prompts:
+ - 'linaro-test'
+ - 'root@debian:~#'
+
+ - test:
+ failure_retry: 3
+ name: kvm-basic-singlenode # is not present, use "test $N"
+ # only s, m & h are supported.
+ timeout:
+ minutes: 5 # uses install:deps, so takes longer than singlenode01
+ definitions:
+ - repository: git://git.linaro.org/qa/test-definitions.git
+ from: git
+ path: ubuntu/smoke-tests-basic.yaml
+ name: smoke-tests
+ - repository: http://git.linaro.org/lava-team/lava-functional-tests.git
+ from: git
+ path: lava-test-shell/single-node/singlenode03.yaml
+ name: singlenode-advanced
diff --git a/lava_dispatcher/pipeline/test/sample_jobs/uboot-ramdisk.yaml b/lava_dispatcher/pipeline/test/sample_jobs/uboot-ramdisk.yaml
index de61c81..5520a6b 100644
--- a/lava_dispatcher/pipeline/test/sample_jobs/uboot-ramdisk.yaml
+++ b/lava_dispatcher/pipeline/test/sample_jobs/uboot-ramdisk.yaml
@@ -7,7 +7,13 @@ timeouts:
job:
minutes: 15 # timeout for the whole job (default: ??h)
action:
- minutes: 5 # default timeout applied for each action; can be overriden in the action itself (default: ?h)
+    minutes: 5  # default timeout applied for each action; can be overridden in the action itself (default: ?h)
+ connection:
+ minutes: 4
+ actions:
+ lava-test-shell:
+ minutes: 7
+
priority: medium
visibility: public
@@ -42,10 +48,9 @@ actions:
- test:
failure_retry: 3
- name: kvm-basic-singlenode # is not present, use "test $N"
- # only s, m & h are supported.
+ name: kvm-basic-singlenode
timeout:
- minutes: 5 # uses install:deps, so takes longer than singlenode01
+ minutes: 5
definitions:
- repository: git://git.linaro.org/qa/test-definitions.git
from: git
diff --git a/lava_dispatcher/pipeline/test/test_connections.py b/lava_dispatcher/pipeline/test/test_connections.py
index 9b127b4..f18a4c0 100644
--- a/lava_dispatcher/pipeline/test/test_connections.py
+++ b/lava_dispatcher/pipeline/test/test_connections.py
@@ -281,12 +281,16 @@ class TestTimeouts(unittest.TestCase):
def test_action_timeout(self):
factory = Factory()
job = factory.create_bbb_job('sample_jobs/uboot-ramdisk.yaml')
+ job.validate()
deploy = [action for action in job.pipeline.actions if action.name == 'tftp-deploy'][0]
test_action = [action for action in job.pipeline.actions if action.name == 'lava-test-retry'][0]
+ test_shell = [action for action in test_action.internal_pipeline.actions if action.name == 'lava-test-shell'][0]
+ self.assertEqual(test_shell.connection_timeout.duration, 240) # job specifies 4 minutes
+ self.assertEqual(test_shell.timeout.duration, 420) # job specifies 7 minutes
self.assertEqual(deploy.timeout.duration, 120) # job specifies 2 minutes
- self.assertEqual(deploy.connection_timeout.duration, Timeout.default_duration())
+ self.assertNotEqual(deploy.connection_timeout.duration, Timeout.default_duration())
+        self.assertNotEqual(deploy.connection_timeout.duration, test_shell.connection_timeout.duration)
self.assertEqual(test_action.timeout.duration, 300)
- self.assertEqual(test_action.connection_timeout.duration, Timeout.default_duration())
def test_job_connection_timeout(self):
"""
@@ -300,9 +304,14 @@ class TestTimeouts(unittest.TestCase):
for action in job.pipeline.actions:
if action.internal_pipeline:
for check_action in action.internal_pipeline.actions:
- if check_action.connection_timeout and check_action.name != 'uboot-retry':
- # uboot-retry has an override in this sample job
+ if check_action.connection_timeout and check_action.name not in ['uboot-retry', 'lava-test-shell']:
+ # lava-test-shell and uboot-retry have overrides in this sample job
+ # lava-test-shell from the job, uboot-retry from the device
self.assertEqual(check_action.connection_timeout.duration, 20)
+ deploy = [action for action in job.pipeline.actions if action.name == 'tftp-deploy'][0]
+ test_action = [action for action in job.pipeline.actions if action.name == 'lava-test-retry'][0]
+ test_shell = [action for action in test_action.internal_pipeline.actions if action.name == 'lava-test-shell'][0]
+ self.assertEqual(test_shell.connection_timeout.duration, 20)
def test_action_connection_timeout(self):
"""
@@ -311,6 +320,8 @@ class TestTimeouts(unittest.TestCase):
with open(os.path.join(
os.path.dirname(__file__), './sample_jobs/uboot-ramdisk.yaml'), 'r') as uboot_ramdisk:
data = yaml.load(uboot_ramdisk)
+ connection_timeout = Timeout.parse(data['timeouts']['connection'])
+ self.assertEqual(connection_timeout, 240)
data['timeouts']['connections'] = {'uboot-retry': {}}
data['timeouts']['connections']['uboot-retry'] = {'seconds': 20}
job = self.create_custom_job(yaml.dump(data))
@@ -318,7 +329,7 @@ class TestTimeouts(unittest.TestCase):
retry = [action for action in boot.internal_pipeline.actions if action.name == 'uboot-retry'][0]
self.assertEqual(retry.timeout.duration, Timeout.parse(job.device['timeouts']['actions'][retry.name]))
self.assertEqual(
- Timeout.parse(job.device['timeouts']['connections'][retry.name]),
+ Timeout.parse(data['timeouts']['connections'][retry.name]),
retry.connection_timeout.duration
)
self.assertEqual(90, retry.timeout.duration)
diff --git a/lava_dispatcher/pipeline/test/test_defs.py b/lava_dispatcher/pipeline/test/test_defs.py
index eeb84e1..47e3558 100644
--- a/lava_dispatcher/pipeline/test/test_defs.py
+++ b/lava_dispatcher/pipeline/test/test_defs.py
@@ -18,14 +18,17 @@
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
+import re
import os
-
import sys
import glob
import stat
+import yaml
+import pexpect
import unittest
from lava_dispatcher.pipeline.power import FinalizeAction
-from lava_dispatcher.pipeline.actions.test.shell import TestShellRetry
+from lava_dispatcher.pipeline.action import InfrastructureError
+from lava_dispatcher.pipeline.actions.test.shell import TestShellRetry, PatternFixup
from lava_dispatcher.pipeline.test.test_basic import Factory
from lava_dispatcher.pipeline.test.test_uboot import Factory as BBBFactory
from lava_dispatcher.pipeline.actions.deploy import DeployAction
@@ -39,12 +42,24 @@ from lava_dispatcher.pipeline.actions.deploy.testdef import (
)
from lava_dispatcher.pipeline.actions.boot import BootAction
from lava_dispatcher.pipeline.actions.deploy.overlay import OverlayAction
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
# pylint: disable=duplicate-code
+# Test the loading of test definitions within the deploy stage
+def allow_missing_path(function, testcase, path):
+ try:
+ function()
+ except InfrastructureError as exc:
+ if not infrastructure_error(path):
+ testcase.fail(exc)
-# Test the loading of test definitions within the deploy stage
+
+def check_missing_path(testcase, exception, path):
+    exception = getattr(exception, 'exception', exception)
+    if isinstance(exception, InfrastructureError) and not infrastructure_error(path):
+        testcase.fail(exception)
class TestDefinitionHandlers(unittest.TestCase): # pylint: disable=too-many-public-methods
@@ -89,6 +104,17 @@ class TestDefinitionHandlers(unittest.TestCase): # pylint: disable=too-many-pub
self.assertIsNotNone(testdef.parameters['deployment_data']['lava_test_results_dir'])
# self.assertIsNotNone(testdef.job.device['hostname'])
+ def test_vcs_parameters(self):
+ deploy = [action for action in self.job.pipeline.actions if action.name == 'deployimages'][0]
+ overlay = [action for action in deploy.internal_pipeline.actions if action.name == 'lava-overlay'][0]
+ testdef = [action for action in overlay.internal_pipeline.actions if action.name == 'test-definition'][0]
+ git_repos = [action for action in testdef.internal_pipeline.actions if action.name == 'git-repo-action']
+ for git_repo in git_repos:
+ if git_repo.parameters['repository'] == 'http://git.linaro.org/lava-team/lava-functional-tests.git':
+ self.assertIn('revision', git_repo.parameters)
+ else:
+ self.assertNotIn('revision', git_repo.parameters)
+
def test_overlay(self):
script_list = [
@@ -146,7 +172,7 @@ class TestDefinitionSimple(unittest.TestCase): # pylint: disable=too-many-publi
def test_job_without_tests(self):
deploy = boot = finalize = None
- self.job.pipeline.validate_actions()
+ allow_missing_path(self.job.pipeline.validate_actions, self, 'qemu-system-x86_64')
for action in self.job.pipeline.actions:
self.assertNotIsInstance(action, TestDefinitionAction)
self.assertNotIsInstance(action, OverlayAction)
@@ -158,6 +184,7 @@ class TestDefinitionSimple(unittest.TestCase): # pylint: disable=too-many-publi
self.assertIsInstance(finalize, FinalizeAction)
self.assertEqual(len(self.job.pipeline.actions), 3) # deploy, boot, finalize
apply_overlay = deploy.pipeline.children[deploy.pipeline][4]
+ self.assertIsNotNone(apply_overlay)
class TestDefinitionParams(unittest.TestCase): # pylint: disable=too-many-public-methods
@@ -169,7 +196,7 @@ class TestDefinitionParams(unittest.TestCase): # pylint: disable=too-many-publi
def test_job_without_tests(self):
boot = finalize = None
- self.job.pipeline.validate_actions()
+ allow_missing_path(self.job.pipeline.validate_actions, self, 'qemu-system-x86_64')
deploy = [action for action in self.job.pipeline.actions if action.name == 'deployimages'][0]
overlay = [action for action in deploy.internal_pipeline.actions if action.name == 'lava-overlay'][0]
testdef = [action for action in overlay.internal_pipeline.actions if action.name == 'test-definition'][0]
@@ -191,14 +218,30 @@ class TestDefinitionParams(unittest.TestCase): # pylint: disable=too-many-publi
self.assertIsInstance(install, TestInstallAction)
self.assertIsInstance(runsh, TestRunnerAction)
self.assertIsNot(list(install.parameters.items()), [])
- testdef = {'params': {'VARIABLE_NAME_1': 'value_1', 'VARIABLE_NAME_2': 'value_2'}}
+ testdef = {'params': {'VARIABLE_NAME_1': 'value_1',
+ 'VARIABLE_NAME_2': 'value_2'}}
+ content = test.handle_parameters(testdef)
+ self.assertEqual(
+ set(content),
+ {
+ '###default parameters from test definition###\n',
+ "VARIABLE_NAME_1='value_1'\n", "VARIABLE_NAME_2='value_2'\n",
+ '######\n', '###test parameters from job submission###\n',
+ "VARIABLE_NAME_1='eth2'\n", "VARIABLE_NAME_2='wlan0'\n",
+ '######\n'
+ }
+ )
+ testdef = {'parameters': {'VARIABLE_NAME_1': 'value_1',
+ 'VARIABLE_NAME_2': 'value_2'}}
content = test.handle_parameters(testdef)
self.assertEqual(
set(content),
{
- '###default parameters from yaml###\n', "VARIABLE_NAME_1='value_1'\n", "VARIABLE_NAME_2='value_2'\n",
- '######\n', '###test parameters from json###\n', "VARIABLE_NAME_1='eth2'\n",
- "VARIABLE_NAME_2='wlan0'\n", '######\n'
+ '###default parameters from test definition###\n',
+ "VARIABLE_NAME_1='value_1'\n", "VARIABLE_NAME_2='value_2'\n",
+ '######\n', '###test parameters from job submission###\n',
+ "VARIABLE_NAME_1='eth2'\n", "VARIABLE_NAME_2='wlan0'\n",
+ '######\n'
}
)
@@ -243,8 +286,8 @@ class TestSkipInstall(unittest.TestCase): # pylint: disable=too-many-public-met
self.assertIsNotNone(self.job)
deploy = [action for action in self.job.pipeline.actions if action.name == 'tftp-deploy'][0]
prepare = [action for action in deploy.internal_pipeline.actions if action.name == 'prepare-tftp-overlay'][0]
- apply = [action for action in prepare.internal_pipeline.actions if action.name == 'lava-overlay'][0]
- testoverlay = [action for action in apply.internal_pipeline.actions if action.name == 'test-definition'][0]
+ lava_apply = [action for action in prepare.internal_pipeline.actions if action.name == 'lava-overlay'][0]
+ testoverlay = [action for action in lava_apply.internal_pipeline.actions if action.name == 'test-definition'][0]
testdefs = [action for action in testoverlay.internal_pipeline.actions if action.name == 'test-install-overlay']
ubuntu_testdef = None
single_testdef = None
@@ -263,3 +306,141 @@ class TestSkipInstall(unittest.TestCase): # pylint: disable=too-many-public-met
['keys', 'sources', 'deps', 'steps', 'all']
)
self.assertEqual(single_testdef.skip_options, ['deps'])
+
+
+class TestDefinitions(unittest.TestCase):
+ """
+ For compatibility until the V1 code is removed and we can start
+ cleaning up Lava Test Shell.
+ Parsing patterns in the Test Shell Definition YAML are problematic,
+ difficult to debug and rely on internal python syntax.
+ The fixupdict is even more confusing for all concerned.
+ """
+
+ def setUp(self):
+ super(TestDefinitions, self).setUp()
+ self.testdef = os.path.join(os.path.dirname(__file__), 'testdefs', 'params.yaml')
+ self.res_data = os.path.join(os.path.dirname(__file__), 'testdefs', 'result-data.txt')
+ factory = BBBFactory()
+ self.job = factory.create_bbb_job("sample_jobs/bbb-nfs-url.yaml")
+
+ def test_pattern(self):
+ self.assertTrue(os.path.exists(self.testdef))
+ with open(self.testdef, 'r') as par:
+ params = yaml.load(par)
+ self.assertIn('parse', params.keys())
+ line = 'test1a: pass'
+ self.assertEqual(
+            r'(?P<test_case_id>.*-*):\s+(?P<result>(pass|fail))',
+ params['parse']['pattern'])
+ match = re.search(params['parse']['pattern'], line)
+ self.assertIsNotNone(match)
+ self.assertEqual(match.group(), line)
+ self.assertEqual(match.group(1), 'test1a')
+ self.assertEqual(match.group(2), 'pass')
+
+ def test_v1_defaults(self):
+ pattern = PatternFixup(testdef=None, count=0)
+ # without a name from a testdef, the pattern is not valid.
+ self.assertFalse(pattern.valid())
+ with open(self.testdef, 'r') as par:
+ params = yaml.load(par)
+ pattern = PatternFixup(testdef=params, count=0)
+ self.assertTrue(pattern.valid())
+
+ def test_definition_lists(self):
+ self.job.validate()
+ self.assertIn('common', self.job.context)
+ self.assertIn("test-definition", self.job.context['common'])
+ self.assertIn("testdef_index", self.job.context['common']['test-definition'])
+ self.assertIn("test_list", self.job.context['common']['test-definition'])
+ self.assertEqual(
+ self.job.context['common']['test-definition']['testdef_index'],
+ {0: 'smoke-tests', 1: 'singlenode-advanced'}
+ )
+ test_list = self.job.context['common']['test-definition']['test_list']
+ self.assertEqual(len(test_list), 2)
+ self.assertIn('path', test_list[0])
+ self.assertIn('path', test_list[1])
+ self.assertIn('name', test_list[0])
+ self.assertIn('name', test_list[1])
+ self.assertEqual(test_list[0]['path'], 'ubuntu/smoke-tests-basic.yaml')
+ self.assertEqual(test_list[0]['name'], 'smoke-tests')
+ self.assertEqual(test_list[1]['path'], 'lava-test-shell/single-node/singlenode03.yaml')
+ self.assertEqual(test_list[1]['name'], 'singlenode-advanced')
+ self.assertEqual(
+ self.job.context['common']['test-runscript-overlay']['testdef_levels'],
+ {
+ '1.3.2.4.4': '0_smoke-tests',
+ '1.3.2.4.8': '1_singlenode-advanced'
+ }
+ )
+ tftp_deploy = [action for action in self.job.pipeline.actions if action.name == 'tftp-deploy'][0]
+ prepare = [action for action in tftp_deploy.internal_pipeline.actions if action.name == 'prepare-tftp-overlay'][0]
+ overlay = [action for action in prepare.internal_pipeline.actions if action.name == 'lava-overlay'][0]
+ definition = [action for action in overlay.internal_pipeline.actions if action.name == 'test-definition'][0]
+ git_repos = [action for action in definition.internal_pipeline.actions if action.name == 'git-repo-action']
+ # uuid = "%s_%s" % (self.job.job_id, self.level)
+ self.assertEqual(
+ {repo.uuid for repo in git_repos},
+ {'4212_1.3.2.4.1', '4212_1.3.2.4.5'}
+ )
+ self.assertEqual(
+ set(git_repos[0].get_common_data('test-runscript-overlay', 'testdef_levels').values()),
+ {'1_singlenode-advanced', '0_smoke-tests'}
+ )
+ # fake up a run step
+ with open(self.testdef, 'r') as par:
+ params = yaml.load(par)
+ self.assertEqual(
+            r'(?P<test_case_id>.*-*):\s+(?P<result>(pass|fail))',
+ params['parse']['pattern'])
+ self.job.context.setdefault('test', {})
+ for git_repo in git_repos:
+ self.job.context['test'].setdefault(git_repo.uuid, {})
+ self.job.context['test'][git_repo.uuid]['testdef_pattern'] = {'pattern': params['parse']['pattern']}
+ self.assertEqual(
+ self.job.context['test'],
+ {
+ '4212_1.3.2.4.5': {'testdef_pattern': {'pattern': '(?P<test_case_id>.*-*):\\s+(?P<result>(pass|fail))'}},
+ '4212_1.3.2.4.1': {'testdef_pattern': {'pattern': '(?P<test_case_id>.*-*):\\s+(?P<result>(pass|fail))'}}}
+ )
+ testdef_index = self.job.context['common']['test-definition']['testdef_index']
+ start_run = '0_smoke-tests'
+ uuid_list = definition.get_common_data('repo-action', 'uuid-list')
+ for key, value in testdef_index.items():
+ if start_run == "%s_%s" % (key, value):
+ self.assertEqual('4212_1.3.2.4.1', uuid_list[key])
+ self.assertEqual(
+ self.job.context['test'][uuid_list[key]]['testdef_pattern']['pattern'],
+ '(?P<test_case_id>.*-*):\\s+(?P<result>(pass|fail))'
+ )
+
+ @unittest.skipIf(sys.version > '3', 'pexpect issues in python3')
+ def test_defined_pattern(self):
+ """
+ For python3 support, need to resolve:
+ TypeError: cannot use a bytes pattern on a string-like object
+ TypeError: cannot use a string pattern on a bytes-like object
+ whilst retaining re_pat as a compiled regular expression in the
+ pexpect support.
+ """
+ data = """test1a: pass
+test2a: fail
+test3a: skip
+\"test4a:\" \"unknown\"
+ """
+ with open(self.testdef, 'r') as par:
+ params = yaml.load(par)
+ pattern = params['parse']['pattern']
+ re_pat = re.compile(pattern, re.M)
+ match = re.search(re_pat, data)
+ if match:
+ self.assertEqual(match.groupdict(), {'test_case_id': 'test1a', 'result': 'pass'})
+ child = pexpect.spawn('cat', [self.res_data])
+ child.expect([re_pat, pexpect.EOF])
+ self.assertEqual(child.after, b'test1a: pass')
+ child.expect([re_pat, pexpect.EOF])
+ self.assertEqual(child.after, b'test2a: fail')
+ child.expect([re_pat, pexpect.EOF])
+ self.assertEqual(child.after, pexpect.EOF)
diff --git a/lava_dispatcher/pipeline/test/test_fastboot.py b/lava_dispatcher/pipeline/test/test_fastboot.py
index f26c7da..e669afd 100644
--- a/lava_dispatcher/pipeline/test/test_fastboot.py
+++ b/lava_dispatcher/pipeline/test/test_fastboot.py
@@ -58,6 +58,7 @@ class TestFastbootDeploy(unittest.TestCase): # pylint: disable=too-many-public-
def test_deploy_job(self):
self.assertEqual(self.job.pipeline.job, self.job)
+ self.assertIsInstance(self.job.device['device_path'], list)
for action in self.job.pipeline.actions:
if isinstance(action, DeployAction):
self.assertEqual(action.job, self.job)
diff --git a/lava_dispatcher/pipeline/test/test_grub.py b/lava_dispatcher/pipeline/test/test_grub.py
index a1bb5cc..eb8d523 100644
--- a/lava_dispatcher/pipeline/test/test_grub.py
+++ b/lava_dispatcher/pipeline/test/test_grub.py
@@ -197,7 +197,7 @@ class TestGrubAction(unittest.TestCase): # pylint: disable=too-many-public-meth
for action in job.pipeline.actions:
action.validate()
if not action.valid:
- print(action.errors)
+ raise RuntimeError(action.errors)
self.assertTrue(action.valid)
job.validate()
self.assertEqual(job.pipeline.errors, [])
diff --git a/lava_dispatcher/pipeline/test/test_kvm.py b/lava_dispatcher/pipeline/test/test_kvm.py
index 7caf119..1ec1e51 100644
--- a/lava_dispatcher/pipeline/test/test_kvm.py
+++ b/lava_dispatcher/pipeline/test/test_kvm.py
@@ -26,7 +26,12 @@ import yaml
import pexpect
from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
-from lava_dispatcher.pipeline.action import Pipeline, Action, JobError
+from lava_dispatcher.pipeline.action import (
+ Pipeline,
+ Action,
+ JobError,
+ InfrastructureError,
+)
from lava_dispatcher.pipeline.test.test_basic import Factory, pipeline_reference
from lava_dispatcher.pipeline.job import Job
from lava_dispatcher.pipeline.actions.deploy import DeployAction
@@ -35,6 +40,8 @@ from lava_dispatcher.pipeline.device import NewDevice
from lava_dispatcher.pipeline.parser import JobParser
from lava_dispatcher.pipeline.test.test_messages import FakeConnection
from lava_dispatcher.pipeline.utils.messages import LinuxKernelMessages
+from lava_dispatcher.pipeline.test.test_defs import allow_missing_path, check_missing_path
+from lava_dispatcher.pipeline.utils.shell import infrastructure_error
# pylint: disable=invalid-name
@@ -166,7 +173,7 @@ class TestKVMBasicDeploy(unittest.TestCase): # pylint: disable=too-many-public-
def test_validate(self):
try:
- self.job.pipeline.validate_actions()
+ allow_missing_path(self.job.pipeline.validate_actions, self, 'qemu-system-x86_64')
except JobError as exc:
self.fail(exc)
for action in self.job.pipeline.actions:
@@ -231,7 +238,7 @@ class TestKVMQcow2Deploy(unittest.TestCase): # pylint: disable=too-many-public-
def test_validate(self):
try:
- self.job.pipeline.validate_actions()
+ allow_missing_path(self.job.pipeline.validate_actions, self, 'qemu-system-x86_64')
except JobError as exc:
self.fail(exc)
for action in self.job.pipeline.actions:
@@ -283,6 +290,8 @@ class TestKVMInlineTestDeploy(unittest.TestCase): # pylint: disable=too-many-pu
self.job.pipeline.validate_actions()
except JobError as exc:
self.fail(exc)
+ except InfrastructureError:
+ pass
for action in self.job.pipeline.actions:
self.assertEqual([], action.errors)
@@ -370,7 +379,9 @@ class TestAutoLogin(unittest.TestCase):
'username': 'root'},
'prompts': ['root@debian:~#']})
- self.assertRaises(JobError, self.job.validate)
+ with self.assertRaises((JobError, InfrastructureError)) as check:
+ self.job.validate()
+ check_missing_path(self, check, 'qemu-system-x86_64')
def test_missing_autologin_void_prompts_list(self):
self.assertEqual(len(self.job.pipeline.describe()), 4)
@@ -380,7 +391,9 @@ class TestAutoLogin(unittest.TestCase):
autologinaction.parameters.update({'prompts': []})
- self.assertRaises(JobError, self.job.validate)
+ with self.assertRaises((JobError, InfrastructureError)) as check:
+ self.job.validate()
+ check_missing_path(self, check, 'qemu-system-x86_64')
def test_missing_autologin_void_prompts_list_item(self):
self.assertEqual(len(self.job.pipeline.describe()), 4)
@@ -390,7 +403,9 @@ class TestAutoLogin(unittest.TestCase):
autologinaction.parameters.update({'prompts': ['']})
- self.assertRaises(JobError, self.job.validate)
+ with self.assertRaises((JobError, InfrastructureError)) as check:
+ self.job.validate()
+ check_missing_path(self, check, 'qemu-system-x86_64')
def test_missing_autologin_void_prompts_list_item2(self):
self.assertEqual(len(self.job.pipeline.describe()), 4)
@@ -400,7 +415,9 @@ class TestAutoLogin(unittest.TestCase):
autologinaction.parameters.update({'prompts': ['root@debian:~#', '']})
- self.assertRaises(JobError, self.job.validate)
+ with self.assertRaises((JobError, InfrastructureError)) as check:
+ self.job.validate()
+ check_missing_path(self, check, 'qemu-system-x86_64')
def test_missing_autologin_prompts_list(self):
self.assertEqual(len(self.job.pipeline.describe()), 4)
@@ -427,7 +444,9 @@ class TestAutoLogin(unittest.TestCase):
autologinaction.parameters.update({'prompts': ''})
- self.assertRaises(JobError, self.job.validate)
+ with self.assertRaises((JobError, InfrastructureError)) as check:
+ self.job.validate()
+ check_missing_path(self, check, 'qemu-system-x86_64')
def test_missing_autologin_prompts_str(self):
self.assertEqual(len(self.job.pipeline.describe()), 4)
@@ -544,6 +563,8 @@ class TestChecksum(unittest.TestCase):
self.job.pipeline.validate_actions()
except JobError as exc:
self.fail(exc)
+ except InfrastructureError as exc:
+ check_missing_path(self, exc, 'qemu-system-x86_64')
for action in self.job.pipeline.actions:
self.assertEqual([], action.errors)
@@ -582,6 +603,8 @@ class TestKvmUefi(unittest.TestCase): # pylint: disable=too-many-public-methods
factory = Factory()
self.job = factory.create_kvm_job('sample_jobs/kvm-uefi.yaml', mkdtemp())
+ @unittest.skipIf(infrastructure_error('qemu-system-x86_64'),
+ 'qemu-system-x86_64 not installed')
def test_uefi_path(self):
deploy = [action for action in self.job.pipeline.actions if action.name == 'deployimages'][0]
downloaders = [action for action in deploy.internal_pipeline.actions if action.name == 'download_retry']
diff --git a/lava_dispatcher/pipeline/test/test_menus.py b/lava_dispatcher/pipeline/test/test_menus.py
index cc640cc..68c5a83 100644
--- a/lava_dispatcher/pipeline/test/test_menus.py
+++ b/lava_dispatcher/pipeline/test/test_menus.py
@@ -120,7 +120,7 @@ class TestUefi(unittest.TestCase): # pylint: disable=too-many-public-methods
self.assertEqual(selector.boot_message, params['boot_message']) # final prompt
self.assertEqual(
selector.character_delay,
- self.job.device['character-delays']['boot'])
+ self.job.device['character_delays']['boot'])
def test_uefi_job(self):
self.assertIsNotNone(self.job)
diff --git a/lava_dispatcher/pipeline/test/test_multinode.py b/lava_dispatcher/pipeline/test/test_multinode.py
index 94f31fe..81a7ebc 100644
--- a/lava_dispatcher/pipeline/test/test_multinode.py
+++ b/lava_dispatcher/pipeline/test/test_multinode.py
@@ -20,7 +20,6 @@
import os
-import glob
import yaml
import uuid
import json
@@ -34,8 +33,15 @@ from lava_dispatcher.pipeline.actions.boot.qemu import BootQemuRetry, CallQemuAc
from lava_dispatcher.pipeline.actions.boot import BootAction
from lava_dispatcher.pipeline.actions.test.multinode import MultinodeTestAction
from lava_dispatcher.pipeline.protocols.multinode import MultinodeProtocol
-from lava_dispatcher.pipeline.action import TestError, JobError, Timeout
+from lava_dispatcher.pipeline.action import (
+ TestError,
+ JobError,
+ Timeout,
+ InfrastructureError,
+)
from lava_dispatcher.pipeline.utils.constants import LAVA_MULTINODE_SYSTEM_TIMEOUT
+from lava_dispatcher.pipeline.test.test_defs import allow_missing_path
+
# pylint: disable=protected-access,superfluous-parens
@@ -86,9 +92,9 @@ class TestMultinode(unittest.TestCase): # pylint: disable=too-many-public-metho
def test_multinode_jobs(self):
self.assertIsNotNone(self.client_job)
self.assertIsNotNone(self.server_job)
- self.client_job.validate()
+ allow_missing_path(self.client_job.validate, self, 'qemu-system-x86_64')
+ allow_missing_path(self.server_job.validate, self, 'qemu-system-x86_64')
self.assertEqual(self.client_job.pipeline.errors, [])
- self.server_job.validate()
self.assertEqual(self.server_job.pipeline.errors, [])
def test_protocol(self):
@@ -100,8 +106,11 @@ class TestMultinode(unittest.TestCase): # pylint: disable=too-many-public-metho
self.assertEqual(client_protocol.name, server_protocol.name)
self.assertIn('target_group', client_protocol.parameters['protocols'][client_protocol.name].keys())
self.assertIn('actions', self.client_job.parameters.keys())
- self.client_job.validate()
- self.server_job.validate()
+ try:
+ self.client_job.validate()
+ self.server_job.validate()
+ except InfrastructureError:
+ pass
self.assertIn('role', client_protocol.parameters['protocols'][client_protocol.name].keys())
self.assertEqual([], self.client_job.pipeline.errors)
self.assertEqual([], self.server_job.pipeline.errors)
@@ -174,6 +183,13 @@ class TestMultinode(unittest.TestCase): # pylint: disable=too-many-public-metho
self.assertNotEqual(protocol_names, [])
protocols = [protocol for protocol in testshell.job.protocols if protocol.name in protocol_names]
self.assertNotEqual(protocols, [])
+ multinode_dict = {'multinode': '<LAVA_MULTI_NODE> <LAVA_(\\S+) ([^>]+)>'}
+ self.assertEqual(multinode_dict, testshell.multinode_dict)
+ self.assertIn('multinode', testshell.patterns)
+ self.assertEqual(testshell.patterns['multinode'], multinode_dict['multinode'])
+ testshell._reset_patterns()
+ self.assertIn('multinode', testshell.patterns)
+ self.assertEqual(testshell.patterns['multinode'], multinode_dict['multinode'])
for protocol in protocols:
protocol.debug_setup()
if isinstance(protocol, MultinodeProtocol):
@@ -185,7 +201,7 @@ class TestMultinode(unittest.TestCase): # pylint: disable=too-many-public-metho
def test_multinode_description(self):
self.assertIsNotNone(self.client_job)
- self.client_job.validate()
+ allow_missing_path(self.client_job.validate, self, 'qemu-system-x86_64')
# check that the description can be re-loaded as valid YAML
for action in self.client_job.pipeline.actions:
data = action.explode()
diff --git a/lava_dispatcher/pipeline/test/test_removable.py b/lava_dispatcher/pipeline/test/test_removable.py
index d4e640a..ef2b07b 100644
--- a/lava_dispatcher/pipeline/test/test_removable.py
+++ b/lava_dispatcher/pipeline/test/test_removable.py
@@ -30,6 +30,22 @@ from lava_dispatcher.pipeline.utils.strings import substitute
from lava_dispatcher.pipeline.utils.shell import infrastructure_error
+class Factory(object): # pylint: disable=too-few-public-methods
+ """
+ Not Model based, this is not a Django factory.
+ Factory objects are dispatcher based classes, independent
+ of any database objects.
+ """
+ def create_job(self, sample_job, device_file, output_dir='/tmp/'): # pylint: disable=no-self-use
+ device = NewDevice(os.path.join(os.path.dirname(__file__), device_file))
+ yaml = os.path.join(os.path.dirname(__file__), sample_job)
+ with open(yaml) as sample_job_data:
+ parser = JobParser()
+ job = parser.parse(sample_job_data, device, 4212, None, None, None,
+ output_dir=output_dir)
+ return job
+
+
class TestRemovable(unittest.TestCase): # pylint: disable=too-many-public-methods
def test_device_parameters(self):
@@ -76,7 +92,7 @@ class TestRemovable(unittest.TestCase): # pylint: disable=too-many-public-metho
if isinstance(action, DeployAction):
if isinstance(action, MassStorage):
self.assertTrue(action.valid)
- agent = action.parameters['download']
+ agent = action.parameters['download']['tool']
self.assertTrue(agent.startswith('/')) # needs to be a full path but on the device, so avoid os.path
self.assertIn(action.parameters['device'], job.device['parameters']['media']['usb'])
mass_storage = action
@@ -115,6 +131,27 @@ class TestRemovable(unittest.TestCase): # pylint: disable=too-many-public-metho
self.assertTrue(type(dd_action.get_common_data('uuid', 'boot_part')) is str)
self.assertEqual('0:1', dd_action.get_common_data('uuid', 'boot_part'))
+ def test_juno_deployment(self):
+ factory = Factory()
+ job = factory.create_job('sample_jobs/juno-uboot-removable.yaml', '../devices/juno-uboot.yaml')
+ job.validate()
+ self.assertEqual(job.pipeline.errors, [])
+ self.assertIn('usb', job.device['parameters']['media'].keys())
+ deploy_params = [methods for methods in job.parameters['actions'] if 'deploy' in methods.keys()][1]['deploy']
+ self.assertIn('device', deploy_params)
+ self.assertIn(deploy_params['device'], job.device['parameters']['media']['usb'])
+ self.assertIn('uuid', job.device['parameters']['media']['usb'][deploy_params['device']])
+ self.assertIn('device_id', job.device['parameters']['media']['usb'][deploy_params['device']])
+ self.assertNotIn('boot_part', job.device['parameters']['media']['usb'][deploy_params['device']])
+ deploy_action = [action for action in job.pipeline.actions if action.name == 'storage-deploy'][0]
+ download_action = [action for action in deploy_action.internal_pipeline.actions if action.name == 'download_retry'][0]
+ overlay_action = [action for action in deploy_action.internal_pipeline.actions if action.name == 'lava-overlay'][0]
+ self.assertEqual(['test-image'], overlay_action.action_namespaces)
+ self.assertIn('lava_test_results_dir', deploy_action.data)
+ self.assertIn('/lava-', deploy_action.data['lava_test_results_dir'])
+ self.assertIsInstance(deploy_action, MassStorage)
+ self.assertIn('image', deploy_action.parameters.keys())
+
@unittest.skipIf(infrastructure_error('mkimage'), "u-boot-tools not installed")
def test_primary_media(self):
"""
@@ -155,7 +192,7 @@ class TestRemovable(unittest.TestCase): # pylint: disable=too-many-public-metho
self.assertIn('type', boot_params)
self.assertGreater(len(job.pipeline.actions), 1)
self.assertIsNotNone(job.pipeline.actions[1].internal_pipeline)
- u_boot_action = [action for action in job.pipeline.actions if action.name == 'uboot-action'][1].internal_pipeline.actions[1]
+ u_boot_action = [action for action in job.pipeline.actions if action.name == 'uboot-action'][1].internal_pipeline.actions[2]
self.assertIsNotNone(u_boot_action.get_common_data('u-boot', 'device'))
self.assertEqual(u_boot_action.name, "uboot-overlay")
diff --git a/lava_dispatcher/pipeline/test/test_repeat.py b/lava_dispatcher/pipeline/test/test_repeat.py
index 17caecd..048993f 100644
--- a/lava_dispatcher/pipeline/test/test_repeat.py
+++ b/lava_dispatcher/pipeline/test/test_repeat.py
@@ -19,14 +19,13 @@
# with this program; if not, see <http://www.gnu.org/licenses>.
-import os
-import glob
import unittest
from lava_dispatcher.pipeline.actions.boot.qemu import BootQEMUImageAction
from lava_dispatcher.pipeline.actions.test.shell import TestShellRetry
from lava_dispatcher.pipeline.utils.filesystem import mkdtemp
from lava_dispatcher.pipeline.test.test_basic import Factory, pipeline_reference
from lava_dispatcher.pipeline.actions.deploy.testdef import get_deployment_testdefs
+from lava_dispatcher.pipeline.test.test_defs import allow_missing_path
class TestRepeatBootTest(unittest.TestCase): # pylint: disable=too-many-public-methods
@@ -40,7 +39,7 @@ class TestRepeatBootTest(unittest.TestCase): # pylint: disable=too-many-public-
def test_basic_structure(self):
self.assertIsNotNone(self.job)
- self.job.validate()
+ allow_missing_path(self.job.validate, self, 'qemu-system-x86_64')
self.assertEqual([], self.job.pipeline.errors)
description_ref = pipeline_reference('kvm-repeat.yaml')
self.assertEqual(description_ref, self.job.pipeline.describe(False))
diff --git a/lava_dispatcher/pipeline/test/test_uboot.py b/lava_dispatcher/pipeline/test/test_uboot.py
index 9186672..61a9e2c 100644
--- a/lava_dispatcher/pipeline/test/test_uboot.py
+++ b/lava_dispatcher/pipeline/test/test_uboot.py
@@ -294,7 +294,7 @@ class TestUbootAction(unittest.TestCase): # pylint: disable=too-many-public-met
output_dir='/tmp/')
job.validate()
sample_job_data.close()
- u_boot_media = [action for action in job.pipeline.actions if action.name == 'uboot-action'][1].internal_pipeline.actions[0]
+ u_boot_media = [action for action in job.pipeline.actions if action.name == 'uboot-action'][1].internal_pipeline.actions[1]
self.assertIsInstance(u_boot_media, UBootSecondaryMedia)
self.assertEqual([], u_boot_media.errors)
self.assertEqual(u_boot_media.parameters['kernel'], '/boot/vmlinuz-3.16.0-4-armmp-lpae')
@@ -356,6 +356,17 @@ class TestUbootAction(unittest.TestCase): # pylint: disable=too-many-public-met
self.assertIn('compression', nfs.parameters['nfsrootfs'])
self.assertEqual(nfs.parameters['nfsrootfs']['compression'], 'xz')
+ def test_prefix(self):
+ factory = Factory()
+ job = factory.create_bbb_job('sample_jobs/bbb-skip-install.yaml')
+ job.validate()
+ tftp_deploy = [action for action in job.pipeline.actions if action.name == 'tftp-deploy'][0]
+ prepare = [action for action in tftp_deploy.internal_pipeline.actions if action.name == 'prepare-tftp-overlay'][0]
+ nfs = [action for action in prepare.internal_pipeline.actions if action.name == 'extract-nfsrootfs'][0]
+ self.assertIn('prefix', nfs.parameters['nfsrootfs'])
+ self.assertEqual(nfs.parameters['nfsrootfs']['prefix'], 'jessie/')
+ self.assertEqual(nfs.param_key, 'nfsrootfs')
+
class TestOverlayCommands(unittest.TestCase): # pylint: disable=too-many-public-methods
diff --git a/lava_dispatcher/pipeline/test/test_utils.py b/lava_dispatcher/pipeline/test/test_utils.py
index c10f090..7ebb8f3 100644
--- a/lava_dispatcher/pipeline/test/test_utils.py
+++ b/lava_dispatcher/pipeline/test/test_utils.py
@@ -196,7 +196,7 @@ class TestConstants(unittest.TestCase): # pylint: disable=too-many-public-metho
uboot.parameters.get('parameters', {}).get('shutdown-message', SHUTDOWN_MESSAGE)
)
self.assertIsInstance(uboot, UBootAction)
- retry = uboot.internal_pipeline.actions[3]
+ retry = uboot.internal_pipeline.actions[4]
self.assertEqual(
"reboot: Restarting system", # modified in the job yaml
retry.parameters['parameters'].get('shutdown-message', SHUTDOWN_MESSAGE)
diff --git a/lava_dispatcher/pipeline/test/testdefs/params.yaml b/lava_dispatcher/pipeline/test/testdefs/params.yaml
new file mode 100644
index 0000000..e375fe1
--- /dev/null
+++ b/lava_dispatcher/pipeline/test/testdefs/params.yaml
@@ -0,0 +1,44 @@
+metadata:
+ format: Lava-Test Test Definition 1.0
+ name: parameters-test
+ description: "test commands for Linux Linaro ubuntu Images with parameters"
+ maintainer:
+ - neil.williams@linaro.org
+ os:
+ - ubuntu
+ scope:
+ - functional
+ devices:
+ - kvm
+ - arndale
+ - panda
+ - beaglebone-black
+ - beagle-xm
+
+params:
+ VARIABLE_NAME_1: value_1
+ VARIABLE_NAME_2: value_2
+
+run:
+ steps:
+ - echo "test1a:" "pass"
+ - echo "test2a:" "fail"
+ - echo 'test3a:' 'skip'
+ - echo '"test4a:" "unknown"'
+ - lava-test-case echo1 --shell echo "test1b:" "pass"
+ - lava-test-case echo2 --shell echo "test2b:" "fail"
+ - lava-test-case echo3 --shell echo 'test3b:' 'skip'
+ - lava-test-case echo4 --shell echo '"test4b:" "unknown"'
+ - lava-test-case test3 --result pass
+ - lava-test-case test4 --result fail
+ - lava-test-case test5 --result pass --measurement 99 --units bottles
+ - lava-test-case test6 --result fail --measurement 0 --units mugs
+ - echo $VARIABLE_NAME_1
+ - echo $VARIABLE_NAME_2
+ - echo $SPACED_VAR
+ - echo $PUB_KEY
+ - ./scripts/linux-cpuinfo.sh
+ - ./scripts/result-params.py
+
+parse:
+ pattern: "(?P<test_case_id>.*-*):\\s+(?P<result>(pass|fail))"
diff --git a/lava_dispatcher/pipeline/test/testdefs/result-data.txt b/lava_dispatcher/pipeline/test/testdefs/result-data.txt
new file mode 100644
index 0000000..fccd610
--- /dev/null
+++ b/lava_dispatcher/pipeline/test/testdefs/result-data.txt
@@ -0,0 +1,5 @@
+test1a: pass
+test2a: fail
+test3a: skip
+\"test4a:\" \"unknown\"
+
diff --git a/lava_dispatcher/pipeline/utils/constants.py b/lava_dispatcher/pipeline/utils/constants.py
index 2f94c6e..8ec92f9 100644
--- a/lava_dispatcher/pipeline/utils/constants.py
+++ b/lava_dispatcher/pipeline/utils/constants.py
@@ -21,6 +21,8 @@
# Overrides are only supported when and as declared in the comments for
# each constant.
+# pylint: disable=anomalous-backslash-in-string
+
# Delay between each character sent to the shell. This is required for some
# slow serial consoles.
SHELL_SEND_DELAY = 0.05
@@ -111,7 +113,7 @@ BOOTLOADER_DEFAULT_CMD_TIMEOUT = 90
GRUB_BOOT_PROMPT = "Press enter to boot the selected OS"
# Timeout for USB devices to settle and show up
-USB_SHOW_UP_TIMEOUT = 10
+USB_SHOW_UP_TIMEOUT = 20
# kernel boot monitoring
# Some successful kernel builds end the boot with this string
@@ -131,3 +133,10 @@ KERNEL_INIT_ALERT = 'ALERT! .* does not exist.\s+Dropping to a shell!'
# (i.e. size * 1024 * 1024)
INSTALLER_IMAGE_MAX_SIZE = 8 * 1024 # 8Gb
INSTALLER_QUIET_MSG = 'Loading initial ramdisk'
+
+# V1 compatibility
+DEFAULT_V1_PATTERN = "(?P<test_case_id>.*-*)\\s+:\\s+(?P<result>(PASS|pass|FAIL|fail|SKIP|skip|UNKNOWN|unknown))"
+DEFAULT_V1_FIXUP = {'PASS': 'pass', 'FAIL': 'fail', 'SKIP': 'skip', 'UNKNOWN': 'unknown'}
+
+# Message for notifying completion of secondary deployment
+SECONDARY_DEPLOYMENT_MSG = "Secondary media deployment complete"
diff --git a/lava_dispatcher/pipeline/utils/filesystem.py b/lava_dispatcher/pipeline/utils/filesystem.py
index 3e9ef76..4cb310b 100644
--- a/lava_dispatcher/pipeline/utils/filesystem.py
+++ b/lava_dispatcher/pipeline/utils/filesystem.py
@@ -25,7 +25,10 @@ import tarfile
import tempfile
import guestfs
from configobj import ConfigObj
+
+from lava_dispatcher.pipeline.action import JobError
from lava_dispatcher.pipeline.utils.constants import LXC_PATH
+from lava_dispatcher.pipeline.utils.compression import decompress_file
def rmtree(directory):
@@ -183,6 +186,29 @@ def copy_out_files(image, filenames, destination):
guest.shutdown()
+def copy_in_overlay(image, root_partition, overlay):
+ """
+ Mounts test image partition as specified by the test
+ writer and extracts overlay at the root
+ """
+ guest = guestfs.GuestFS(python_return_dict=True)
+ guest.add_drive(image)
+ guest.launch()
+ partitions = guest.list_partitions()
+ if not partitions:
+ raise RuntimeError("Unable to prepare guestfs")
+ guest_partition = partitions[root_partition]
+ guest.mount(guest_partition, '/')
+ # FIXME: max message length issues when using tar_in
+ # on tar.gz. Works fine with tar so decompressing
+ # overlay first.
+ if os.path.exists(overlay[:-3]):
+ os.unlink(overlay[:-3])
+ decompressed_overlay = decompress_file(overlay, 'gz')
+ guest.tar_in(decompressed_overlay, '/')
+ guest.umount(guest_partition)
+
+
def copy_to_lxc(lxc_name, src):
"""Copies given file in SRC to lxc filesystem '/' with the provided
LXC_NAME and configured LXC_PATH
diff --git a/lava_dispatcher/pipeline/utils/installers.py b/lava_dispatcher/pipeline/utils/installers.py
index 854c86c..128641e 100644
--- a/lava_dispatcher/pipeline/utils/installers.py
+++ b/lava_dispatcher/pipeline/utils/installers.py
@@ -18,10 +18,19 @@
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
-import os
import re
+def add_to_kickstart(preseedfile, extra_command):
+ with open(preseedfile, 'a') as pf:
+ pf.write('\n')
+ pf.write('%post\n')
+ pf.write('exec < /dev/console > /dev/console\n')
+ pf.write(extra_command + '\n')
+ pf.write('%end\n')
+ pf.close()
+
+
def add_late_command(preseedfile, extra_command):
added = False
append_line = " ; " + extra_command + "\n"
diff --git a/lava_dispatcher/pipeline/utils/messages.py b/lava_dispatcher/pipeline/utils/messages.py
index 937fb17..a0403aa 100644
--- a/lava_dispatcher/pipeline/utils/messages.py
+++ b/lava_dispatcher/pipeline/utils/messages.py
@@ -154,9 +154,11 @@ class LinuxKernelMessages(Action):
results = self.parse_failures(connection)
if len(results) > 1:
self.results = {'fail': results}
- else:
+ elif len(results) == 1:
self.results = {
'success': self.name,
'message': results[0]['message'] # the matching prompt
}
+ else:
+ self.results = {'result': 'skipped'}
return connection
diff --git a/lava_dispatcher/pipeline/utils/shell.py b/lava_dispatcher/pipeline/utils/shell.py
index 7d4a48e..4a6624c 100644
--- a/lava_dispatcher/pipeline/utils/shell.py
+++ b/lava_dispatcher/pipeline/utils/shell.py
@@ -89,7 +89,7 @@ def wait_for_prompt(connection, prompt_pattern, timeout, check_char):
logger = logging.getLogger('dispatcher')
logger.warning('%s: Sending %s in case of corruption. connection timeout %s, retry in %s',
exc, check_char, timeout, partial_timeout)
- logger.debug("pattern: %s" % prompt_pattern)
+ logger.debug("pattern: %s", prompt_pattern)
prompt_wait_count += 1
partial_timeout = timeout / 10
connection.sendline(check_char)
diff --git a/lava_dispatcher/pipeline/utils/vcs.py b/lava_dispatcher/pipeline/utils/vcs.py
index 27d6e9e..f91879b 100644
--- a/lava_dispatcher/pipeline/utils/vcs.py
+++ b/lava_dispatcher/pipeline/utils/vcs.py
@@ -46,18 +46,20 @@ class BzrHelper(VCSHelper):
def clone(self, dest_path, revision=None):
cwd = os.getcwd()
-
+ logger = logging.getLogger('dispatcher')
env = dict(os.environ)
env.update({'BZR_HOME': '/dev/null', 'BZR_LOG': '/dev/null'})
try:
if revision is not None:
+ logger.debug("Running '%s branch -r %s %s'", self.binary, str(revision), self.url)
subprocess.check_output([self.binary, 'branch', '-r',
str(revision), self.url,
dest_path],
stderr=subprocess.STDOUT, env=env)
commit_id = str(revision)
else:
+ logger.debug("Running '%s branch %s'", self.binary, self.url)
subprocess.check_output([self.binary, 'branch', self.url,
dest_path],
stderr=subprocess.STDOUT, env=env)
@@ -66,7 +68,6 @@ class BzrHelper(VCSHelper):
env=env).strip().decode('utf-8')
except subprocess.CalledProcessError as exc:
- logger = logging.getLogger('dispatcher')
if sys.version > '3':
exc_message = str(exc)
logger.exception({
@@ -104,11 +105,14 @@ class GitHelper(VCSHelper):
self.binary = '/usr/bin/git'
def clone(self, dest_path, revision=None):
+ logger = logging.getLogger('dispatcher')
try:
+ logger.debug("Running '%s clone %s'", self.binary, self.url)
subprocess.check_output([self.binary, 'clone', self.url, dest_path],
stderr=subprocess.STDOUT)
if revision is not None:
+ logger.debug("Running '%s checkout %s", self.binary, str(revision))
subprocess.check_output([self.binary, '--git-dir',
os.path.join(dest_path, '.git'),
'checkout', str(revision)],
diff --git a/setup.py b/setup.py
index 5da48c7..d7cad6b 100755
--- a/setup.py
+++ b/setup.py
@@ -65,7 +65,9 @@ setup(
'lava_test_shell/distro/ubuntu/*',
'lava_test_shell/distro/debian/*',
'lava_test_shell/distro/oe/*',
+ 'pipeline/lava_test_shell/lava-test-case',
'pipeline/lava_test_shell/lava-test-runner',
+ 'pipeline/lava_test_shell/multi_node/*',
],
'linaro_dashboard_bundle': [
'schemas/*',