author     Sjoerd Simons <sjoerd.simons@collabora.co.uk>  2015-04-17 08:58:52 (GMT)
committer  Sjoerd Simons <sjoerd.simons@collabora.co.uk>  2015-04-17 08:58:52 (GMT)
commit     1bfacb66a63f0d519a6afd537ecefbb4c8d21a6d (patch)
tree       0803c9dc6ed502c1bb203e3c1506f1e2d6c3e8c2
parent     a597e01f2b65bcf4ffe35b3c869f605f2d292652 (diff)
download   lqa-add-analyze.tar.gz
           lqa-add-analyze.tar.xz
-rw-r--r--  lqa_tool/__init__.py     |  13
-rw-r--r--  lqa_tool/analyse_cmd.py  | 109
2 files changed, 122 insertions, 0 deletions
diff --git a/lqa_tool/__init__.py b/lqa_tool/__init__.py
index 5c7ca7c..d299e7e 100644
--- a/lqa_tool/__init__.py
+++ b/lqa_tool/__init__.py
@@ -32,6 +32,7 @@ import lqa_tool.utils
 from lqa_tool.version import __version__
 from lqa_tool.settings import Settings
 from lqa_tool.base_cmd import BaseCmd
+from lqa_tool.analyse_cmd import AnalyseCmd
 
 class Profiles(object):
     """Initialize profiles for command objects.
@@ -84,6 +85,18 @@ See 'lqa <command> -h' to read about a specific subcommand.
help="set configuration file")
parser.add_argument('--log-file', type=str, help="set the log file")
+    def analyse(self):
+        parser = ArgumentParser(
+            description='Analyse job id results',
+            usage='''lqa analyse [<args>] [JOB_ID ...]''')
+        # Add common arguments
+        self.add_common_arguments(parser)
+        # NOT prefixing the argument with -- means it's not optional
+        parser.add_argument('job_id', type=str,
+                            metavar='JOB_ID', help='job id list')
+        args = parser.parse_args(sys.argv[2:])
+        AnalyseCmd(args).run()
+
     def submit(self):
         parser = ArgumentParser(
             description='Submit job files',
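
The new analyse() method parses only sys.argv[2:], which implies the top-level lqa entry point consumes argv[1] as the subcommand name and dispatches to the method of the same name on Profiles (hence the "See 'lqa <command> -h'" help text). The following is a minimal, self-contained sketch of that dispatch pattern; the Dispatcher class and demo strings are illustrative stand-ins, not code from lqa.

    #!/usr/bin/env python
    # Illustrative sketch of an argparse "subcommand as method" dispatcher,
    # assumed to mirror how `lqa analyse JOB_ID` ends up in Profiles.analyse().
    import sys
    from argparse import ArgumentParser

    class Dispatcher(object):
        def __init__(self):
            parser = ArgumentParser(
                description='demo tool with subcommands',
                usage='demo <command> [<args>]')
            parser.add_argument('command', help='subcommand to run')
            # Parse only argv[1]; the chosen subcommand re-parses argv[2:].
            args = parser.parse_args(sys.argv[1:2])
            if not hasattr(self, args.command):
                parser.error("unrecognized command '{}'".format(args.command))
            getattr(self, args.command)()

        def analyse(self):
            parser = ArgumentParser(description='Analyse job id results')
            parser.add_argument('job_id', metavar='JOB_ID', help='job id')
            args = parser.parse_args(sys.argv[2:])
            print("would analyse job {}".format(args.job_id))

    if __name__ == '__main__':
        Dispatcher()
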
diff --git a/lqa_tool/analyse_cmd.py b/lqa_tool/analyse_cmd.py
new file mode 100644
index 0000000..f1c84e7
--- /dev/null
+++ b/lqa_tool/analyse_cmd.py
@@ -0,0 +1,109 @@
+import os.path
+import json
+from settings import Settings
+from base_cmd import BaseCmd
+
+class AnalyseCmd(BaseCmd):
+    """Lava analyse sub-command object.
+
+    :param args: The parsed arguments as returned by argparse.
+    """
+
+    def __init__(self, args):
+        BaseCmd.__init__(self, args)
+
+    def fetch_job(self, job_id):
+        try:
+            status = self.server.scheduler.job_status(job_id)
+            details = self.server.scheduler.job_details(job_id)
+            return [ status, details ]
+        except Fault as e:
+            self.settings.logger.error(
+                "fetching job information {}: '{}'".format(job_id, e))
+            exit(1)
+        except EnvironmentError as e:
+            self.settings.logger.error(e)
+            exit(1)
+
+    def fetch_bundle(self, sha1):
+        """Fetch the bundle for sha1"""
+        b = self.server.dashboard.get(sha1)
+        return json.loads(b['content'])
+
+    def p(self, *args):
+        self.settings.logger.info(*args)
+
+    def print_test_run(self, run):
+        result = {
+            "pass": [],
+            "fail": [],
+            "skip": [],
+            "unknown": [] }
+
+        for c in run['test_results']:
+            result[c['result']] += [c['test_case_id']]
+
+        total_runs = len(run['test_results'])
+        passed = len(result['pass'])
+        failed = len(result['fail'])
+        skipped = len(result['skip'])
+        unknown = len(result['unknown'])
+
+        assert((passed + failed + skipped + unknown) == total_runs)
+
+        if passed == total_runs:
+            self.p("- Test run: {}: All passed \o/".format(run['test_id']))
+            return
+
+        self.p("- Test run: {}: {}p {}f {}s {}u".format(run['test_id'], passed, failed,
+                                                        skipped, unknown))
+
+        for c in result['fail']:
+            self.p("-- FAILED: {}".format(c))
+        for c in result['skip']:
+            self.p("-- SKIPPED: {}".format(c))
+        for c in result['unknown']:
+            self.p("-- unknown: {}".format(c))
+
+
+    def run(self):
+        [ status, details ] = self.fetch_job(self.args.job_id)
+
+        #print status
+        #print details['definition']
+
+        self.p("Job: {} - {}".format(details['id'], details['description']))
+        self.p("=" * 79 + "\n")
+        if status['bundle_sha1'] == "":
+            self.p("No result, job status: '{}'".format(status['job_status']))
+            return
+
+        if status['job_status'] != 'Complete':
+            self.p("* Final job status: '{}'. Results are not complete!".format(status['job_status']))
+        else:
+            self.p("* Job ran successfully")
+
+        definition = details['definition']
+        j = json.loads(definition)
+
+        tests = 0
+        for a in j['actions']:
+            if a['command'] == 'lava_test_shell':
+                p = a['parameters']
+                tests += len(p.get('testdef_repos', []))
+                tests += len(p.get('testdef_urls', []))
+        self.p("* Tests in definition: {}".format(tests))
+
+        bundle = self.fetch_bundle(status['bundle_sha1'])
+        results = len(bundle['test_runs'])
+
+        # All defined tests + the lava metatest
+        if results == tests + 1:
+            self.p("* Reports available for all test runs\n")
+        else:
+            self.p("* Not all test results were reported! " +
+                   "Only {} reports\n".format(results))
+
+        for r in bundle['test_runs']:
+            self.print_test_run(r)
+
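
For reference, run() and print_test_run() above rely on only a few keys of the dashboard bundle: a test_runs list whose entries carry a test_id and a test_results list, where each result has a result field (pass/fail/skip/unknown) and a test_case_id. The sketch below shows that shape and the same per-result tally; the sample values are invented for illustration, not taken from a real LAVA bundle.

    #!/usr/bin/env python
    # Illustrative only: the bundle shape AnalyseCmd.print_test_run() expects,
    # fed with invented sample data, plus the same per-result tally it performs.
    import json

    sample_bundle = json.loads("""
    {
      "test_runs": [
        {
          "test_id": "example-suite",
          "test_results": [
            {"test_case_id": "boot",    "result": "pass"},
            {"test_case_id": "network", "result": "fail"},
            {"test_case_id": "suspend", "result": "skip"}
          ]
        }
      ]
    }
    """)

    for run in sample_bundle['test_runs']:
        tally = {"pass": [], "fail": [], "skip": [], "unknown": []}
        for case in run['test_results']:
            tally[case['result']].append(case['test_case_id'])
        print("- Test run: {}: {}p {}f {}s {}u".format(
            run['test_id'], len(tally['pass']), len(tally['fail']),
            len(tally['skip']), len(tally['unknown'])))
        for name in tally['fail']:
            print("-- FAILED: {}".format(name))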