# test result tool - report text based test results
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
import os
import resulttool.resultutils as resultutils
from oeqa.utils.git import GitRepo
import oeqa.utils.gitarchive as gitarchive


class ResultsTextReport(object):
    def __init__(self):
        self.ptests = {}
        self.result_types = {'passed': ['PASSED', 'passed'],
                             'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
                             'skipped': ['SKIPPED', 'skipped']}


    def handle_ptest_result(self, k, status, result):
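        """Accumulate per-suite pass/fail/skip counts for a single
        ptestresult.* entry, tracking suite durations and timeouts in
        self.ptests as a side effect."""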
        if k == 'ptestresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ptestresult.sections']:
                if suite not in self.ptests:
                    self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
                if 'duration' in result['ptestresult.sections'][suite]:
                    self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
                if 'timeout' in result['ptestresult.sections'][suite]:
                    self.ptests[suite]['duration'] += " T"
            return
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return
        # Handle suites with a dot in their name, e.g. 'glib-2.0', where the
        # naive split above leaves part of the suite name in 'test'
        if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ptestresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ptests:
            self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ptests[suite][tk] += 1

    def get_aggregated_test_result(self, logger, testresult):
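        """Return overall pass/fail/skip counts and failed test case names for
        one test result, delegating ptestresult.* entries to
        handle_ptest_result()."""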
        test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
        result = testresult.get('result', {})
        for k in result:
            test_status = result[k].get('status', '')
            for tk in self.result_types:
                if test_status in self.result_types[tk]:
                    test_count_report[tk] += 1
            if test_status in self.result_types['failed']:
                test_count_report['failed_testcases'].append(k)
            if k.startswith("ptestresult."):
                self.handle_ptest_result(k, test_status, result)
        return test_count_report

    def print_test_report(self, template_file_name, test_count_reports):
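        """Render the aggregated counts through the named Jinja2 template from
        the plugin's template directory and print the result to stdout."""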
        from jinja2 import Environment, FileSystemLoader
        script_path = os.path.dirname(os.path.realpath(__file__))
        file_loader = FileSystemLoader(os.path.join(script_path, 'template'))
        env = Environment(loader=file_loader, trim_blocks=True)
        template = env.get_template(template_file_name)
        havefailed = False
        haveptest = bool(self.ptests)
        reportvalues = []
        cols = ['passed', 'failed', 'skipped']
        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 }
        for line in test_count_reports:
            total_tested = line['passed'] + line['failed'] + line['skipped']
            vals = {}
            vals['result_id'] = line['result_id']
            vals['testseries'] = line['testseries']
            vals['sort'] = line['testseries'] + "_" + line['result_id']
            vals['failed_testcases'] = line['failed_testcases']
            for k in cols:
                # Guard against a result with no recorded tests at all
                if total_tested:
                    vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
                else:
                    vals[k] = "0 (0%)"
            for k in maxlen:
                if k in vals and len(vals[k]) > maxlen[k]:
                    maxlen[k] = len(vals[k])
            reportvalues.append(vals)
            if line['failed_testcases']:
                havefailed = True
        for ptest in self.ptests:
            if len(ptest) > maxlen['ptest']:
                maxlen['ptest'] = len(ptest)
        output = template.render(reportvalues=reportvalues,
                                 havefailed=havefailed,
                                 haveptest=haveptest,
                                 ptests=self.ptests,
                                 maxlen=maxlen)
        print(output)

    def view_test_report(self, logger, source_dir, branch, commit, tag):
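        """Load results either from a plain file/directory or from a git
        repository (at a specific commit or tag), aggregate them and print
        the text report."""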
        test_count_reports = []
        if commit:
            if tag:
                logger.warning("Ignoring --tag as --commit was specified")
            tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
            repo = GitRepo(source_dir)
            revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
            rev_index = gitarchive.rev_find(revs, 'commit', commit)
            testresults = resultutils.git_get_result(repo, revs[rev_index][2])
        elif tag:
            repo = GitRepo(source_dir)
            testresults = resultutils.git_get_result(repo, [tag])
        else:
            testresults = resultutils.load_resultsdata(source_dir)
        for testsuite in testresults:
            for resultid in testresults[testsuite]:
                result = testresults[testsuite][resultid]
                test_count_report = self.get_aggregated_test_result(logger, result)
                test_count_report['testseries'] = result['configuration']['TESTSERIES']
                test_count_report['result_id'] = resultid
                test_count_reports.append(test_count_report)
        self.print_test_report('test_report_full_text.txt', test_count_reports)

def report(args, logger):
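    """Entry point for the 'report' subcommand."""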
    report = ResultsTextReport()
    report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('report', help='summarise test results',
                                         description='print a text-based summary of the test results',
                                         group='analysis')
    parser_build.set_defaults(func=report)
    parser_build.add_argument('source_dir',
                              help='source file/directory that contains the test result files to summarise')
    parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
    parser_build.add_argument('--commit', help="Revision to report")
    parser_build.add_argument('-t', '--tag', default='',
                              help='treat source_dir as a git repository and report on the specified tag')
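
# Example invocations, assuming this plugin is loaded by the resulttool script
# (option values depend on how the results were stored):
#   resulttool report path/to/testresults
#   resulttool report --branch master --commit <sha> path/to/results-repo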