Brad Bishop | 40320b1 | 2019-03-26 16:08:25 -0400 | [diff] [blame] | 1 | # test result tool - report text based test results |
| 2 | # |
| 3 | # Copyright (c) 2019, Intel Corporation. |
| 4 | # Copyright (c) 2019, Linux Foundation |
| 5 | # |
Brad Bishop | c342db3 | 2019-05-15 21:57:59 -0400 | [diff] [blame^] | 6 | # SPDX-License-Identifier: GPL-2.0-only |
Brad Bishop | 40320b1 | 2019-03-26 16:08:25 -0400 | [diff] [blame] | 7 | # |
Brad Bishop | c342db3 | 2019-05-15 21:57:59 -0400 | [diff] [blame^] | 8 | |
Brad Bishop | 40320b1 | 2019-03-26 16:08:25 -0400 | [diff] [blame] | 9 | import os |
| 10 | import glob |
| 11 | import json |
| 12 | import resulttool.resultutils as resultutils |
| 13 | from oeqa.utils.git import GitRepo |
| 14 | import oeqa.utils.gitarchive as gitarchive |
| 15 | |
| 16 | |
class ResultsTextReport(object):
    """Aggregate JSON test results and render them as a text report.

    Counts passed/failed/skipped totals per result set and collects
    per-suite statistics for ptest, LTP and LTP-POSIX results, then
    renders everything through a jinja2 text template.
    """

    def __init__(self):
        # Per-suite statistics, keyed by suite name. Each entry is the
        # dict produced by _empty_suite() below.
        self.ptests = {}
        self.ltptests = {}
        self.ltpposixtests = {}
        # Map of report column -> raw status strings counted in that column.
        self.result_types = {'passed': ['PASSED', 'passed'],
                             'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
                             'skipped': ['SKIPPED', 'skipped']}

    def _empty_suite(self):
        """Return a fresh per-suite statistics record."""
        return {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}

    def _handle_suite_result(self, section_key, suites, k, status, result, track_timeout):
        """Shared implementation behind the per-framework result handlers.

        section_key: the "<prefix>.sections" key carrying per-suite metadata
        suites: the per-suite statistics dict to update (e.g. self.ptests)
        k: the result key being processed (e.g. "ptestresult.suite.test")
        status: the raw status string for this result
        result: the full result dict for this result set
        track_timeout: append " T" to the duration when the suite timed out
        """
        if k == section_key:
            # Ensure suites without any individual test results still show
            # up on the report, and record their duration if present.
            for suite in result[section_key]:
                if suite not in suites:
                    suites[suite] = self._empty_suite()
                if 'duration' in result[section_key][suite]:
                    suites[suite]['duration'] = result[section_key][suite]['duration']
                if track_timeout and 'timeout' in result[section_key][suite]:
                    suites[suite]['duration'] += " T"
            return
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return
        # Handle suite names containing a dot, e.g. 'glib-2.0': retry the
        # split with one more component and see if that matches a known suite.
        if section_key in result and suite not in result[section_key]:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result[section_key]:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in suites:
            suites[suite] = self._empty_suite()
        for tk in self.result_types:
            if status in self.result_types[tk]:
                suites[suite][tk] += 1

    def handle_ptest_result(self, k, status, result):
        """Accumulate a single ptest result into self.ptests."""
        self._handle_suite_result('ptestresult.sections', self.ptests, k, status, result, True)

    def handle_ltptest_result(self, k, status, result):
        """Accumulate a single LTP result into self.ltptests."""
        # Note: a stray debug print() in the original has been removed.
        self._handle_suite_result('ltpresult.sections', self.ltptests, k, status, result, True)

    def handle_ltpposixtest_result(self, k, status, result):
        """Accumulate a single LTP-POSIX result into self.ltpposixtests."""
        # LTP-POSIX sections carry no 'timeout' marker, so don't track it.
        self._handle_suite_result('ltpposixresult.sections', self.ltpposixtests, k, status, result, False)

    def get_aggregated_test_result(self, logger, testresult):
        """Count passed/failed/skipped results in one result set.

        Also routes ptest/LTP/LTP-POSIX entries to the per-framework
        handlers so their per-suite tallies are updated as a side effect.
        Returns a dict with 'passed', 'failed', 'skipped' counts and the
        list of 'failed_testcases' keys.
        """
        test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
        result = testresult.get('result', [])
        for k in result:
            test_status = result[k].get('status', [])
            for tk in self.result_types:
                if test_status in self.result_types[tk]:
                    test_count_report[tk] += 1
            if test_status in self.result_types['failed']:
                test_count_report['failed_testcases'].append(k)
            if k.startswith("ptestresult."):
                self.handle_ptest_result(k, test_status, result)
            if k.startswith("ltpresult."):
                self.handle_ltptest_result(k, test_status, result)
            if k.startswith("ltpposixresult."):
                self.handle_ltpposixtest_result(k, test_status, result)
        return test_count_report

    def print_test_report(self, template_file_name, test_count_reports):
        """Render the collected statistics through a jinja2 template and print."""
        from jinja2 import Environment, FileSystemLoader
        script_path = os.path.dirname(os.path.realpath(__file__))
        file_loader = FileSystemLoader(script_path + '/template')
        env = Environment(loader=file_loader, trim_blocks=True)
        template = env.get_template(template_file_name)
        havefailed = False
        haveptest = bool(self.ptests)
        haveltp = bool(self.ltptests)
        haveltpposix = bool(self.ltpposixtests)
        reportvalues = []
        cols = ['passed', 'failed', 'skipped']
        # Track the widest value per column so the template can align output.
        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 ,'ltptest': 0, 'ltpposixtest': 0}
        for line in test_count_reports:
            total_tested = line['passed'] + line['failed'] + line['skipped']
            vals = {}
            vals['result_id'] = line['result_id']
            vals['testseries'] = line['testseries']
            vals['sort'] = line['testseries'] + "_" + line['result_id']
            vals['failed_testcases'] = line['failed_testcases']
            for k in cols:
                # Guard against a result set with no tests at all to avoid
                # a ZeroDivisionError when computing the percentage.
                if total_tested:
                    vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
                else:
                    vals[k] = "0 (0%)"
            for k in maxlen:
                if k in vals and len(vals[k]) > maxlen[k]:
                    maxlen[k] = len(vals[k])
            reportvalues.append(vals)
            if line['failed_testcases']:
                havefailed = True
        for ptest in self.ptests:
            if len(ptest) > maxlen['ptest']:
                maxlen['ptest'] = len(ptest)
        for ltptest in self.ltptests:
            if len(ltptest) > maxlen['ltptest']:
                maxlen['ltptest'] = len(ltptest)
        for ltpposixtest in self.ltpposixtests:
            if len(ltpposixtest) > maxlen['ltpposixtest']:
                maxlen['ltpposixtest'] = len(ltpposixtest)
        output = template.render(reportvalues=reportvalues,
                                 havefailed=havefailed,
                                 haveptest=haveptest,
                                 ptests=self.ptests,
                                 haveltp=haveltp,
                                 haveltpposix=haveltpposix,
                                 ltptests=self.ltptests,
                                 ltpposixtests=self.ltpposixtests,
                                 maxlen=maxlen)
        print(output)

    def view_test_report(self, logger, source_dir, branch, commit, tag):
        """Load test results (from a dir/file or a git repo) and print the report.

        commit takes precedence over tag; with neither, source_dir is read
        directly as result data.
        """
        test_count_reports = []
        if commit:
            if tag:
                logger.warning("Ignoring --tag as --commit was specified")
            tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
            repo = GitRepo(source_dir)
            revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
            rev_index = gitarchive.rev_find(revs, 'commit', commit)
            testresults = resultutils.git_get_result(repo, revs[rev_index][2])
        elif tag:
            repo = GitRepo(source_dir)
            testresults = resultutils.git_get_result(repo, [tag])
        else:
            testresults = resultutils.load_resultsdata(source_dir)
        for testsuite in testresults:
            for resultid in testresults[testsuite]:
                result = testresults[testsuite][resultid]
                test_count_report = self.get_aggregated_test_result(logger, result)
                test_count_report['testseries'] = result['configuration']['TESTSERIES']
                test_count_report['result_id'] = resultid
                test_count_reports.append(test_count_report)
        self.print_test_report('test_report_full_text.txt', test_count_reports)
| 208 | |
def report(args, logger):
    """Entry point for the 'report' subcommand: print a text summary."""
    ResultsTextReport().view_test_report(logger, args.source_dir, args.branch,
                                         args.commit, args.tag)
    return 0
| 213 | |
def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser = subparsers.add_parser(
        'report',
        help='summarise test results',
        description='print a text-based summary of the test results',
        group='analysis')
    parser.set_defaults(func=report)
    parser.add_argument('source_dir',
                        help='source file/directory/URL that contain the test result files to summarise')
    parser.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
    parser.add_argument('--commit', help="Revision to report")
    parser.add_argument('-t', '--tag', default='',
                        help='source_dir is a git repository, report on the tag specified from that repository')