# test result tool - report text based test results
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# SPDX-License-Identifier: GPL-2.0-only
#

import os
import glob
import json
import resulttool.resultutils as resultutils
from oeqa.utils.git import GitRepo
import oeqa.utils.gitarchive as gitarchive

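# The results data handled here is assumed (inferred from the code below, not
# from separate documentation) to map a test series name to result ids, each
# carrying a 'configuration' dict (MACHINE, TESTSERIES, ...) and a 'result'
# dict keyed by test name, e.g. 'ptestresult.<suite>.<test>' -> {'status': 'PASSED'}.
# Keys with the ptestresult./ltpresult./ltpposixresult. prefixes are broken out
# into per-machine, per-suite summaries by the handlers below.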
class ResultsTextReport(object):
    def __init__(self):
        self.ptests = {}
        self.ltptests = {}
        self.ltpposixtests = {}
        self.result_types = {'passed': ['PASSED', 'passed'],
                             'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
                             'skipped': ['SKIPPED', 'skipped']}

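    # Accumulate per-suite ptest counts for one machine: k is a result key such
    # as 'ptestresult.<suite>.<test>' and status is its raw status string.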
    def handle_ptest_result(self, k, status, result, machine):
        if machine not in self.ptests:
            self.ptests[machine] = {}

        if k == 'ptestresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ptestresult.sections']:
                if suite not in self.ptests[machine]:
                    self.ptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
                if 'duration' in result['ptestresult.sections'][suite]:
                    self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
                if 'timeout' in result['ptestresult.sections'][suite]:
                    self.ptests[machine][suite]['duration'] += " T"
            return
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return
        # Handle 'glib-2.0'
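        # A suite name can itself contain a dot: splitting a key like
        # 'ptestresult.glib-2.0.sometest' (test name illustrative) twice gives
        # suite 'glib-2', so retry with one more split and rejoin the two
        # pieces when that matches a known section.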
        if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ptestresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ptests[machine]:
            self.ptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ptests[machine][suite][tk] += 1

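    # The two LTP handlers below mirror handle_ptest_result(), keyed on the
    # 'ltpresult.' and 'ltpposixresult.' prefixes instead of 'ptestresult.'.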
    def handle_ltptest_result(self, k, status, result, machine):
        if machine not in self.ltptests:
            self.ltptests[machine] = {}

        if k == 'ltpresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ltpresult.sections']:
                if suite not in self.ltptests[machine]:
                    self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
                if 'duration' in result['ltpresult.sections'][suite]:
                    self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
                if 'timeout' in result['ltpresult.sections'][suite]:
                    self.ltptests[machine][suite]['duration'] += " T"
            return
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return
        # Handle 'glib-2.0'
        if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ltpresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ltptests[machine]:
            self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ltptests[machine][suite][tk] += 1

    def handle_ltpposixtest_result(self, k, status, result, machine):
        if machine not in self.ltpposixtests:
            self.ltpposixtests[machine] = {}

        if k == 'ltpposixresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ltpposixresult.sections']:
                if suite not in self.ltpposixtests[machine]:
                    self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
                if 'duration' in result['ltpposixresult.sections'][suite]:
                    self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
            return
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return
        # Handle 'glib-2.0'
        if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ltpposixresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ltpposixtests[machine]:
            self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ltpposixtests[machine][suite][tk] += 1

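    # Tally passed/failed/skipped for a single result id and hand each test
    # key to the matching per-suite handler above.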
    def get_aggregated_test_result(self, logger, testresult, machine):
        test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
        result = testresult.get('result', [])
        for k in result:
            test_status = result[k].get('status', [])
            for tk in self.result_types:
                if test_status in self.result_types[tk]:
                    test_count_report[tk] += 1
            if test_status in self.result_types['failed']:
                test_count_report['failed_testcases'].append(k)
            if k.startswith("ptestresult."):
                self.handle_ptest_result(k, test_status, result, machine)
            if k.startswith("ltpresult."):
                self.handle_ltptest_result(k, test_status, result, machine)
            if k.startswith("ltpposixresult."):
                self.handle_ltpposixtest_result(k, test_status, result, machine)
        return test_count_report

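    # Render the report through a Jinja2 template from the 'template' directory
    # next to this script; maxlen records the widest value seen per column,
    # presumably so the template can pad its table columns.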
    def print_test_report(self, template_file_name, test_count_reports):
        from jinja2 import Environment, FileSystemLoader
        script_path = os.path.dirname(os.path.realpath(__file__))
        file_loader = FileSystemLoader(script_path + '/template')
        env = Environment(loader=file_loader, trim_blocks=True)
        template = env.get_template(template_file_name)
        havefailed = False
        reportvalues = []
        machines = []
        cols = ['passed', 'failed', 'skipped']
        maxlen = {'passed': 0, 'failed': 0, 'skipped': 0, 'result_id': 0, 'testseries': 0, 'ptest': 0, 'ltptest': 0, 'ltpposixtest': 0}
        for line in test_count_reports:
            total_tested = line['passed'] + line['failed'] + line['skipped']
            vals = {}
            vals['result_id'] = line['result_id']
            vals['testseries'] = line['testseries']
            vals['sort'] = line['testseries'] + "_" + line['result_id']
            vals['failed_testcases'] = line['failed_testcases']
            for k in cols:
                vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
            for k in maxlen:
                if k in vals and len(vals[k]) > maxlen[k]:
                    maxlen[k] = len(vals[k])
            reportvalues.append(vals)
            if line['failed_testcases']:
                havefailed = True
            if line['machine'] not in machines:
                machines.append(line['machine'])
        for (machine, report) in self.ptests.items():
            for ptest in self.ptests[machine]:
                if len(ptest) > maxlen['ptest']:
                    maxlen['ptest'] = len(ptest)
        for (machine, report) in self.ltptests.items():
            for ltptest in self.ltptests[machine]:
                if len(ltptest) > maxlen['ltptest']:
                    maxlen['ltptest'] = len(ltptest)
        for (machine, report) in self.ltpposixtests.items():
            for ltpposixtest in self.ltpposixtests[machine]:
                if len(ltpposixtest) > maxlen['ltpposixtest']:
                    maxlen['ltpposixtest'] = len(ltpposixtest)
        output = template.render(reportvalues=reportvalues,
                                 havefailed=havefailed,
                                 machines=machines,
                                 ptests=self.ptests,
                                 ltptests=self.ltptests,
                                 ltpposixtests=self.ltpposixtests,
                                 maxlen=maxlen)
        print(output)

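    # Results can come from three sources: a --commit looked up in a git
    # results archive, a --tag in a git repository, or a plain results
    # file/directory.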
    def view_test_report(self, logger, source_dir, branch, commit, tag):
        test_count_reports = []
        if commit:
            if tag:
                logger.warning("Ignoring --tag as --commit was specified")
            tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
            repo = GitRepo(source_dir)
            revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
            rev_index = gitarchive.rev_find(revs, 'commit', commit)
            testresults = resultutils.git_get_result(repo, revs[rev_index][2])
        elif tag:
            repo = GitRepo(source_dir)
            testresults = resultutils.git_get_result(repo, [tag])
        else:
            testresults = resultutils.load_resultsdata(source_dir)
        for testsuite in testresults:
            for resultid in testresults[testsuite]:
                skip = False
                result = testresults[testsuite][resultid]
                machine = result['configuration']['MACHINE']

                # Check whether we already have results of this kind of test for the machine
                for key in result['result'].keys():
                    testtype = str(key).split('.')[0]
                    if ((machine in self.ptests and testtype == "ptestresult" and self.ptests[machine]) or
                        (machine in self.ltptests and testtype == "ltpresult" and self.ltptests[machine]) or
                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
                        print("Already have test results for %s on %s, skipping %s" % (testtype, machine, resultid))
                        skip = True
                        break
                if skip:
                    continue

                test_count_report = self.get_aggregated_test_result(logger, result, machine)
                test_count_report['machine'] = machine
                test_count_report['testseries'] = result['configuration']['TESTSERIES']
                test_count_report['result_id'] = resultid
                test_count_reports.append(test_count_report)
        self.print_test_report('test_report_full_text.txt', test_count_reports)

def report(args, logger):
    report = ResultsTextReport()
    report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('report', help='summarise test results',
                                         description='print a text-based summary of the test results',
                                         group='analysis')
    parser_build.set_defaults(func=report)
    parser_build.add_argument('source_dir',
                              help='source file/directory/URL containing the test result files to summarise')
    parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
    parser_build.add_argument('--commit', help="Revision to report")
    parser_build.add_argument('-t', '--tag', default='',
                              help='source_dir is a git repository, report on the tag specified from that repository')
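
# Typical invocation (assuming the standard resulttool wrapper script drives
# these subcommands; paths are illustrative):
#   resulttool report path/to/testresults
#   resulttool report --branch master --commit <revision> path/to/results/repo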