# test case management tool - manual execution from testopia test cases
#
# Copyright (c) 2018, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
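# This resulttool plugin provides the 'manualexecution' subcommand (typically
# invoked as 'resulttool manualexecution <manual-testcase-json>'). It walks the
# tester through each test case defined in a manual test case JSON file (see
# meta/lib/oeqa/manual/), collects a verdict per case and writes the results
# out via OETestResultJSONHelper.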
import argparse
import json
import os
import sys
import datetime
import re
from oeqa.core.runner import OETestResultJSONHelper


def load_json_file(file):
    with open(file, "r") as f:
        return json.load(f)


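# The manual test case JSON file is expected to be a list of entries shaped
# roughly as follows (inferred from how the fields are accessed below):
#
#   [{"test": {"@alias": "<module>.<suite>.<case>",
#              "execution": {"1": {"action": "...", "expected_results": "..."},
#                            "2": {"action": "...", "expected_results": "..."}}}},
#    ...]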
class ManualTestRunner(object):
    def __init__(self):
        self.jdata = ''
        self.test_module = ''
        self.test_cases_id = ''
        self.configuration = ''
        self.starttime = ''
        self.result_id = ''
        self.write_dir = ''

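    # Load the test case definitions and record each case's '@alias', plus the
    # module name (the first component of the first case's alias).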
    def _get_testcases(self, file):
        self.jdata = load_json_file(file)
        self.test_cases_id = []
        self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]
        for i in self.jdata:
            self.test_cases_id.append(i['test']['@alias'])

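    # Prompt until the tester enters a value containing only alphanumerics,
    # underscores or hyphens.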
    def _get_input(self, config):
        while True:
            output = input('{} = '.format(config))
            if re.match('^[a-zA-Z0-9_-]+$', output):
                break
            print('Only alphanumerics, underscores and hyphens are allowed. Please try again.')
        return output

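    # Build the result configuration: layer metadata, start time, test type
    # and test module, plus any extra fields required by the 'manual' store
    # map, which are collected interactively from the tester.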
    def _create_config(self):
        from oeqa.utils.metadata import get_layers
        from oeqa.utils.commands import get_bb_var
        from resulttool.resultutils import store_map

        layers = get_layers(get_bb_var('BBLAYERS'))
        self.configuration = {}
        self.configuration['LAYERS'] = layers
        current_datetime = datetime.datetime.now()
        self.starttime = current_datetime.strftime('%Y%m%d%H%M%S')
        self.configuration['STARTTIME'] = self.starttime
        self.configuration['TEST_TYPE'] = 'manual'
        self.configuration['TEST_MODULE'] = self.test_module

        extra_config = set(store_map['manual']) - set(self.configuration)
        for config in sorted(extra_config):
            print('---------------------------------------------')
            print('This is configuration %s. Please provide a configuration value (use "None" if not applicable).'
                  % config)
            print('---------------------------------------------')
            value_conf = self._get_input('Configuration Value')
            print('---------------------------------------------\n')
            self.configuration[config] = value_conf

    def _create_result_id(self):
        self.result_id = 'manual_' + self.test_module + '_' + self.starttime

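    # Walk the tester through every step of one test case, then prompt for a
    # single overall verdict (PASSED/FAILED/BLOCKED/SKIPPED); failures also
    # record a free-form log entry.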
    def _execute_test_steps(self, test_id):
        test_result = {}
        total_steps = len(self.jdata[test_id]['test']['execution'].keys())
        print('------------------------------------------------------------------------')
        print('Executing test case: ' + self.test_cases_id[test_id])
        print('------------------------------------------------------------------------')
        print('You have a total of ' + str(total_steps) + ' test steps to be executed.')
        print('------------------------------------------------------------------------\n')
        # Steps are keyed by number; sort numerically so that step '10' comes after '9'.
        for step in sorted(self.jdata[test_id]['test']['execution'].keys(), key=int):
            print('Step %s: ' % step + self.jdata[test_id]['test']['execution']['%s' % step]['action'])
            print('Expected output: ' + self.jdata[test_id]['test']['execution']['%s' % step]['expected_results'])
            input('\nPlease press ENTER when you are done to proceed to the next step.\n')
        result_types = {'p': 'PASSED',
                        'f': 'FAILED',
                        'b': 'BLOCKED',
                        's': 'SKIPPED'}
        while True:
            done = input('\nPlease provide the test result: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
            if done in result_types:
                res = result_types[done]
                if res == 'FAILED':
                    log_input = input('\nPlease enter the error and a description for the log: (Ex: log:211 Error Bitbake)\n')
                    test_result.update({self.test_cases_id[test_id]: {'status': res, 'log': log_input}})
                else:
                    test_result.update({self.test_cases_id[test_id]: {'status': res}})
                break
            print('Invalid input!')
        return test_result

    def _create_write_dir(self):
        basepath = os.environ['BUILDDIR']
        self.write_dir = basepath + '/tmp/log/manual/'

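    # Top-level driver: load the test cases, gather the configuration, then
    # execute every case in the suite and collect the results.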
    def run_test(self, file):
        self._get_testcases(file)
        self._create_config()
        self._create_result_id()
        self._create_write_dir()
        test_results = {}
        print('\nTotal number of test cases in this test suite: ' + '%s\n' % len(self.jdata))
        for i in range(len(self.jdata)):
            test_result = self._execute_test_steps(i)
            test_results.update(test_result)
        return self.configuration, self.result_id, self.write_dir, test_results

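# Entry point for the 'manualexecution' subcommand: run the interactive
# session and dump the collected results as a test result JSON file.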
def manualexecution(args, logger):
    testrunner = ManualTestRunner()
    configuration, result_id, write_dir, test_results = testrunner.run_test(args.file)
    resultjsonhelper = OETestResultJSONHelper()
    resultjsonhelper.dump_testresult_file(write_dir, configuration, result_id, test_results)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('manualexecution', help='helper script for populating results during manual test execution.',
                                         description='helper script for populating results during manual test execution. The manual test case JSON files can be found in meta/lib/oeqa/manual/.',
                                         group='manualexecution')
    parser_build.set_defaults(func=manualexecution)
    parser_build.add_argument('file', help='specify the path to the manual test case JSON file. Note: please use "" to encapsulate the file path.')