# Copyright (c) 2016, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
"""Build performance test base classes and functionality"""
import glob
import json
import logging
import os
import re
import resource
import shutil
import socket
import time
import traceback
import unittest
from datetime import datetime, timedelta
from functools import partial
from multiprocessing import Process
from multiprocessing import SimpleQueue

import oe.path
from oeqa.utils.commands import CommandError, runCmd, get_bb_vars
from oeqa.utils.git import GitError, GitRepo

# Get logger for this module
log = logging.getLogger('build-perf')

# Our own version of runCmd which does not raise an AssertionError, which
# would cause errors to be interpreted as failures
runCmd2 = partial(runCmd, assert_error=False)


class KernelDropCaches(object):
    """Container of the functions for dropping kernel caches"""
    sudo_passwd = None

    @classmethod
    def check(cls):
        """Check permissions for dropping kernel caches"""
        from getpass import getpass
        from locale import getdefaultlocale
        cmd = ['sudo', '-k', '-n', 'tee', '/proc/sys/vm/drop_caches']
        ret = runCmd2(cmd, ignore_status=True, data=b'0')
        if ret.output.startswith('sudo:'):
            pass_str = getpass(
                "\nThe script requires sudo access to drop caches between "
                "builds (echo 3 > /proc/sys/vm/drop_caches).\n"
                "Please enter your sudo password: ")
            cls.sudo_passwd = bytes(pass_str, getdefaultlocale()[1])

    @classmethod
    def drop(cls):
        """Drop kernel caches"""
        cmd = ['sudo', '-k']
        if cls.sudo_passwd:
            cmd.append('-S')
            input_data = cls.sudo_passwd + b'\n'
        else:
            cmd.append('-n')
            input_data = b''
        cmd += ['tee', '/proc/sys/vm/drop_caches']
        input_data += b'3'
        runCmd2(cmd, data=input_data)

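# A minimal usage sketch (hypothetical driver code): verify sudo access once
# up front, then drop the caches before each measured build step:
#
#   KernelDropCaches.check()    # may prompt for the sudo password
#   ...
#   KernelDropCaches.drop()     # writes 3 to /proc/sys/vm/drop_caches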

def str_to_fn(string):
    """Convert string to a sanitized filename"""
    # NOTE: re.LOCALE is not allowed with str patterns in Python 3.6+, so
    # rely on the default unicode-aware matching of \W
    return re.sub(r'(\W+)', '-', string)
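# For example (illustrative): str_to_fn('Results of master:abc123') returns
# 'Results-of-master-abc123'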


class ResultsJsonEncoder(json.JSONEncoder):
    """Extended encoder for build perf test results"""
    unix_epoch = datetime.utcfromtimestamp(0)

    def default(self, obj):
        """Encoder for our types"""
        if isinstance(obj, datetime):
            # NOTE: we assume that all timestamps are in UTC time
            return (obj - self.unix_epoch).total_seconds()
        if isinstance(obj, timedelta):
            return obj.total_seconds()
        return json.JSONEncoder.default(self, obj)
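# A minimal usage sketch: datetime values are serialized as seconds since the
# Unix epoch and timedelta values as total seconds, e.g.
#
#   json.dumps({'elapsed_time': timedelta(seconds=90)},
#              cls=ResultsJsonEncoder)   # -> '{"elapsed_time": 90.0}'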


class BuildPerfTestResult(unittest.TextTestResult):
    """Test result class for build performance tests"""
    # List of test cases to run
    test_run_queue = []

    def __init__(self, out_dir, *args, **kwargs):
        super(BuildPerfTestResult, self).__init__(*args, **kwargs)

        self.out_dir = out_dir
        # Get Git parameters
        try:
            self.repo = GitRepo('.')
        except GitError:
            self.repo = None
        self.git_commit, self.git_commit_count, self.git_branch = \
            self.get_git_revision()
        self.hostname = socket.gethostname()
        self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
        self.start_time = self.elapsed_time = None
        self.successes = []
        log.info("Using Git branch:commit %s:%s (%s)", self.git_branch,
                 self.git_commit, self.git_commit_count)

    def get_git_revision(self):
        """Get git branch and commit under testing"""
        commit = os.getenv('OE_BUILDPERFTEST_GIT_COMMIT')
        commit_cnt = os.getenv('OE_BUILDPERFTEST_GIT_COMMIT_COUNT')
        branch = os.getenv('OE_BUILDPERFTEST_GIT_BRANCH')
        if not self.repo and (not commit or not commit_cnt or not branch):
            log.info("The current working directory doesn't seem to be a Git "
                     "repository clone. You can specify the branch and commit "
                     "displayed in test results with the "
                     "OE_BUILDPERFTEST_GIT_BRANCH, OE_BUILDPERFTEST_GIT_COMMIT "
                     "and OE_BUILDPERFTEST_GIT_COMMIT_COUNT environment "
                     "variables")
        else:
            if not commit:
                commit = self.repo.rev_parse('HEAD^0')
                commit_cnt = self.repo.run_cmd(['rev-list', '--count', 'HEAD^0'])
            if not branch:
                branch = self.repo.get_current_branch()
                if not branch:
                    log.debug('Currently on detached HEAD')
        return str(commit), str(commit_cnt), str(branch)

    def addSuccess(self, test):
        """Record results from successful tests"""
        super(BuildPerfTestResult, self).addSuccess(test)
        self.successes.append((test, None))

    def startTest(self, test):
        """Pre-test hook"""
        test.base_dir = self.out_dir
        os.mkdir(test.out_dir)
        log.info("Executing test %s: %s", test.name, test.shortDescription())
        self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
        super(BuildPerfTestResult, self).startTest(test)

    def startTestRun(self):
        """Pre-run hook"""
        self.start_time = datetime.utcnow()

    def stopTestRun(self):
        """Post-run hook"""
        self.elapsed_time = datetime.utcnow() - self.start_time
        self.write_results_json()

    def all_results(self):
        """Iterate over all results as (status, (test, message)) pairs"""
        result_map = {'SUCCESS': self.successes,
                      'FAIL': self.failures,
                      'ERROR': self.errors,
                      'EXP_FAIL': self.expectedFailures,
                      'UNEXP_SUCCESS': self.unexpectedSuccesses,
                      'SKIPPED': self.skipped}
        for status, tests in result_map.items():
            for test in tests:
                # unittest stores unexpected successes as bare test objects,
                # all other categories as (test, message) tuples
                if not isinstance(test, tuple):
                    test = (test, None)
                yield (status, test)

    def update_globalres_file(self, filename):
        """Write results to globalres csv file"""
        # Map test names to time and size columns in globalres
        # The tuples represent index and length of times and sizes
        # respectively
        gr_map = {'test1': ((0, 1), (8, 1)),
                  'test12': ((1, 1), (None, None)),
                  'test13': ((2, 1), (9, 1)),
                  'test2': ((3, 1), (None, None)),
                  'test3': ((4, 3), (None, None)),
                  'test4': ((7, 1), (10, 2))}
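        # Each line appended to globalres looks like (illustrative):
        #   <hostname>,<branch>:<commit>,<git describe>,<t0>,...,<t7>,<s0>,...,<s3>
        # i.e. eight time columns followed by four size columns, filled in
        # from the tests' times/sizes arrays according to gr_map above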

        if self.repo:
            git_tag_rev = self.repo.run_cmd(['describe', self.git_commit])
        else:
            git_tag_rev = self.git_commit

        values = ['0'] * 12
        for status, (test, msg) in self.all_results():
            if status in ['ERROR', 'SKIPPED']:
                continue
            (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
            if t_ind is not None:
                values[t_ind:t_ind + t_len] = test.times
            if s_ind is not None:
                values[s_ind:s_ind + s_len] = test.sizes

        log.debug("Writing globalres log to %s", filename)
        with open(filename, 'a') as fobj:
            fobj.write('{},{}:{},{},'.format(self.hostname,
                                             self.git_branch,
                                             self.git_commit,
                                             git_tag_rev))
            fobj.write(','.join(values) + '\n')

    def write_results_json(self):
        """Write test results into a json-formatted file"""
        results = {'tester_host': self.hostname,
                   'git_branch': self.git_branch,
                   'git_commit': self.git_commit,
                   'git_commit_count': self.git_commit_count,
                   'product': self.product,
                   'start_time': self.start_time,
                   'elapsed_time': self.elapsed_time}

        tests = {}
        for status, (test, reason) in self.all_results():
            tests[test.name] = {'name': test.name,
                                'description': test.shortDescription(),
                                'status': status,
                                'start_time': test.start_time,
                                'elapsed_time': test.elapsed_time,
                                'cmd_log_file': os.path.relpath(test.cmd_log_file,
                                                                self.out_dir),
                                'measurements': test.measurements}
        results['tests'] = tests

        with open(os.path.join(self.out_dir, 'results.json'), 'w') as fobj:
            json.dump(results, fobj, indent=4, sort_keys=True,
                      cls=ResultsJsonEncoder)

    def git_commit_results(self, repo_path, branch=None, tag=None):
        """Commit results into a Git repository"""
        repo = GitRepo(repo_path, is_topdir=True)
        if not branch:
            branch = self.git_branch
        else:
            # Replace keywords
            branch = branch.format(git_branch=self.git_branch,
                                   tester_host=self.hostname)

        log.info("Committing test results into %s %s", repo_path, branch)
        tmp_index = os.path.join(repo_path, '.git', 'index.oe-build-perf')
        try:
            # Create new commit object from the new results
            env_update = {'GIT_INDEX_FILE': tmp_index,
                          'GIT_WORK_TREE': self.out_dir}
            repo.run_cmd('add .', env_update)
            tree = repo.run_cmd('write-tree', env_update)
            parent = repo.rev_parse(branch)
            msg = "Results of {}:{}\n".format(self.git_branch, self.git_commit)
            git_cmd = ['commit-tree', tree, '-m', msg]
            if parent:
                git_cmd += ['-p', parent]
            commit = repo.run_cmd(git_cmd, env_update)

            # Update branch head
            git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
            if parent:
                git_cmd.append(parent)
            repo.run_cmd(git_cmd)

            # Update current HEAD, if we're on branch 'branch'
            if repo.get_current_branch() == branch:
                log.info("Updating %s HEAD to latest commit", repo_path)
                repo.run_cmd('reset --hard')

            # Create (annotated) tag
            if tag:
                # Find tags matching the pattern
                tag_keywords = dict(git_branch=self.git_branch,
                                    git_commit=self.git_commit,
                                    git_commit_count=self.git_commit_count,
                                    tester_host=self.hostname,
                                    tag_num='[0-9]{1,5}')
                tag_re = re.compile(tag.format(**tag_keywords) + '$')
                tag_keywords['tag_num'] = 0
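                # Count existing tags matching the pattern so that tag_num
                # gives each run a unique, increasing number, e.g. a
                # (hypothetical) pattern '{git_branch}/{tag_num}' would yield
                # 'master/0', 'master/1', ...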
                for existing_tag in repo.run_cmd('tag').splitlines():
                    if tag_re.match(existing_tag):
                        tag_keywords['tag_num'] += 1

                tag = tag.format(**tag_keywords)
                msg = "Test run #{} of {}:{}\n".format(tag_keywords['tag_num'],
                                                       self.git_branch,
                                                       self.git_commit)
                repo.run_cmd(['tag', '-a', '-m', msg, tag, commit])

        finally:
            if os.path.exists(tmp_index):
                os.unlink(tmp_index)


class BuildPerfTestCase(unittest.TestCase):
    """Base class for build performance tests"""
    SYSRES = 'sysres'
    DISKUSAGE = 'diskusage'
    build_target = None

    def __init__(self, *args, **kwargs):
        super(BuildPerfTestCase, self).__init__(*args, **kwargs)
        self.name = self._testMethodName
        self.base_dir = None
        self.start_time = None
        self.elapsed_time = None
        self.measurements = []
        self.bb_vars = get_bb_vars()
        # TODO: remove 'times' and 'sizes' arrays when globalres support is
        # removed
        self.times = []
        self.sizes = []

    @property
    def out_dir(self):
        return os.path.join(self.base_dir, self.name)

    @property
    def cmd_log_file(self):
        return os.path.join(self.out_dir, 'commands.log')

    def setUp(self):
        """Set-up fixture for each test"""
        if self.build_target:
            self.log_cmd_output(['bitbake', self.build_target,
                                 '-c', 'fetchall'])

    def run(self, *args, **kwargs):
        """Run test"""
        self.start_time = datetime.now()
        super(BuildPerfTestCase, self).run(*args, **kwargs)
        self.elapsed_time = datetime.now() - self.start_time

    def log_cmd_output(self, cmd):
        """Run a command and log its output"""
        cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Logging command: %s", cmd_str)
        try:
            with open(self.cmd_log_file, 'a') as fobj:
                runCmd2(cmd, stdout=fobj)
        except CommandError as err:
            log.error("Command failed: %s", err.retcode)
            raise

    def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
        """Measure system resource usage of a command"""
        def _worker(data_q, cmd, **kwargs):
            """Worker process for measuring resources"""
            try:
                start_time = datetime.now()
                ret = runCmd2(cmd, **kwargs)
                etime = datetime.now() - start_time
                rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
                iostat = {}
                with open('/proc/{}/io'.format(os.getpid())) as fobj:
                    for line in fobj.readlines():
                        key, val = line.split(':')
                        iostat[key] = int(val)
                rusage = {}
                # Skip unused fields, (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
                # 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
                for key in ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
                            'ru_majflt', 'ru_inblock', 'ru_oublock',
                            'ru_nvcsw', 'ru_nivcsw']:
                    rusage[key] = getattr(rusage_struct, key)
                data_q.put({'ret': ret,
                            'start_time': start_time,
                            'elapsed_time': etime,
                            'rusage': rusage,
                            'iostat': iostat})
            except Exception as err:
                data_q.put(err)

        cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Timing command: %s", cmd_str)
        data_q = SimpleQueue()
        try:
            with open(self.cmd_log_file, 'a') as fobj:
                proc = Process(target=_worker, args=(data_q, cmd,),
                               kwargs={'stdout': fobj})
                proc.start()
                data = data_q.get()
                proc.join()
            if isinstance(data, Exception):
                raise data
        except CommandError:
            log.error("Command '%s' failed, see %s for more details", cmd_str,
                      self.cmd_log_file)
            raise
        etime = data['elapsed_time']

        measurement = {'type': self.SYSRES,
                       'name': name,
                       'legend': legend}
        measurement['values'] = {'start_time': data['start_time'],
                                 'elapsed_time': etime,
                                 'rusage': data['rusage'],
                                 'iostat': data['iostat']}
        if save_bs:
            bs_file = self.save_buildstats(legend)
            measurement['values']['buildstats_file'] = \
                os.path.relpath(bs_file, self.base_dir)

        self.measurements.append(measurement)

        # Append to 'times' array for globalres log
        e_sec = etime.total_seconds()
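        # (formats elapsed time as H:MM:SS.ss, e.g. 4807.5 s -> '1:20:07.50')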
        self.times.append('{:d}:{:02d}:{:05.2f}'.format(int(e_sec / 3600),
                                                        int((e_sec % 3600) / 60),
                                                        e_sec % 60))

    def measure_disk_usage(self, path, name, legend, apparent_size=False):
        """Estimate disk usage of a file or directory"""
        cmd = ['du', '-s', '--block-size', '1024']
        if apparent_size:
            cmd.append('--apparent-size')
        cmd.append(path)
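        # 'du --block-size 1024' reports usage in 1 KiB units, so e.g. an
        # output line of "204800  /path" means 200 MiB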

        ret = runCmd2(cmd)
        size = int(ret.output.split()[0])
        log.debug("Size of %s path is %s", path, size)
        measurement = {'type': self.DISKUSAGE,
                       'name': name,
                       'legend': legend}
        measurement['values'] = {'size': size}
        self.measurements.append(measurement)
        # Append to 'sizes' array for globalres log
        self.sizes.append(str(size))

    def save_buildstats(self, label=None):
        """Save buildstats"""
        def split_nevr(nevr):
            """Split name and version information from recipe "nevr" string"""
            n_e_v, revision = nevr.rsplit('-', 1)
            match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
                             n_e_v)
            if not match:
                # If we're not able to parse a version starting with a number,
                # just take the part after last dash
                match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
                                 n_e_v)
            name = match.group('name')
            version = match.group('version')
            epoch = match.group('epoch')
            return name, epoch, version, revision
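        # For example (illustrative):
        #   split_nevr('gcc-5.4.0-r0')  -> ('gcc', None, '5.4.0', 'r0')
        #   split_nevr('foo-1_2.3-r1')  -> ('foo', '1', '2.3', 'r1')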

        def bs_to_json(filename):
            """Convert (task) buildstats file into json format"""
            bs_json = {'iostat': {},
                       'rusage': {},
                       'child_rusage': {}}
            with open(filename) as fobj:
                for line in fobj.readlines():
                    key, val = line.split(':', 1)
                    val = val.strip()
                    if key == 'Started':
                        start_time = datetime.utcfromtimestamp(float(val))
                        bs_json['start_time'] = start_time
                    elif key == 'Ended':
                        end_time = datetime.utcfromtimestamp(float(val))
                    elif key.startswith('IO '):
                        split = key.split()
                        bs_json['iostat'][split[1]] = int(val)
                    elif key.find('rusage') >= 0:
                        split = key.split()
                        ru_key = split[-1]
                        if ru_key in ('ru_stime', 'ru_utime'):
                            val = float(val)
                        else:
                            val = int(val)
                        ru_type = 'rusage' if split[0] == 'rusage' else \
                            'child_rusage'
                        bs_json[ru_type][ru_key] = val
                    elif key == 'Status':
                        bs_json['status'] = val
            bs_json['elapsed_time'] = end_time - start_time
            return bs_json
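        # A task buildstats file consists of "key: value" lines, e.g.
        # (abridged, illustrative values):
        #   Started: 1472041402.84
        #   Ended: 1472041403.61
        #   IO rchar: 12345
        #   rusage ru_utime: 0.5
        #   Child rusage ru_utime: 0.25
        #   Status: PASSED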

        log.info('Saving buildstats in JSON format')
        bs_dirs = sorted(os.listdir(self.bb_vars['BUILDSTATS_BASE']))
        if len(bs_dirs) > 1:
            log.warning("Multiple buildstats found for test %s, only "
                        "archiving the last one", self.name)
        bs_dir = os.path.join(self.bb_vars['BUILDSTATS_BASE'], bs_dirs[-1])

        buildstats = []
        for fname in os.listdir(bs_dir):
            recipe_dir = os.path.join(bs_dir, fname)
            if not os.path.isdir(recipe_dir):
                continue
            name, epoch, version, revision = split_nevr(fname)
            recipe_bs = {'name': name,
                         'epoch': epoch,
                         'version': version,
                         'revision': revision,
                         'tasks': {}}
            for task in os.listdir(recipe_dir):
                recipe_bs['tasks'][task] = bs_to_json(os.path.join(recipe_dir,
                                                                   task))
            buildstats.append(recipe_bs)

        # Write buildstats into json file
        postfix = '.' + str_to_fn(label) if label else ''
        postfix += '.json'
        outfile = os.path.join(self.out_dir, 'buildstats' + postfix)
        with open(outfile, 'w') as fobj:
            json.dump(buildstats, fobj, indent=4, sort_keys=True,
                      cls=ResultsJsonEncoder)
        return outfile

    def rm_tmp(self):
        """Cleanup temporary/intermediate files and directories"""
        log.debug("Removing temporary and cache files")
        for name in ['bitbake.lock', 'conf/sanity_info',
                     self.bb_vars['TMPDIR']]:
            oe.path.remove(name, recurse=True)

    def rm_sstate(self):
        """Remove sstate directory"""
        log.debug("Removing sstate-cache")
        oe.path.remove(self.bb_vars['SSTATE_DIR'], recurse=True)

    def rm_cache(self):
        """Drop bitbake caches"""
        oe.path.remove(self.bb_vars['PERSISTENT_DIR'], recurse=True)

    @staticmethod
    def sync():
        """Sync and drop kernel caches"""
        log.debug("Syncing and dropping kernel caches")
        KernelDropCaches.drop()
        os.sync()
        # Wait a bit for all the dirty blocks to be written onto disk
        time.sleep(3)


class BuildPerfTestLoader(unittest.TestLoader):
    """Test loader for build performance tests"""
    sortTestMethodsUsing = None


class BuildPerfTestRunner(unittest.TextTestRunner):
    """Test runner for build performance tests"""

    def __init__(self, out_dir, *args, **kwargs):
        super(BuildPerfTestRunner, self).__init__(*args, **kwargs)
        self.out_dir = out_dir

    def _makeResult(self):
        return BuildPerfTestResult(self.out_dir, self.stream, self.descriptions,
                                   self.verbosity)
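

# A minimal usage sketch (hypothetical driver code; in oe-core the tests are
# normally launched via the oe-build-perf-test script):
#
#   loader = BuildPerfTestLoader()
#   suite = loader.loadTestsFromModule(some_test_module)
#   runner = BuildPerfTestRunner('/path/to/results', verbosity=2)
#   runner.run(suite)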