reset upstream subtrees to yocto 2.6
Reset the following subtrees on thud HEAD:
poky: 87e3a9739d
meta-openembedded: 6094ae18c8
meta-security: 31dc4e7532
meta-raspberrypi: a48743dc36
meta-xilinx: c42016e2e6
Also re-apply backports that didn't make it into thud:
poky:
17726d0 systemd-systemctl-native: handle Install wildcards
meta-openembedded:
4321a5d libtinyxml2: update to 7.0.1
042f0a3 libcereal: Add native and nativesdk classes
e23284f libcereal: Allow empty package
030e8d4 rsyslog: curl-less build with fmhttp PACKAGECONFIG
179a1b9 gtest: update to 1.8.1
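The subtree resets and backports listed above are normally applied by replacing
each subtree's contents with the pinned upstream commit and cherry-picking the
backports on top. A minimal sketch for the poky subtree (the remote name and the
exact commands are assumptions; the real update may have used different tooling):

  git fetch poky
  git rm -q -rf poky
  git read-tree --prefix=poky/ -u 87e3a9739d
  git commit -m "poky: reset to thud HEAD"
  # re-apply a backport that is not yet in thud
  git cherry-pick -x -Xsubtree=poky 17726d0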
Squashed OpenBMC subtree compatibility updates:
meta-aspeed:
Brad Bishop (1):
aspeed: add yocto 2.6 compatibility
meta-ibm:
Brad Bishop (1):
ibm: prepare for yocto 2.6
meta-ingrasys:
Brad Bishop (1):
ingrasys: set layer compatibility to yocto 2.6
meta-openpower:
Brad Bishop (1):
openpower: set layer compatibility to yocto 2.6
meta-phosphor:
Brad Bishop (3):
phosphor: set layer compatibility to thud
phosphor: libgpg-error: drop patches
phosphor: react to fitimage artifact rename
Ed Tanous (4):
Dropbear: upgrade options for latest upgrade
yocto2.6: update openssl options
busybox: remove upstream watchdog patch
systemd: Rebase CONFIG_CGROUP_BPF patch
Change-Id: I7b1fe71cca880d0372a82d94b5fd785323e3a9e7
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
diff --git a/poky/meta/lib/oeqa/core/context.py b/poky/meta/lib/oeqa/core/context.py
index ef00845..821aec8 100644
--- a/poky/meta/lib/oeqa/core/context.py
+++ b/poky/meta/lib/oeqa/core/context.py
@@ -57,14 +57,21 @@
modules_required, filters)
self.suites = self.loader.discover()
- def runTests(self, skips=[]):
+ def runTests(self, processes=None, skips=[]):
self.runner = self.runnerClass(self, descriptions=False, verbosity=2)
        # Dynamically skip those tests specified through arguments
self.skipTests(skips)
self._run_start_time = time.time()
- result = self.runner.run(self.suites)
+ if processes:
+ from oeqa.core.utils.concurrencytest import ConcurrentTestSuite
+
+ concurrent_suite = ConcurrentTestSuite(self.suites, processes)
+ result = self.runner.run(concurrent_suite)
+ else:
+ self.runner.buffer = True
+ result = self.runner.run(self.suites)
self._run_end_time = time.time()
return result
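The new processes argument above is what the parallel selftest path feeds through
to select the concurrent runner. A rough usage sketch from an initialised build
directory (the -j spelling is assumed from the oe-selftest changes that accompany
this code; it is not shown in this hunk):

  # run the selftests across four forked worker processes
  oe-selftest -j 4 -r wic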
diff --git a/poky/meta/lib/oeqa/core/decorator/data.py b/poky/meta/lib/oeqa/core/decorator/data.py
index 31c6dd6..f0f65ab 100644
--- a/poky/meta/lib/oeqa/core/decorator/data.py
+++ b/poky/meta/lib/oeqa/core/decorator/data.py
@@ -54,6 +54,20 @@
self.case.skipTest(self.msg)
@registerDecorator
+class skipIfInDataVar(OETestDecorator):
+ """
+ Skip test if value is in data store's variable.
+ """
+
+ attrs = ('var', 'value', 'msg')
+ def setUpDecorator(self):
+ msg = ('Checking if %r value contains %r to skip '
+ 'the test' % (self.var, self.value))
+ self.logger.debug(msg)
+ if self.value in (self.case.td.get(self.var)):
+ self.case.skipTest(self.msg)
+
+@registerDecorator
class skipIfNotInDataVar(OETestDecorator):
"""
Skip test if value is not in data store's variable.
diff --git a/poky/meta/lib/oeqa/core/decorator/depends.py b/poky/meta/lib/oeqa/core/decorator/depends.py
index 69c604d..950dbaa 100644
--- a/poky/meta/lib/oeqa/core/decorator/depends.py
+++ b/poky/meta/lib/oeqa/core/decorator/depends.py
@@ -63,13 +63,15 @@
return [cases[case_id] for case_id in cases_ordered]
def _skipTestDependency(case, depends):
- skipReasons = ['errors', 'failures', 'skipped']
-
- for reason in skipReasons:
- for test, _ in getattr(case.tc.results, reason):
- if test.id() in depends:
- raise SkipTest("Test case %s depends on %s and was in %s." \
- % (case.id(), test.id(), reason))
+ for dep in depends:
+ found = False
+ for test, _ in case.tc.results.successes:
+ if test.id() == dep:
+ found = True
+ break
+ if not found:
+ raise SkipTest("Test case %s depends on %s but it didn't pass/run." \
+ % (case.id(), dep))
@registerDecorator
class OETestDepends(OETestDiscover):
diff --git a/poky/meta/lib/oeqa/core/loader.py b/poky/meta/lib/oeqa/core/loader.py
index 2255cf1..e66de32 100644
--- a/poky/meta/lib/oeqa/core/loader.py
+++ b/poky/meta/lib/oeqa/core/loader.py
@@ -44,6 +44,8 @@
# Assumption: package and module names do not contain upper case
# characters, whereas class names do
m = re.match(r'^(\w+)(?:\.(\w[^.]*)(?:\.([^.]+))?)?$', module, flags=re.ASCII)
+ if not m:
+ continue
module_name, class_name, test_name = m.groups()
diff --git a/poky/meta/lib/oeqa/core/runner.py b/poky/meta/lib/oeqa/core/runner.py
index f8bb23f..df88b85 100644
--- a/poky/meta/lib/oeqa/core/runner.py
+++ b/poky/meta/lib/oeqa/core/runner.py
@@ -36,6 +36,9 @@
super(OETestResult, self).__init__(*args, **kwargs)
self.successes = []
+ self.starttime = {}
+ self.endtime = {}
+ self.progressinfo = {}
# Inject into tc so that TestDepends decorator can see results
tc.results = self
@@ -43,13 +46,25 @@
self.tc = tc
def startTest(self, test):
- # Allow us to trigger the testcase buffer mode on a per test basis
- # so stdout/stderr are only printed upon failure. Enables debugging
- # but clean output
- if hasattr(test, "buffer"):
- self.buffer = test.buffer
+ # May have been set by concurrencytest
+ if test.id() not in self.starttime:
+ self.starttime[test.id()] = time.time()
super(OETestResult, self).startTest(test)
+ def stopTest(self, test):
+ self.endtime[test.id()] = time.time()
+ super(OETestResult, self).stopTest(test)
+ if test.id() in self.progressinfo:
+ self.tc.logger.info(self.progressinfo[test.id()])
+
+        # Print the errors/failures early to aid/speed debugging, it's a pain
+ # to wait until selftest finishes to see them.
+ for t in ['failures', 'errors', 'skipped', 'expectedFailures']:
+ for (scase, msg) in getattr(self, t):
+ if test.id() == scase.id():
+ self.tc.logger.info(str(msg))
+ break
+
def logSummary(self, component, context_msg=''):
elapsed_time = self.tc._run_end_time - self.tc._run_start_time
self.tc.logger.info("SUMMARY:")
@@ -78,13 +93,13 @@
# When fails at module or class level the class name is passed as string
# so figure out to see if match
- m = re.search("^setUpModule \((?P<module_name>.*)\)$", scase_str)
+ m = re.search(r"^setUpModule \((?P<module_name>.*)\)$", scase_str)
if m:
if case.__class__.__module__ == m.group('module_name'):
found = True
break
- m = re.search("^setUpClass \((?P<class_name>.*)\)$", scase_str)
+ m = re.search(r"^setUpClass \((?P<class_name>.*)\)$", scase_str)
if m:
class_name = "%s.%s" % (case.__class__.__module__,
case.__class__.__name__)
@@ -122,9 +137,13 @@
if hasattr(d, 'oeid'):
oeid = d.oeid
+ t = ""
+ if case.id() in self.starttime and case.id() in self.endtime:
+ t = " (" + "{0:.2f}".format(self.endtime[case.id()] - self.starttime[case.id()]) + "s)"
+
if status not in logs:
logs[status] = []
- logs[status].append("RESULTS - %s - Testcase %s: %s" % (case.id(), oeid, status))
+ logs[status].append("RESULTS - %s - Testcase %s: %s%s" % (case.id(), oeid, status, t))
if log:
result[case.id()] = {'status': status, 'log': log}
else:
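With the per-test start and end times recorded above, each RESULTS line gains an
elapsed-time suffix. An illustrative rendering of the new format string (test id,
testcase number and status are invented for the example):

  RESULTS - wic.Wic.test_version - Testcase 1422: PASSED (3.21s)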
diff --git a/poky/meta/lib/oeqa/core/utils/concurrencytest.py b/poky/meta/lib/oeqa/core/utils/concurrencytest.py
new file mode 100644
index 0000000..f050289
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/utils/concurrencytest.py
@@ -0,0 +1,259 @@
+#!/usr/bin/env python3
+#
+# Modified for use in OE by Richard Purdie, 2018
+#
+# Modified by: Corey Goldberg, 2013
+# License: GPLv2+
+#
+# Original code from:
+# Bazaar (bzrlib.tests.__init__.py, v2.6, copied Jun 01 2013)
+# Copyright (C) 2005-2011 Canonical Ltd
+# License: GPLv2+
+
+import os
+import sys
+import traceback
+import unittest
+import subprocess
+import testtools
+import threading
+import time
+import io
+
+from queue import Queue
+from itertools import cycle
+from subunit import ProtocolTestCase, TestProtocolClient
+from subunit.test_results import AutoTimingTestResultDecorator
+from testtools import ThreadsafeForwardingResult, iterate_tests
+
+import bb.utils
+import oe.path
+
+__all__ = [
+ 'ConcurrentTestSuite',
+ 'fork_for_tests',
+ 'partition_tests',
+]
+
+#
+# Patch the version from testtools to allow access to _test_start and allow
+# computation of timing information and threading progress
+#
+class BBThreadsafeForwardingResult(ThreadsafeForwardingResult):
+
+ def __init__(self, target, semaphore, threadnum, totalinprocess, totaltests):
+ super(BBThreadsafeForwardingResult, self).__init__(target, semaphore)
+ self.threadnum = threadnum
+ self.totalinprocess = totalinprocess
+ self.totaltests = totaltests
+
+ def _add_result_with_semaphore(self, method, test, *args, **kwargs):
+ self.semaphore.acquire()
+ try:
+ self.result.starttime[test.id()] = self._test_start.timestamp()
+ self.result.threadprogress[self.threadnum].append(test.id())
+ totalprogress = sum(len(x) for x in self.result.threadprogress.values())
+ self.result.progressinfo[test.id()] = "%s: %s/%s %s/%s (%ss) (%s)" % (
+ self.threadnum,
+ len(self.result.threadprogress[self.threadnum]),
+ self.totalinprocess,
+ totalprogress,
+ self.totaltests,
+ "{0:.2f}".format(time.time()-self._test_start.timestamp()),
+ test.id())
+ finally:
+ self.semaphore.release()
+ super(BBThreadsafeForwardingResult, self)._add_result_with_semaphore(method, test, *args, **kwargs)
+
+#
+# A dummy structure to add to io.StringIO so that the .buffer object
+# is available and accepts writes. This allows unittest with buffer=True
+# to interact ok with subunit which wants to access sys.stdout.buffer.
+#
+class dummybuf(object):
+ def __init__(self, parent):
+ self.p = parent
+ def write(self, data):
+ self.p.write(data.decode("utf-8"))
+
+#
+# Taken from testtools.ConcurrentTestSuite but modified for OE use
+#
+class ConcurrentTestSuite(unittest.TestSuite):
+
+ def __init__(self, suite, processes):
+ super(ConcurrentTestSuite, self).__init__([suite])
+ self.processes = processes
+
+ def run(self, result):
+ tests, totaltests = fork_for_tests(self.processes, self)
+ try:
+ threads = {}
+ queue = Queue()
+ semaphore = threading.Semaphore(1)
+ result.threadprogress = {}
+ for i, (test, testnum) in enumerate(tests):
+ result.threadprogress[i] = []
+ process_result = BBThreadsafeForwardingResult(result, semaphore, i, testnum, totaltests)
+ # Force buffering of stdout/stderr so the console doesn't get corrupted by test output
+ # as per default in parent code
+ process_result.buffer = True
+ # We have to add a buffer object to stdout to keep subunit happy
+ process_result._stderr_buffer = io.StringIO()
+ process_result._stderr_buffer.buffer = dummybuf(process_result._stderr_buffer)
+ process_result._stdout_buffer = io.StringIO()
+ process_result._stdout_buffer.buffer = dummybuf(process_result._stdout_buffer)
+ reader_thread = threading.Thread(
+ target=self._run_test, args=(test, process_result, queue))
+ threads[test] = reader_thread, process_result
+ reader_thread.start()
+ while threads:
+ finished_test = queue.get()
+ threads[finished_test][0].join()
+ del threads[finished_test]
+ except:
+ for thread, process_result in threads.values():
+ process_result.stop()
+ raise
+ finally:
+ for test in tests:
+ test[0]._stream.close()
+
+ def _run_test(self, test, process_result, queue):
+ try:
+ try:
+ test.run(process_result)
+ except Exception:
+ # The run logic itself failed
+ case = testtools.ErrorHolder(
+ "broken-runner",
+ error=sys.exc_info())
+ case.run(process_result)
+ finally:
+ queue.put(test)
+
+def removebuilddir(d):
+ delay = 5
+ while delay and os.path.exists(d + "/bitbake.lock"):
+ time.sleep(1)
+ delay = delay - 1
+ bb.utils.prunedir(d)
+
+def fork_for_tests(concurrency_num, suite):
+ result = []
+ test_blocks = partition_tests(suite, concurrency_num)
+ # Clear the tests from the original suite so it doesn't keep them alive
+ suite._tests[:] = []
+ totaltests = sum(len(x) for x in test_blocks)
+ for process_tests in test_blocks:
+ numtests = len(process_tests)
+ process_suite = unittest.TestSuite(process_tests)
+        # Also clear each split list so the new suite holds the only reference
+ process_tests[:] = []
+ c2pread, c2pwrite = os.pipe()
+ # Clear buffers before fork to avoid duplicate output
+ sys.stdout.flush()
+ sys.stderr.flush()
+ pid = os.fork()
+ if pid == 0:
+ ourpid = os.getpid()
+ try:
+ newbuilddir = None
+ stream = os.fdopen(c2pwrite, 'wb', 1)
+ os.close(c2pread)
+
+ # Create a new separate BUILDDIR for each group of tests
+ if 'BUILDDIR' in os.environ:
+ builddir = os.environ['BUILDDIR']
+ newbuilddir = builddir + "-st-" + str(ourpid)
+ selftestdir = os.path.abspath(builddir + "/../meta-selftest")
+ newselftestdir = newbuilddir + "/meta-selftest"
+
+ bb.utils.mkdirhier(newbuilddir)
+ oe.path.copytree(builddir + "/conf", newbuilddir + "/conf")
+ oe.path.copytree(builddir + "/cache", newbuilddir + "/cache")
+ oe.path.copytree(selftestdir, newselftestdir)
+
+ for e in os.environ:
+ if builddir in os.environ[e]:
+ os.environ[e] = os.environ[e].replace(builddir, newbuilddir)
+
+ subprocess.check_output("git init; git add *; git commit -a -m 'initial'", cwd=newselftestdir, shell=True)
+
+                    # Tried to use bitbake-layers add/remove but it requires recipe parsing and hence is too slow
+ subprocess.check_output("sed %s/conf/bblayers.conf -i -e 's#%s#%s#g'" % (newbuilddir, selftestdir, newselftestdir), cwd=newbuilddir, shell=True)
+
+ os.chdir(newbuilddir)
+
+ for t in process_suite:
+ if not hasattr(t, "tc"):
+ continue
+ cp = t.tc.config_paths
+ for p in cp:
+ if selftestdir in cp[p] and newselftestdir not in cp[p]:
+ cp[p] = cp[p].replace(selftestdir, newselftestdir)
+ if builddir in cp[p] and newbuilddir not in cp[p]:
+ cp[p] = cp[p].replace(builddir, newbuilddir)
+
+ # Leave stderr and stdout open so we can see test noise
+ # Close stdin so that the child goes away if it decides to
+                # read from stdin (otherwise it's a roulette to see which
+                # child actually gets the keystrokes for pdb etc.).
+ newsi = os.open(os.devnull, os.O_RDWR)
+ os.dup2(newsi, sys.stdin.fileno())
+
+ subunit_client = TestProtocolClient(stream)
+ # Force buffering of stdout/stderr so the console doesn't get corrupted by test output
+ # as per default in parent code
+ subunit_client.buffer = True
+ subunit_result = AutoTimingTestResultDecorator(subunit_client)
+ process_suite.run(subunit_result)
+ if ourpid != os.getpid():
+ os._exit(0)
+ if newbuilddir:
+ removebuilddir(newbuilddir)
+ except:
+ # Don't do anything with process children
+ if ourpid != os.getpid():
+ os._exit(1)
+ # Try and report traceback on stream, but exit with error
+ # even if stream couldn't be created or something else
+ # goes wrong. The traceback is formatted to a string and
+ # written in one go to avoid interleaving lines from
+ # multiple failing children.
+ try:
+ stream.write(traceback.format_exc().encode('utf-8'))
+ except:
+ sys.stderr.write(traceback.format_exc())
+ finally:
+ if newbuilddir:
+ removebuilddir(newbuilddir)
+ stream.flush()
+ os._exit(1)
+ stream.flush()
+ os._exit(0)
+ else:
+ os.close(c2pwrite)
+ stream = os.fdopen(c2pread, 'rb', 1)
+ test = ProtocolTestCase(stream)
+ result.append((test, numtests))
+ return result, totaltests
+
+def partition_tests(suite, count):
+ # Keep tests from the same class together but allow tests from modules
+ # to go to different processes to aid parallelisation.
+ modules = {}
+ for test in iterate_tests(suite):
+ m = test.__module__ + "." + test.__class__.__name__
+ if m not in modules:
+ modules[m] = []
+ modules[m].append(test)
+
+ # Simply divide the test blocks between the available processes
+ partitions = [list() for _ in range(count)]
+ for partition, m in zip(cycle(partitions), modules):
+ partition.extend(modules[m])
+
+ # No point in empty threads so drop them
+ return [p for p in partitions if p]
+
diff --git a/poky/meta/lib/oeqa/files/test.pl b/poky/meta/lib/oeqa/files/test.pl
deleted file mode 100644
index 689c8f1..0000000
--- a/poky/meta/lib/oeqa/files/test.pl
+++ /dev/null
@@ -1,2 +0,0 @@
-$a = 9.01e+21 - 9.01e+21 + 0.01;
-print ("the value of a is ", $a, "\n");
diff --git a/poky/meta/lib/oeqa/files/test.py b/poky/meta/lib/oeqa/files/test.py
deleted file mode 100644
index f389225..0000000
--- a/poky/meta/lib/oeqa/files/test.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import os
-
-os.system('touch /tmp/testfile.python')
-
-a = 9.01e+21 - 9.01e+21 + 0.01
-print("the value of a is %s" % a)
diff --git a/poky/meta/lib/oeqa/manual/abat.patch b/poky/meta/lib/oeqa/manual/abat.patch
new file mode 100644
index 0000000..1541ac8
--- /dev/null
+++ b/poky/meta/lib/oeqa/manual/abat.patch
@@ -0,0 +1,64 @@
+########
+diff --git a/glxgears_check.sh b/glxgears_check.sh
+index 17622b8..c4d3b97 100755
+--- a/glxgears_check.sh
++++ b/glxgears_check.sh
+@@ -31,7 +31,7 @@ else
+
+ sleep 6
+
+- XPID=$( ps ax | awk '{print $1, $5}' | grep glxgears | awk '{print $1}')
++ XPID=$( ps | awk '{print $1, $5}' | grep glxgears | awk '{print $1}')
+ if [ ! -z "$XPID" ]; then
+ kill -9 $XPID >/dev/null 2>&1
+ echo "glxgears can run, PASS!"
+diff --git a/x_close.sh b/x_close.sh
+index e287be1..3429f1a 100755
+--- a/x_close.sh
++++ b/x_close.sh
+@@ -22,7 +22,7 @@
+ #
+ function close_proc(){
+ echo "kill process Xorg"
+-XPID=$( ps ax | awk '{print $1, $5}' | egrep "X$|Xorg$" | awk '{print $1}')
++XPID=$( ps | awk '{print $1, $6}' | egrep "X$|Xorg$" | awk '{print $1}')
+ if [ ! -z "$XPID" ]; then
+ kill $XPID
+ sleep 4
+diff --git a/x_start.sh b/x_start.sh
+index 9cf6eab..2305796 100755
+--- a/x_start.sh
++++ b/x_start.sh
+@@ -24,7 +24,7 @@
+ X_ERROR=0
+
+ #test whether X has started
+-PXID=$(ps ax |awk '{print $1,$5}' |egrep "Xorg$|X$" |grep -v grep | awk '{print $1}')
++PXID=$(ps |awk '{print $1,$6}' |egrep "Xorg$|X$" |grep -v grep | awk '{print $1}')
+ if [ ! -z "$PXID" ]; then
+ echo "[WARNING] Xorg has started!"
+ XORG_STATUS="started"
+@@ -35,9 +35,11 @@ else
+ #start up the x server
+ echo "Start up the X server for test in display $DISPLAY................"
+
+- $XORG_DIR/bin/X >/dev/null 2>&1 &
++ #$XORG_DIR/bin/X >/dev/null 2>&1 &
++ #sleep 8
++ #xterm &
++ /etc/init.d/xserver-nodm start &
+ sleep 8
+- xterm &
+ fi
+ XLOG_FILE=/var/log/Xorg.0.log
+ [ -f $XORG_DIR/var/log/Xorg.0.log ] && XLOG_FILE=$XORG_DIR/var/log/Xorg.0.log
+@@ -54,7 +56,7 @@ fi
+ X_ERROR=1
+ fi
+
+- XPID=$( ps ax | awk '{print $1, $5}' | egrep "X$|Xorg$" |grep -v grep| awk '{print $1}')
++ XPID=$( ps | awk '{print $1, $6}' | egrep "X$|Xorg$" |grep -v grep| awk '{print $1}')
+ if [ -z "$XPID" ]; then
+ echo "Start up X server FAIL!"
+ echo
+########
\ No newline at end of file
diff --git a/poky/meta/lib/oeqa/manual/bsp-hw.json b/poky/meta/lib/oeqa/manual/bsp-hw.json
new file mode 100644
index 0000000..a2b1d3e
--- /dev/null
+++ b/poky/meta/lib/oeqa/manual/bsp-hw.json
@@ -0,0 +1,1200 @@
+[
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-tools.rpm_-__install_dependency_package",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+                    "action": "Get a not previously installed RPM package or build one on the local machine, which should have a run-time dependency. For example, \"mc\" (Midnight Commander, which is a visual file manager) should depend on \"ncurses-terminfo\". \n\n$ bitbake mc \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Copy the package into a system folder (for example /home/root/rpm_packages). \n\n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Run \"rpm -ivh package_name\" and check the output, for example \"rpm -ivh mc.rpm*\" should report the dependency on \"ncurses-terminfo\".\n\n\n\n",
+ "expected_results": "3 . rpm command should report message when some RPM installation depends on other packages."
+ }
+ },
+ "summary": "rpm_-__install_dependency_package"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.boot_and_install_from_USB",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+                    "action": "plug in the usb stick which contains the burned live image",
+ "expected_results": "User can choose install system from usb stick onto harddisk from boot menu or command line option \n"
+ },
+ "2": {
+ "action": "configure device BIOS to firstly boot from USB if necessary",
+ "expected_results": "Installed system can boot up"
+ },
+ "3": {
+ "action": "boot the device and select option \"Install\" from boot menu",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "proceed through default install process",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Remove USB, and reboot into new installed system. \nNote: If installation was successfully completed and received this message \"\"(sdx): Volume was not properly unmounted...Please run fsck.\"\" ignore it because this was whitelisted according to bug 9652.",
+ "expected_results": ""
+ }
+ },
+ "summary": "boot_and_install_from_USB"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.live_boot_from_USB",
+ "author": [
+ {
+ "email": "juan.fernandox.ramos.frayle@intel.com",
+ "name": "juan.fernandox.ramos.frayle@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+                    "action": "Plug in the usb stick which contains the burned live image.",
+ "expected_results": "User can choose boot from live image on usb stick from boot menu or command line option"
+ },
+ "2": {
+ "action": "Configure device BIOS to firstly boot from USB if necessary.",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Reboot the device and boot from USB stick.",
+ "expected_results": "Live image can boot up with usb stick"
+ }
+ },
+ "summary": "live_boot_from_USB"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.boot_from_runlevel_3",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Boot into system and edit /etc/inittab to make sure that system enter at the run level 3 by default, this is done by changing the line \n\n\nid:5:initdefault \n\nto \n\nid:3:initdefault \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Reboot system, and press \"Tab\" to enter \"grub\"",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Get into the \"kernel\" line with the edit option \"e\" and add \"psplash=false text\" at the end line.",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Press \"F10\" or \"ctrl+x\" to boot system",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "If system ask you for a login type \"root\"",
+ "expected_results": "System should boot to run level 3, showing the command prompt."
+ }
+ },
+ "summary": "boot_from_runlevel_3"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.boot_from_runlevel_5",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Boot into system and edit /etc/inittab to make sure that system enter at the run level 5 by default, this is done by changing the line \n\nid:3:initdefault \n\nto \n\nid:5:initdefault \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Reboot system, and press \"Tab\" to enter \"grub\"",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Get into the \"kernel\" line with the edit option \"e\" and add \"psplash=false text\" at the end line.",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Press \"F10\" or \"ctrl+x\" to boot system \nNote: The test is only for sato image.",
+ "expected_results": "System should boot to runlevel 5 ."
+ }
+ },
+ "summary": "boot_from_runlevel_5"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.shutdown_system",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "launch terminal and run \"shutdown -h now\" or \"poweroff\"",
+ "expected_results": "System can be shutdown successfully . "
+ }
+ },
+ "summary": "shutdown_system"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.reboot_system",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "launch terminal and run \"reboot\"",
+ "expected_results": "System can reboot successfully . "
+ }
+ },
+ "summary": "reboot_system"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.switch_among_multi_applications_and_desktop",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "launch several applications(like contacts, file manager, notes, etc)",
+ "expected_results": "user could switch among multi applications and desktop"
+ },
+ "2": {
+ "action": "launch terminal",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "switch among multi applications and desktop",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "close applications \nNote: The case is for sato image only. ",
+ "expected_results": ""
+ }
+ },
+ "summary": "switch_among_multi_applications_and_desktop"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.USB_-_mount",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Boot system \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Plug USB stick, it should be mount in /run/media/sd(x) If X-window system does not start and show USB device, then use the terminal to mount it, following the next steps: \na. Locate the usb stick (usually it is on /dev/sdb) \nb. Create a directory with \"mkdir stick\" (so you will have such a path as: /home/root/stick). \nc. Run the command \"mount /dev/sdb /home/root/stick\" to mount USB device on it. \n\n",
+ "expected_results": "USB device should be mounted in /run/media/sd(x) \nor in /home/root/stick \n\n"
+ },
+ "3": {
+ "action": "Then you can access USB stick (/home/root/stick) via Terminal or GUI and try various commands and actions like \"cp\", \"mv\", \"touch\" and \"rm\". Type \"dmesg\" command and check for recent mounted devices.",
+ "expected_results": "Basic commands work properly. The system sends a notification in \"dmesg\" command, showing that the USB stick is accessible and the device is mounted ."
+ }
+ },
+ "summary": "USB_-_mount"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.USB_-_read_files",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system",
+ "expected_results": "view/copy successfully"
+ },
+ "2": {
+ "action": "plug usb stick",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "view files in usb by file browser",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "copy some files from usb to local hardware",
+ "expected_results": ""
+ }
+ },
+ "summary": "USB_-_read_files"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.USB_-_umount",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system",
+                    "expected_results": "usb directory in the file browser disappears automatically"
+ },
+ "2": {
+ "action": "plug usb stick",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "view files in usb by file browser \n4.unplug usb",
+ "expected_results": ""
+ }
+ },
+ "summary": "USB_-_umount"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.USB_-_write_files",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system",
+ "expected_results": "create/copy successfully"
+ },
+ "2": {
+ "action": "plug usb stick",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "create files in usb \n4.copy some files from local hardware to usb",
+ "expected_results": ""
+ }
+ },
+ "summary": "USB_-_write_files"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.ethernet_static_ip_set_in_connman",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Boot the system and check internet connection is on . ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Launch connmand-properties (up-right corner on desktop)",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Choose Ethernet device and set a valid static ip address for it. \nFor example, in our internal network, we can set as following: \nip address: 10.239.48.xxx \nMask: 255.255.255.0 \nGateway (Broadcast): 10.239.48.255",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check the Network configuration with \"ifconfig\"",
+ "expected_results": "Static IP was set successfully \n"
+ },
+ "5": {
+                    "action": "ping to another IP address",
+                    "expected_results": "Ping works correctly\n"
+ }
+ },
+ "summary": "ethernet_static_ip_set_in_connman"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.ethernet_get_IP_in_connman_via_DHCP",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Launch connmand-properties (up-right corner on your desktop). ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Check if Ethernet device can work properly with static IP, doing \"ping XXX.XXX.XXX.XXX\", once this is set.",
+ "expected_results": "Ping executed successfully . \n\n"
+ },
+ "3": {
+ "action": "Then choose DHCP method for Ethernet device in connmand-properties.",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check with 'ifconfig\" and \"ping\" if Ethernet device get IP address via DHCP.",
+ "expected_results": "Ethernet device can get dynamic IP address via DHCP in connmand ."
+ }
+ },
+ "summary": "ethernet_get_IP_in_connman_via_DHCP"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-tools.connman_offline_mode_in_connman-gnome",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Launch connman-properties after system booting \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "choose \"offline mode\" and check the connection of all network interfaces ",
+ "expected_results": "All connection should be off after clicking \"offline mode\" . "
+ }
+ },
+ "summary": "connman_offline_mode_in_connman-gnome"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.X_server_can_start_up_with_runlevel_5_boot",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot up system with default runlevel \n\n",
+ "expected_results": "X server can start up well and desktop display has no problem . \n\n"
+ },
+ "2": {
+ "action": "type runlevel at command prompt",
+ "expected_results": "Output:N 5"
+ }
+ },
+ "summary": "X_server_can_start_up_with_runlevel_5_boot"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.standby",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system and launch terminal; check output of \"date\" and launch script \"continue.sh\"",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "echo \"mem\" > /sys/power/state",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "After system go into S3 mode, move mouse or press any key to make it resume (on NUC press power button)",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check \"date\" and script \"continue.sh\"",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Check if application can work as normal \ncontinue.sh as below: \n \n#!/bin/sh \n \ni=1 \nwhile [ 0 ] \ndo \n echo $i \n sleep 1 \n i=$((i+1)) \ndone ",
+ "expected_results": "Screen should resume back and script can run continuously incrementing the i's value from where it was before going to standby state. Date should be the same with the corresponding time increment."
+ }
+ },
+ "summary": "standby"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.check_CPU_utilization_after_standby",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Start up system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "run \"top\" command and check if there is any process eating CPU time",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "make system into standby and resume it",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "run \"top\" command and check if there is any difference with the data before standby",
+ "expected_results": "There should be no big difference before/after standby with \"top\" . "
+ }
+ },
+ "summary": "check_CPU_utilization_after_standby"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.Test_if_LAN_device_works_well_after_resume_from_suspend_state",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system and launch terminal",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "echo \"mem\" > /sys/power/state",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "After system go into S3 mode, move mouse or press any key to make it resume",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "check ping status \n\nNote: This TC apply only for core-image-full-cmd and core-image-lsb .",
+ "expected_results": "ping should always work before/after standby"
+ }
+ },
+ "summary": "Test_if_LAN_device_works_well_after_resume_from_suspend_state"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.Test_if_usb_hid_device_works_well_after_resume_from_suspend_state",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system and launch terminal",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "echo \"mem\" > /sys/power/state",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "After system go into S3 mode, resume the device by pressing the power button or using HID devices",
+ "expected_results": "Devices resumes "
+ },
+ "4": {
+ "action": "check usb mouse and keyboard",
+ "expected_results": "Usb mouse and keyboard should work"
+ }
+ },
+ "summary": "Test_if_usb_hid_device_works_well_after_resume_from_suspend_state"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-tools.click_terminal_icon_on_X_desktop",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "After system launch and X start up, click terminal icon on desktop",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Check if only one terminal window launched and no other problem met",
+ "expected_results": "There should be no problem after launching terminal . "
+ }
+ },
+ "summary": "click_terminal_icon_on_X_desktop"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-tools.Add_multiple_files_in_media_player",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Launch media player",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Add multiple files(5 files) in media player at same time (ogg or wav)",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Verify the sound.",
+ "expected_results": "Media player should be OK with this action, it reproduce files correctly."
+ }
+ },
+ "summary": "Add_multiple_files_in_media_player"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.check_bash_in_image",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "After system is up, check if bash command exists with command \"which bash\"",
+ "expected_results": "bash command should exist in image giving something as below \"/bin/bash\""
+ }
+ },
+ "summary": "check_bash_in_image"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.MicroSD_-__mount",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system",
+                    "expected_results": "system notifies that the MicroSD is accessible"
+ },
+ "2": {
+ "action": "plug MicroSD card",
+ "expected_results": ""
+ }
+ },
+ "summary": "MicroSD_-__mount"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.MicroSD_-__read_files",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system",
+ "expected_results": "view/copy successfully"
+ },
+ "2": {
+ "action": "plug MicroSD card",
+ "expected_results": ""
+ },
+ "3": {
+                    "action": "view files in MicroSD by file browser",
+ "expected_results": ""
+ },
+ "4": {
+                    "action": "copy some files from MicroSD to local hardware",
+ "expected_results": ""
+ }
+ },
+ "summary": "MicroSD_-__read_files"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.MicroSD_-__umount",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system",
+                    "expected_results": "MicroSD entry in the file browser disappears automatically. "
+ },
+ "2": {
+ "action": "plug MicroSD card",
+ "expected_results": ""
+ },
+ "3": {
+                    "action": "view files in MicroSD by file browser",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "unplug MicroSD",
+ "expected_results": ""
+ }
+ },
+ "summary": "MicroSD_-__umount"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.MicroSD_-__write_files",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot system",
+ "expected_results": "create/copy successfully"
+ },
+ "2": {
+ "action": "plug MicroSD card",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "create files in MicroSD",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "copy some files from local hardware to MicroSD",
+ "expected_results": ""
+ }
+ },
+ "summary": "MicroSD_-__write_files"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-tools.video_-_libva_check_(ogg_video_play)",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "check if libva is installed on system (or libogg)",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "copy sample ogg file to system",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "launch media player can play the ogg file",
+ "expected_results": "ogg file can be played without problem when libva is used (or libogg) "
+ }
+ },
+ "summary": "video_-_libva_check_(ogg_video_play)"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-tools.media_player_-_unable_to_play_MPEG-1_without_\"commercial\"_flag",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Copy sample MPEG-1 file to a system without the \"commercial\" flag.",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Launch media player and make sure it cannot play the MPEG-1 file.",
+ "expected_results": "MPEG-1 file can not be played on images without the \"commercial\" flag. "
+ }
+ },
+ "summary": "media_player_-_unable_to_play_MPEG-1_without_\"commercial\"_flag"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-tools.media_player_-_play_video_(ogv)",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "copy sample ogv file to system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "launch media player and make sure it can play the ogv file",
+ "expected_results": "ogv file can be played without problem"
+ }
+ },
+ "summary": "media_player_-_play_video_(ogv)"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-tools.media_player_-_stop/play_button_(ogv)",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "copy sample ogv file to system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "launch media player can play the ogv file",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "click \"stop\" button to stop playing",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "click \"start\" button to resume playing",
+ "expected_results": "ogv file can be start/stop without problem"
+ }
+ },
+ "summary": "media_player_-_stop/play_button_(ogv)"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-tools.audio_-_play_(ogg)_with_HDMI",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "copy sample ogg file to system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "connect system with a monitor with HDMI",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "launch media player and play the ogg file",
+ "expected_results": "ogg file can be played without problem with HDMI"
+ }
+ },
+ "summary": "audio_-_play_(ogg)_with_HDMI"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-tools.audio_-_play_(wav)_with_HDMI",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "copy sample wav file to system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "connect system with a monitor with HDMI",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "launch media player and play the wav file",
+ "expected_results": "wav file can be played without problem, with HDMI"
+ }
+ },
+ "summary": "audio_-_play_(wav)_with_HDMI"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-tools.Graphics_-_ABAT",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Download ABAT test suite from internal git repository, git clone git://tinderbox.sh.intel.com/git/abat",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Apply following patch to make it work on yocto environment",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Run \"./abat.sh\" to run ABAT test refer to abat.patch",
+                    "expected_results": "All ABAT tests should pass. \nNote: If the 3 failures below appear, ignore them. \n- start up X server fail (it is already up) \n- module [intel_agp] \n- module [i915]"
+ }
+ },
+ "summary": "Graphics_-_ABAT"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-tools.Graphics_-_x11perf_-_2D",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Run \"x11perf -aa10text\" and \"x11perf -rgb10text\"",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Get the FPS result and compare it with upstream graphics data on Sandybridge",
+ "expected_results": "There should not be big regression between Yocto and upstream linux . "
+ }
+ },
+ "summary": "Graphics_-_x11perf_-_2D"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-oe-core.Test_Run_Integrity_-_Check_that_image_is_buildable",
+ "author": [
+ {
+ "email": "corneliux.stoicescu@intel.com",
+ "name": "corneliux.stoicescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Check that image can be built using either of the following methods: \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Check that image is built by autobuilder \nPlease check at: https://autobuilder.yocto.io/pub/releases/ \nChoose the target release that you are validating. \n\n",
+ "expected_results": ""
+ },
+ "3": {
+                    "action": "Build the image yourself \nPreferably build a core-image-sato-dev to ease the process of the dependent test cases in this run. \nNote: Please set MACHINE in conf/local.conf ",
+ "expected_results": "If either method fails, this test case will be failed and dependent test cases will be blocked. "
+ }
+ },
+ "summary": "Test_Run_Integrity_-_Check_that_image_is_buildable"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.Check_if_SATA_disk_can_work_correctly",
+ "author": [
+ {
+ "email": "yi.zhao@windriver.com",
+ "name": "yi.zhao@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Run fdisk command to create partition on SATA disk. ",
+ "expected_results": "The SATA device can mount, umount, read and write. "
+ },
+ "2": {
+ "action": "Mount/Umount \n mke2fs /dev/sda1 \n mount -t ext2 /dev/sda1 /mnt/disk \n umount /mnt/disk",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Read/Write (filesystem) \n touch /mnt/disk/test.txt \n echo abcd > /mnt/disk/test.txt \n cat /mnt/disk/test.txt",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Read/Write (raw) \n dd if=/dev/sda1 of=/tmp/test bs=1k count=1k \n This command will read 1MB from /dev/sda1 to /tmp/test",
+ "expected_results": ""
+ }
+ },
+ "summary": "Check_if_SATA_disk_can_work_correctly"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.Install_and_boot_from_USB-drive_to_HDD-drive",
+ "author": [
+ {
+ "email": "david.israelx.rodriguez.castellanos@intel.com",
+ "name": "david.israelx.rodriguez.castellanos@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Get a HDD drive.",
+ "expected_results": "User can choose install system from USB stick on HDD drive from boot menu or command line option \n"
+ },
+ "2": {
+                    "action": "Plug in the USB which contains the burned live image (USB1).",
+ "expected_results": "Installed system can boot up."
+ },
+ "3": {
+ "action": "Configure device BIOS to firstly boot from USB if necessary",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Boot the device and select option \"Install\" from boot menu.",
+ "expected_results": ""
+ },
+ "5": {
+                    "action": "Make sure that the device on which the image is going to be installed is the HDD drive.",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Proceed through default install process.",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Remove USB1, and reboot into new installed system.",
+ "expected_results": ""
+ }
+ },
+ "summary": "Install_and_boot_from_USB-drive_to_HDD-drive"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.Install_and_boot_from_USB-drive_to_SD-drive",
+ "author": [
+ {
+ "email": "david.israelx.rodriguez.castellanos@intel.com",
+ "name": "david.israelx.rodriguez.castellanos@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Get a SD-drive with enough free space to install an image.",
+ "expected_results": "User can choose install system from USB stick on SD-drive from boot menu or command line option. \n"
+ },
+ "2": {
+                    "action": "Plug in the USB which contains the burned live image (USB1).",
+ "expected_results": "Installed system can boot up."
+ },
+ "3": {
+ "action": "Configure device BIOS to firstly boot from USB if necessary",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Boot the device and select option \"Install\" from boot menu.",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Make sure that the device in which image is going to be installed is the SD-drive.",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Proceed through default install process.",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Remove USB1, and reboot into new installed system.",
+ "expected_results": ""
+ }
+ },
+ "summary": "Install_and_boot_from_USB-drive_to_SD-drive"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.Test_boot_on_serial_communication_SD",
+ "author": [
+ {
+ "email": "juan.fernandox.ramos.frayle@intel.com",
+ "name": "juan.fernandox.ramos.frayle@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+                    "action": "1.- Create a yocto project image on an SD card \nexample \n2 - Configure a connection as shown in the link above: \nhttps://wiki.yoctoproject.org/wiki/MinnowMax_board_Serial_video_connection_guide \n3 - Verify the MinnowMax board is connected to the host \n4 - Boot the system to desktop \n5 - Open a Terminal and check the IP \nIn Terminal type $ifconfig\"",
+ "expected_results": "Verify you can create a live image \n"
+ }
+ },
+ "summary": "Test_boot_on_serial_communication_SD"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.Test_boot_on_serial_communication_HDD",
+ "author": [
+ {
+ "email": "juan.fernandox.ramos.frayle@intel.com",
+ "name": "juan.fernandox.ramos.frayle@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+                    "action": "1 - Create a yocto project image on an HDD \nexample \n2 - Configure a connection as shown in the link above: \nhttps://wiki.yoctoproject.org/wiki/MinnowMax_board_Serial_video_connection_guide \n3 - Verify the MinnowMax board is connected to the host \n4 - Boot the system to desktop \n5 - Open a Terminal and check the IP \nIn Terminal type $ifconfig\"> ",
+ "expected_results": "Verify you can create a live image \n"
+ }
+ },
+ "summary": "Test_boot_on_serial_communication_HDD"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.Test_boot_on_serial_communication_USB",
+ "author": [
+ {
+ "email": "juan.fernandox.ramos.frayle@intel.com",
+ "name": "juan.fernandox.ramos.frayle@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "1.- Create a yocto project image in a USB \nexample <dd if= core-image-sato-sdk.hddimg of =/dev/sdb>",
+ "expected_results": "Verify you can create a live image \n"
+ },
+ "2": {
+                    "action": "Configure a connection as shown in the link above: \nhttps://wiki.yoctoproject.org/wiki/MinnowMax_board_Serial_video_connection_guide\n\n",
+ "expected_results": "Video signal is present and not delayed \n"
+ },
+ "3": {
+                    "action": " Verify the MinnowMax board is connected to the host",
+ "expected_results": "Verify the system boot ok and no errors are present \n"
+ },
+ "4": {
+ "action": " Boot the system to desktop",
+ "expected_results": " Check that a valid IP is retrieved"
+ },
+ "5": {
+ "action": " Open a Terminal and check the IP \nIn Terminal type $ifconfig\" ",
+ "expected_results": ""
+ }
+ },
+ "summary": "Test_boot_on_serial_communication_USB"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-hw.bsps-runtime.Test_Seek_bar_and_volume_control",
+ "author": [
+ {
+ "email": "juan.fernandox.ramos.frayle@intel.com",
+ "name": "juan.fernandox.ramos.frayle@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Run media player and load a media file ",
+ "expected_results": "Media player correctly open audio/video file \n"
+ },
+ "2": {
+ "action": "Verify that seek and volume control are present ",
+ "expected_results": "Seek bar and volume control are present \n"
+ },
+ "3": {
+ "action": "Verify that selecting the speaker icon opens the volume control",
+ "expected_results": "Volume control bar must appear \n"
+ },
+ "4": {
+ "action": "Verify you can increase and decrease volume level with the volume control",
+ "expected_results": "Volume level must be increased and decreased \n"
+ },
+ "5": {
+ "action": "Observe that slider on the seek bar moves along with the video/audio play",
+ "expected_results": "Video/audio file can be played and slider moves along with the video/audio play \n"
+ },
+ "6": {
+ "action": "Verify you can navigate the video with the slider back and forward",
+ "expected_results": "The slider can move back and forward in the seek bar \n"
+ },
+ "7": {
+ "action": "Verify that seek and volume control are functional in full screen mode",
+ "expected_results": "Press the full screen mode icon, seek bar and volume control must work fine \n"
+ },
+ "8": {
+                    "action": "Verify that pressing << or >> while playing a file makes the playback slider move backwards or forwards (rewind/fast-forward)",
+ "expected_results": "Verify << and >> works correctly"
+ }
+ },
+ "summary": "Test_Seek_bar_and_volume_control"
+ }
+ }
+]
\ No newline at end of file
diff --git a/poky/meta/lib/oeqa/manual/bsp-qemu.json b/poky/meta/lib/oeqa/manual/bsp-qemu.json
new file mode 100644
index 0000000..1260af4
--- /dev/null
+++ b/poky/meta/lib/oeqa/manual/bsp-qemu.json
@@ -0,0 +1,222 @@
+[
+ {
+ "test": {
+ "@alias": "bsps-qemu.bsps-tools.qemu_can_be_started_with_KVM_enabled",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Build a kernel with KVM enabled \n\nIn Local.conf add \n\nQEMU_USE_KVM = \"${@ 'intel-corei7-64 intel-core2-32 qemux86 qemux86-64' if os.access('/dev/kvm', os.R_OK|os.W_OK) else '' }\" \n\n ",
+ "expected_results": ""
+ },
+ "2": {
+                    "action": "Start qemu with option \"kvm\" with runqemu \n a. If starting qemu with kvm fails, it may be because the host does not have the kvm and vhost_net modules installed. Follow the links below to install them. \n b. vhost_test refer: https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM \n c. kvm refer: https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Check if qemu starts up and if kvm_intel module is used",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "If kvm_intel module is not used when starting qemu, it will show 0 in \"Used by\" column when you run \"lsmod | grep kvm_intel\" ",
+                    "expected_results": "KVM enabled with qemu \nExecute \"lsmod | grep kvm_intel\" from your host twice, before and after you \nstart the qemu with kvm option. Before start, the number should be 0, \nafter start, the number should be bigger than 0."
+ }
+ },
+ "summary": "qemu_can_be_started_with_KVM_enabled"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-qemu.bsps-tools.Post-installation_logging",
+ "author": [
+ {
+ "email": "yi.zhao@windriver.com",
+ "name": "yi.zhao@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Download the poky source and set environment \n",
+ "expected_results": "The /var/log/postinstall.log should exist in the first boot. The content of this log is like below: \n\nRunning postinst /etc/rpm-postinsts/man... \nList directory to check the output log \nbin \nboot \ndev \netc \nhome \nlib \nlost+found \nmedia \nmnt \nproc \nrun \nsbin \nsys \ntmp \nusr \nvar \nList nonexist directory to check the stderr redirection log \nls: /nonexist: No such file or directory "
+ },
+ "2": {
+                    "action": "Add the following lines to a .bb file. For example, meta/recipes-connectivity/openssh/openssh_6.2p2.bb: \n\npkg_postinst_ontarget_${PN} () { \n #!/bin/sh -e \n if [ x\"$D\" = \"x\" ]; then \n echo \"List directory to check the output log\" \n ls / \n echo \"List nonexist directory to check the stderr redirection log\" \n ls /nonexist \n else \n exit 1 \n fi \n} \n\nMake sure the feature \"debug-tweaks\" is added in conf/local.conf \n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Add ssh-server-openssh to EXTRA_IMAGE_FEATURES in local.conf \n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Build core-image-minimal \n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Boot up the image and check the /var/log/postinstall.log ",
+ "expected_results": ""
+ }
+ },
+ "summary": "Post-installation_logging"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-qemu.bsps-tools.Add_user_with_cleartext_type_password_during_filesystem_construction",
+ "author": [
+ {
+ "email": "ke.zou@windriver.com",
+ "name": "ke.zou@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Download the poky source and set the environment \n\n",
+ "expected_results": "No error during image building procedure. \n"
+ },
+ "2": {
+ "action": "Add the following lines in conf/local.conf \n\nINHERIT += \"extrausers\" \n\nEXTRA_USERS_PARAMS = \"\\ \nuseradd -s /bin/sh -P 'tester3' tester3;\\ \n\" \n\nThe above settings do the following things: \na. Add a user tester3 with cleartext password 'tester3' ",
+ "expected_results": "Image can boot up \n"
+ },
+ "3": {
+ "action": "Build the image\n ",
+ "expected_results": "Login with user name \"tester3\" and password \"tester3\" "
+ }
+ },
+ "summary": "Add_user_with_cleartext_type_password_during_filesystem_construction"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-qemu.bsps-tools.rpm_-__install_dependency_package",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+                    "action": "Get a not previously installed RPM package or build one on the local machine, which should have a run-time dependency. For example, \"mc\" (Midnight Commander, which is a visual file manager) should depend on \"ncurses-terminfo\". \n\n$ bitbake mc \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Copy the package into a system folder (for example /home/root/rpm_packages). \n\n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Run \"rpm -ivh package_name\" and check the output, for example \"rpm -ivh mc.rpm*\" should report the dependency on \"ncurses-terminfo\".\n\n\n\n",
+ "expected_results": "3 . rpm command should report message when some RPM installation depends on other packages."
+ }
+ },
+ "summary": "rpm_-__install_dependency_package"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-qemu.bsps-tools.Check_rpm_install/removal_log_file_size(auto)",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Get some rpm or other kind of installation packages. \n\n",
+ "expected_results": "Steps 1- 4 (more than 2.3) \nEach file will occupy around 10MB, and there should be some method to keep rpm log in a small size. (the size of the db of RPMs must not be taking so much space) \nStep 5 (less than or equal to 2.3)\nThe size on /var/lib/rpm/ must keep around 30MB"
+ },
+ "2": {
+ "action": "After system is up, check the size of log file named as \"log.xxxxxx\" on /var/lib/rpm/log \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "After several install/removal of packages, with either of the install/removal commands (rpm/smart/zypper/dnf install/removal), check again the size of log file. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "For packages installation, there will be some database files under /var/lib/rpm/, named as \"__db.xxx\" and there will be some log files \nunder /var/lib/rpm/log, named as \"\"log.xxxxxx\"\". \n\nNote: You will only see the log.xxxx on /var/lib/rpm/log mentioned above if the poky version is minor than 2.3.For poky 2.3 or major versions this has been modified and the package RPM4 does not show the logs.xxxx. if major, follow the next step. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Repeat steps (1 and 3) and check the size of /var/lib/rpm/ \n\nMore info: https://bugzilla.yoctoproject.org/show_bug.cgi?id=9259",
+ "expected_results": ""
+ }
+ },
+ "summary": "Check_rpm_install/removal_log_file_size"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-qemu.bsps-runtime.only_one_connmand_in_background(auto)",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Boot system",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Run \"ps aux |grep connmand\" or \"ps -ef | grep connmand\" or \"ps | grep connmand\"",
+ "expected_results": "Connmand (connection manager, used to manage internet connections) should be shown as an active process \n\n"
+ },
+ "3": {
+ "action": "Run command \"connmand\" to try to launch to a second connmand process",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Check, with \"ps\" connmand if a second connmand can be generated ",
+ "expected_results": "There should be only one connmand process instance in background ."
+ }
+ },
+ "summary": "only_one_connmand_in_background"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-qemu.bsps-runtime.X_server_can_start_up_with_runlevel_5_boot",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "boot up system with default runlevel \n\n",
+ "expected_results": "X server can start up well and desktop display has no problem . \n\n"
+ },
+ "2": {
+ "action": "type runlevel at command prompt",
+ "expected_results": "Output:N 5"
+ }
+ },
+ "summary": "X_server_can_start_up_with_runlevel_5_boot"
+ }
+ },
+ {
+ "test": {
+ "@alias": "bsps-qemu.bsps-runtime.check_bash_in_image",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "After system is up, check if bash command exists with command \"which bash\"",
+ "expected_results": "bash command should exist in image giving something as below \"/bin/bash\""
+ }
+ },
+ "summary": "check_bash_in_image"
+ }
+ }
+]
\ No newline at end of file
diff --git a/poky/meta/lib/oeqa/manual/build-appliance.json b/poky/meta/lib/oeqa/manual/build-appliance.json
new file mode 100644
index 0000000..b8f8927
--- /dev/null
+++ b/poky/meta/lib/oeqa/manual/build-appliance.json
@@ -0,0 +1,122 @@
+[
+ {
+ "test": {
+ "@alias": "build-appliance.build-appliance.Bitbake_build-appliance-image",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Get poky source code and prepare the build environment",
+ "expected_results": "bitbake build-appliance-image is successful "
+ },
+ "2": {
+ "action": "Set MACHINE to qemux86 and add the following line to conf/local.conf : SRCREV_pn-build-appliance-image = \"${AUTOREV}\"",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Run \"bitbake build-appliance-image\" \n \n",
+ "expected_results": ""
+ }
+ },
+ "summary": "Bitbake_build-appliance-image"
+ }
+ },
+ {
+ "test": {
+ "@alias": "build-appliance.build-appliance.Build_core-image-minimal_with_build-appliance-image",
+ "author": [
+ {
+ "email": "corneliux.stoicescu@intel.com",
+ "name": "corneliux.stoicescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Build with AUTOREV or download from Autobuilder an image for Yocto Build Appliance. ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Boot the image under VMWare Player. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Build qemux86 core-image-minimal using bitbake command line in the build-appliance-image ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Launch the image just built using runqemu. ",
+ "expected_results": "core-image-minimal should build and boot. "
+ }
+ },
+ "summary": "Build_core-image-minimal_with_build-appliance-image"
+ }
+ },
+ {
+ "test": {
+ "@alias": "build-appliance.build-appliance.Build_a_image_without_error_(added_recipe)",
+ "author": [
+ {
+ "email": "sstncr@gmail.com",
+ "name": "sstncr@gmail.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Launch Build Appliance",
+ "expected_results": "User could build a image without error and the added package is in the image"
+ },
+ "2": {
+ "action": "Set \"Machine\" in conf/local.conf, for example, qemuarm",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Install a new package to the image, for example, acpid. Set the following line in conf/local.conf: IMAGE_INSTALL_append = \" acpid\"",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Build a image using bitbake command line, for example, bitbake core-image-minimal",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "After build finished, launch the image and check if the added package built into image",
+ "expected_results": ""
+ }
+ },
+ "summary": "Build_a_image_without_error_(added_recipe)."
+ }
+ },
+ {
+ "test": {
+ "@alias": "build-appliance.build-appliance.Create_core-image-sato-sdk_using_build_appliance",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Build with AUTOREV or download from Autobuilder an image for Yocto Build Appliance. ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Boot the image under VMWare Player. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Build qemux86 core-image-sato-sdk using bitbake command line in the build-appliance-image ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Launch the image just built using runqemu. ",
+ "expected_results": ""
+ }
+ },
+ "summary": "Create_core-image-sato-sdk_using_build_appliance"
+ }
+ }
+]
\ No newline at end of file
diff --git a/poky/meta/lib/oeqa/manual/crops.json b/poky/meta/lib/oeqa/manual/crops.json
new file mode 100644
index 0000000..1cf3c8f
--- /dev/null
+++ b/poky/meta/lib/oeqa/manual/crops.json
@@ -0,0 +1,294 @@
+[
+ {
+ "test": {
+ "@alias": "crops-default.crops-default.sdkext_eSDK_devtool_build_make",
+ "author": [
+ {
+ "email": "francisco.j.pedraza.gonzalez@intel.com",
+ "name": "francisco.j.pedraza.gonzalez@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "IMPORTANT NOTE: The firsts 5 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": " Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Create the following tree of files <crops-esdk-workdir-workspace>/sdkext/files/myapp <crops-esdk-workdir-workspace>/sdkext/files/myapp_cmake \n\n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": " Create the following files withing the myapp directory myapp.c and the Makefile. Write the following inside of each file: \n---------------------------------------- \nMakefile should contain \n\nall: myapp \n\nmyapp: myapp.o \n\t$(CC) $(LDFLAGS) $< -o $@ \n\nmyapp.o: myapp.c \n\t$(CC) $(CFLAGS) -c $< -o $@ \n\nclean: \n\trm -rf myapp.o myapp \n\n----------------------------- \nmyapp.c shold contain \n\n\n#include <stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n \n\treturn 0; \n} \n------------------------------------ \n\n",
+ "expected_results": "be sure that the indentations on the makefile are tabs not spaces. \n\n"
+ },
+ "5": {
+ "action": " Create the following files within the myapp_cmake directory CMakeLists.txt and myapp.c. Write the following inside each file: \n\n------------------------------------ \nCMakeLists.txt should contain: \n\ncmake_minimum_required (VERSION 2.6) \nproject (myapp) \n# The version number. \nset (myapp_VERSION_MAJOR 1) \nset (myapp_VERSION_MINOR 0) \n\n# add the executable \nadd_executable (myapp myapp.c) \n\ninstall(TARGETS myapp \nRUNTIME DESTINATION bin) \n\n------------------------------------------ \nmyapp.c should contain: \n\n#include <stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n\n\treturn 0; \n} \n------------------------------------------------- \n\n",
+ "expected_results": "Be sure that the indentations on CMakeLists.txt is tabs not spaces."
+ },
+ "6": {
+ "action": " source environment-setup-i586-poky-linux \n\n",
+ "expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
+ },
+ "7": {
+ "action": " run command which devtool \n\n",
+ "expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n "
+ },
+ "8": {
+ "action": "devtool add myapp <directory>(this is myapp dir) \n\n\n",
+ "expected_results": "The directory you should input is the myapp directory. This should automatically create the recipe myapp.bb under <crops-esdk-workdir-workspace>/recipes/myapp/myapp.bb"
+ },
+ "9": {
+ "action": " devtool build myapp \n\n",
+ "expected_results": "This should compile an image"
+ },
+ "10": {
+ "action": " devtool reset myapp ",
+ "expected_results": "This cleans sysroot of the myapp recipe, but it leaves the source tree intact. meaning it does not erase."
+ }
+ },
+ "summary": "sdkext_eSDK_devtool_build_make"
+ }
+ },
+ {
+ "test": {
+ "@alias": "crops-default.crops-default.sdkext_devtool_build_esdk_package",
+ "author": [
+ {
+ "email": "francisco.j.pedraza.gonzalez@intel.com",
+ "name": "francisco.j.pedraza.gonzalez@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "IMPORTANT NOTE: The firsts 5 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": " Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": " Create the following tree of files <crops-esdk-workdir-workspace>/sdkext/files/myapp/ \n <crops-esdk-workdir-workspace>/sdkext/files/myapp_cmake \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": " Create the following files withing the myapp directory myapp.c and the Makefile. Write the following inside of each file: \n---------------------------------------- \nMakefile should contain \n\nall: myapp \n\nmyapp: myapp.o \n\t$(CC) $(LDFLAGS) $< -o $@ \n\nmyapp.o: myapp.c \n\t$(CC) $(CFLAGS) -c $< -o $@ \n\nclean: \n\trm -rf myapp.o myapp \n\n----------------------------- \nmyapp.c shold contain \n\n#include <stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n \n\treturn 0; \n} \n------------------------------------ \n\n",
+ "expected_results": "be sure that the indentations on the makefile are tabs not spaces. \n\n"
+ },
+ "5": {
+ "action": " Create the following files within the myapp_cmake directory CMakeLists.txt and myapp.c. Write the following inside each file: \n\n------------------------------------ \nCMakeLists.txt should contain: \n\ncmake_minimum_required (VERSION 2.6) \nproject (myapp) \n# The version number. \nset (myapp_VERSION_MAJOR 1) \nset (myapp_VERSION_MINOR 0) \n\n# add the executable \nadd_executable (myapp myapp.c) \n\ninstall(TARGETS myapp \nRUNTIME DESTINATION bin) \n\n------------------------------------------ \nmyapp.c should contain: \n\n#include<stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n\n\treturn 0; \n} \n------------------------------------------------- \n\n",
+ "expected_results": "Be sure that the indentations on CMakeLists.txt is tabs not spaces. \n\n"
+ },
+ "6": {
+ "action": " source environment-setup-i586-poky-linux \n\n",
+ "expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
+ },
+ "7": {
+ "action": " run command which devtool \n\n",
+ "expected_results": " this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
+ },
+ "8": {
+ "action": " devtool add myapp <directory> (this is myapp dir) \n\n",
+ "expected_results": " The directory you should input is the myapp directory. This should automatically create the recipe myapp.bb under <crops-esdk-workdir-workspace>/recipes/myapp/myapp.bb \n\n"
+ },
+ "9": {
+ "action": " devtool package myapp \n\n",
+ "expected_results": " you should expect a package creation of myapp and it should be under the /tmp/deploy/ \n\n"
+ },
+ "10": {
+ "action": " devtool reset myapp ",
+ "expected_results": "This cleans sysroot of the myapp recipe, but it leaves the source tree intact. meaning it does not erase.\n</package_format>"
+ }
+ },
+ "summary": "sdkext_devtool_build_esdk_package"
+ }
+ },
+ {
+ "test": {
+ "@alias": "crops-default.crops-default.sdkext_devtool_build_cmake",
+ "author": [
+ {
+ "email": "francisco.j.pedraza.gonzalez@intel.com",
+ "name": "francisco.j.pedraza.gonzalez@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "IMPORTANT NOTE: The firsts 5 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": " Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": " Create the following tree of files <crops-esdk-workdir-workspace>/sdkext/files/myapp \n <crops-esdk-workdir-workspace>/sdkext/files/myapp_cmake \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": " Create the following files withing the myapp directory myapp.c and the Makefile. Write the following inside of each file: \n---------------------------------------- \nMakefile should contain \n\nall: myapp \n\nmyapp: myapp.o \n\t$(CC) $(LDFLAGS) $< -o $@ \n\nmyapp.o: myapp.c \n\t$(CC) $(CFLAGS) -c $< -o $@ \n\nclean: \n\trm -rf myapp.o myapp \n\n----------------------------- \nmyapp.c shold contain \n\n#include <stdio.h> \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n \n\treturn 0; \n} \n------------------------------------ \n\n",
+ "expected_results": "be sure that the indentations on the makefile are tabs not spaces. \n\n"
+ },
+ "5": {
+ "action": " Create the following files within the myapp_cmake directory CMakeLists.txt and myapp.c. Write the following inside each file: \n\n------------------------------------ \nCMakeLists.txt should contain: \n\ncmake_minimum_required (VERSION 2.6) \nproject (myapp) \n# The version number. \nset (myapp_VERSION_MAJOR 1) \nset (myapp_VERSION_MINOR 0) \n\n# add the executable \nadd_executable (myapp myapp.c) \n\ninstall(TARGETS myapp \nRUNTIME DESTINATION bin) \n\n------------------------------------------ \nmyapp.c should contain: \n\n#include \n\nint \nmain(int argc, char *argv[]) \n{ \n\tprintf(\"Hello world\\n\"); \n\n\treturn 0; \n} \n------------------------------------------------- \n\n",
+ "expected_results": "Be sure that the indentations on CMakeLists.txt is tabs not spaces. \n\n"
+ },
+ "6": {
+ "action": " source environment-setup-i586-poky-linux \n\n",
+ "expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
+ },
+ "7": {
+ "action": " run command which devtool \n\n",
+ "expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
+ },
+ "8": {
+ "action": " devtool add myapp <directory> (this is myapp_cmake dir) \n\n",
+ "expected_results": "The directory you should input is the myapp_cmake directory. This should automatically create the recipe myapp.bb under <crops-esdk-workdir-workspace>/recipes/myapp/myapp.bb \n\n"
+ },
+ "9": {
+ "action": " devtool build myapp \n\n",
+ "expected_results": "This should compile an image \n\n"
+ },
+ "10": {
+ "action": " devtool reset myapp ",
+ "expected_results": "This cleans sysroot of the myapp recipe, but it leaves the source tree intact. meaning it does not erase. "
+ }
+ },
+ "summary": "sdkext_devtool_build_cmake"
+ }
+ },
+ {
+ "test": {
+ "@alias": "crops-default.crops-default.sdkext_extend_autotools_recipe_creation",
+ "author": [
+ {
+ "email": "francisco.j.pedraza.gonzalez@intel.com",
+ "name": "francisco.j.pedraza.gonzalez@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "IMPORTANT NOTE: The firsts 2 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": " source environment-setup-i586-poky-linux \n\n",
+ "expected_results": " This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
+ },
+ "4": {
+ "action": "run command which devtool \n\n",
+ "expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
+ },
+ "5": {
+ "action": "devtool sdk-install -s libxml2 \n\n",
+ "expected_results": "this should install libxml2 \n\n"
+ },
+ "6": {
+ "action": "devtool add librdfa https://github.com/rdfa/librdfa \n\n",
+ "expected_results": "This should automatically create the recipe librdfa.bb under /recipes/librdfa/librdfa.bb \n\n"
+ },
+ "7": {
+ "action": "devtool build librdfa \n\n",
+ "expected_results": "This should compile \n\n"
+ },
+ "8": {
+ "action": "devtool reset librdfa ",
+ "expected_results": "This cleans sysroot of the librdfa recipe, but it leaves the source tree intact. meaning it does not erase."
+ }
+ },
+ "summary": "sdkext_extend_autotools_recipe_creation"
+ }
+ },
+ {
+ "test": {
+ "@alias": "crops-default.crops-default.sdkext_devtool_kernelmodule",
+ "author": [
+ {
+ "email": "francisco.j.pedraza.gonzalez@intel.com",
+ "name": "francisco.j.pedraza.gonzalez@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "IMPORTANT NOTE: The firsts 2 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": " Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "source environment-setup-i586-poky-linux \n\n",
+ "expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n \n"
+ },
+ "4": {
+ "action": "run command which devtool \n\n",
+ "expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
+ },
+ "5": {
+ "action": "devtool add v4l2loopback-driver https://github.com/umlaeute/v4l2loopback.git \n\n",
+ "expected_results": "This should automatically create the recipe v4l2loopback-driver.bb under <crops-esdk-workdir-workspace>/recipes/v4l2loopback-driver/v4l2loopback-driver.bb "
+ },
+ "6": {
+ "action": "devtool build v4l2loopback-driver \n\n",
+ "expected_results": "This should compile an image \n\n"
+ },
+ "7": {
+ "action": "devtool reset v4l2loopback-driver ",
+ "expected_results": "This cleans sysroot of the v4l2loopback-driver recipe, but it leaves the source tree intact. meaning it does not erase."
+ }
+ },
+ "summary": "sdkext_devtool_kernelmodule"
+ }
+ },
+ {
+ "test": {
+ "@alias": "crops-default.crops-default.sdkext_recipes_for_nodejs",
+ "author": [
+ {
+ "email": "francisco.j.pedraza.gonzalez@intel.com",
+ "name": "francisco.j.pedraza.gonzalez@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "IMPORTANT NOTE: The firsts 2 steps refer to configuration of the environment to run the rest of the steps. These only apply for CROPS-eSDK. \n\n\nlets say variable npm = npm://registry.npmjs.org;name=winston;version=2.2.0 \n\n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Initiate your Crops-esdk environment as it says in wiki https://github.com/crops/docker-win-mac-docs/wiki \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "source environment-setup-i586-poky-linux \n\n",
+ "expected_results": "This should output a message that says SDK environment now set up; additionally you may now run devtool to perform development tasks etc etc ... \n\n"
+ },
+ "4": {
+ "action": "run command which devtool \n\n",
+ "expected_results": "this should output the directory of the devtool script and it should be within the sdk workdir you are working in. \n\n"
+ },
+ "5": {
+ "action": " 4a) git clone git://git.openembedded.org/meta-openembedded in layers/build directory \n \n4b) Add meta-openembedded/meta-oe in bblayer.conf as mentioned below: ${SDKBASEMETAPATH}/layers/build/meta-openembedded/meta-oe \\ \n\n4c) devtool add \"npm://registry.npmjs.org;name=npm;version=2.2.0\" \n\n",
+ "expected_results": " This should automatically create the recipe npm.bb under /recipes/npm/npm.bb \n\n"
+ },
+ "6": {
+ "action": "devtool build npm \n\n",
+ "expected_results": "This should compile an image \n\n"
+ },
+ "7": {
+ "action": " devtool reset npm",
+ "expected_results": "This cleans sysroot of the npm recipe, but it leaves the source tree intact. meaning it does not erase."
+ }
+ },
+ "summary": "sdkext_recipes_for_nodejs"
+ }
+ }
+]
\ No newline at end of file
diff --git a/poky/meta/lib/oeqa/manual/eclipse-plugin.json b/poky/meta/lib/oeqa/manual/eclipse-plugin.json
new file mode 100644
index 0000000..9869150
--- /dev/null
+++ b/poky/meta/lib/oeqa/manual/eclipse-plugin.json
@@ -0,0 +1,322 @@
+[
+ {
+ "test": {
+ "@alias": "eclipse-plugin.eclipse-plugin.support_SSH_connection_to_Target",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "In Eclipse, swich to Remote System Explorer to create a connention baseed on SSH, input the remote target IP address as the Host name, make sure disable the proxy in Window->Preferences->General->Network Connection, set Direct as Active Provider field. ",
+ "expected_results": "the connection based on SSH could be set up."
+ },
+ "2": {
+ "action": "Configure connection from Eclipse: Run->Run Configurations->C/C++ Remote Application\\ ->New Connection->General->SSH Only ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Then right click to connect, input the user ID and password. ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "expand the connection, it will show the Sftp Files etc. \nNOTE. Might need to change dropbear to openssh and add the packagegroup-core-eclipse-debug recipe",
+ "expected_results": ""
+ }
+ },
+ "summary": "support_SSH_connection_to_Target"
+ }
+ },
+ {
+ "test": {
+ "@alias": "eclipse-plugin.eclipse-plugin.Launch_QEMU_from_Eclipse",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Set the Yocto ADT's toolchain root location, sysroot location and kernel, in the menu Window -> Preferences -> Yocto ADT. \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "wget autobuilder.yoctoproject.org/pub/releases//machines/qemu/qemux86/qemu (ex:core-image-sato-sdk-qemux86-date-rootfs-tar-bz2) \nsource /opt/poky/version/environment-setup-i585-poky-linux \n\nExtract qemu with runqemu-extract-sdk /home/user/file(ex.core-image-sato-sdk-qemux86.bz2) \n/home/user/qemux86-sato-sdk \n\n",
+ "expected_results": " Qemu can be lauched normally."
+ },
+ "3": {
+ "action": "(a)Point to the Toolchain: \n \nIf you are using a stand-alone pre-built toolchain, you should be pointing to the /opt/poky/{test-version} directory as Toolchain Root Location. This is the default location for toolchains installed by the ADT Installer or by hand. If ADT is installed in other location, use that location as Toolchain location.\nIf you are using a system-derived toolchain, the path you provide for the Toolchain Root Location field is the Yocto Project's build directory. \n \n E.g:/home/user/yocto/poky/build \n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "(b)Specify the Sysroot Location: \nSysroot Location is the location where the root filesystem for the target hardware is created on the development system by the ADT Installer (SYSROOT in step 2 of the case ADT installer Installation). \n \n Local : e.g: /home/user/qemux86-sato-sdk \nUsing ADT : e.g :/home/user/test-yocto/qemux86 \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "(c)Select the Target Architecture: \n \nThe target architecture is the type of hardware you are going to use or emulate. Use the pull-down Target Architecture menu to make your selection. \n \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "(d) QEMU: \nSelect this option if you will be using the QEMU emulator. Specify the Kernel matching the QEMU architecture you are using. \n wget autobuilder.yoctoproject.org/pub/releases//machines/qemu/qemux86/bzImage-qemux86.bin \n e.g: /home/$USER/yocto/adt-installer/download_image/bzImage-qemux86.bin \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "(e) select OK to save the settings. \n\n\n1: In the Eclipse toolbar, expose the Run -> External Tools menu. Your image should appear as a selectable menu item. \n2: Select your image in the navigation pane to launch the emulator in a new window. \n3: If needed, enter your host root password in the shell window at the prompt. This sets up a Tap 0 connection needed for running in user-space NFS mode. \n",
+ "expected_results": ""
+ }
+ },
+ "summary": "Launch_QEMU_from_Eclipse"
+ }
+ },
+ {
+ "test": {
+ "@alias": "eclipse-plugin.eclipse-plugin.Relocatable_SDK_-_C_-_Build_Hello_World_ANSI_C_Autotools_Project",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Launch a QEMU of target enviroment.(Reference to case \"ADT - Launch qemu by eclipse\") ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Select File -> New -> Project.",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Double click C/C++.",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Click C or C++ Project to create the project.",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Expand Yocto ADT Project.",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Select Hello World ANSI C Autotools Project.",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Put a name in the Project name. Do not use hyphens as part of the name. \n \n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Click Next.",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Add information in the Author and Copyright notice fields. \n1",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "Click Finish. \n1",
+ "expected_results": ""
+ },
+ "11": {
+ "action": "If the \"open perspective\" prompt appears, click \"Yes\" so that you open the C/C++ perspective. \n1",
+ "expected_results": ""
+ },
+ "12": {
+ "action": "In the Project Explorer window, right click the project -> Reconfigure project. \n1",
+ "expected_results": ""
+ },
+ "13": {
+ "action": "In the Project Explorer window, right click the project -> Build project. \n1",
+ "expected_results": "Under the Project files, a new folder appears called Binaries. This indicates that the compilation have been successful and the project binary have been created. \n"
+ },
+ "14": {
+ "action": "Right click it again and Run as -> Run Configurations. \n\t\t\tUnder Run Configurations expand \"C/C++ Remote Application\". A configuration for the current project should appear. Clicking it will display the configuration settings. \n\t\t\tin \"C/C++ Application\" field input Remote Absolute File path for C/C++ Application. e.g.: /home/root/myapplication \n\t\t\tIn \"Connection\" drop-down list make sure a TCF connection is set up for your target. If not, create a new one by clicking the New button. \n1",
+ "expected_results": "step 14 to step 16 -> Build succeed and the console outputs Hello world, you can also check the output on target."
+ },
+ "15": {
+ "action": "After all settings are done, select the Run button on the bottom right corner \n\n1",
+ "expected_results": ""
+ },
+ "16": {
+ "action": "Repeat the steps 14-15, but instead of using Run Configurations use Debug Configurations: \nRight click it again and Debug as -> Debug Configurations \nUnder Debug Configurations expand \"C/C++ Remote Application\". A configuration for the current project should appear. Clicking it will display the configuration settings. \nin \"C/C++ Application\" field input Remote Absolute File path for C/C++ Application.\ne.g.: /home/root/myapplication \nIn \"Connection\" drop-down list make sure a TCF connection is set up for your target. If not, create a new one by clicking the New button \n1",
+ "expected_results": ""
+ },
+ "17": {
+ "action": "After all settings are done, select the Debug button on the bottom right corner",
+ "expected_results": ""
+ }
+ },
+ "summary": "Relocatable_SDK_-_C_-_Build_Hello_World_ANSI_C_Autotools_Project"
+ }
+ },
+ {
+ "test": {
+ "@alias": "eclipse-plugin.eclipse-plugin.Relocatable_SDK_-_C++_-_Build_Hello_World_C++_Autotools_project",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Launch a QEMU of target enviroment.(Reference to case \"ADT - Launch qemu by eclipse\") ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Select File -> New -> Project. ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Double click C/C++. ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Click C or C++ Project to create the project. ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Expand Yocto ADT Project. ",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Select Hello World ANSI C++ Autotools Project. ",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Put a name in the Project name. Do not use hyphens as part of the name. \n \n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Click Next.",
+ "expected_results": ""
+ },
+ "9": {
+ "action": "Add information in the Author and Copyright notice fields.",
+ "expected_results": ""
+ },
+ "10": {
+ "action": "Click Finish. \n1",
+ "expected_results": ""
+ },
+ "11": {
+ "action": "If the \"open perspective\" prompt appears, click \"Yes\" so that you open the C/C++ perspective. \n1",
+ "expected_results": ""
+ },
+ "12": {
+ "action": "In the Project Explorer window, right click the project -> Reconfigure project. \n1",
+ "expected_results": ""
+ },
+ "13": {
+ "action": "In the Project Explorer window, right click the project -> Build project. \n\n1",
+ "expected_results": "under the Project files, a new folder appears called Binaries. This indicates that the compilation have been successful and the project binary have been created. \n"
+ },
+ "14": {
+ "action": "Right click it again and Run as -> Run Configurations. \n\t\t\tUnder Run Configurations expand \"C/C++ Remote Application\". A configuration for the current project should appear. Clicking it will display the configuration settings. \n\t\t\tin \"C/C++ Application\" field input Remote Absolute File path for C/C++ Application. e.g.: /home/root/myapplication \n\t\t\tIn \"Connection\" drop-down list make sure a TCF connection is set up for your target. If not, create a new one by clicking the New button. \n1",
+ "expected_results": "step 14 to step 16 -> Build succeed and the console outputs Hello world, you can also check the output on target."
+ },
+ "15": {
+ "action": "After all settings are done, select the Run button on the bottom right corner \n\n1",
+ "expected_results": ""
+ },
+ "16": {
+ "action": "Repeat the steps 14-15, but instead of using Run Configurations use Debug Configurations: \n\t\tRight click it again and Debug as -> Debug Configurations \n\t\tUnder Debug Configurations expand \"C/C++ Remote Application\". A configuration for the current project should appear. Clicking it will display the configuration settings. \n\t\tin \"C/C++ Application\" field input Remote Absolute File path for C/C++ Application. \n\t\te.g.: /home/root/myapplication \n\t\tIn \"Connection\" drop-down list make sure a TCF connection is set up for your target. If not, create a new one by clicking the New button \n1",
+ "expected_results": ""
+ },
+ "17": {
+ "action": "After all settings are done, select the Debug button on the bottom right corner",
+ "expected_results": ""
+ }
+ },
+ "summary": "Relocatable_SDK_-_C++_-_Build_Hello_World_C++_Autotools_project"
+ }
+ },
+ {
+ "test": {
+ "@alias": "eclipse-plugin.eclipse-plugin.Build_Eclipse_Plugin_from_source",
+ "author": [
+ {
+ "email": "laurentiu.serban@intel.com",
+ "name": "laurentiu.serban@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Clone eclipse-poky source. \n \n - git clone git://git.yoctoproject.org/eclipse-poky \n\n",
+ "expected_results": "Eclipse plugin is successfully installed \n\nDocumentation is there. For example if you have release yocto-2.0.1 you will found on http://autobuilder.yoctoproject.org/pub/releases/yocto-2.0.1/eclipse-plugin/mars/ archive with documentation like org.yocto.doc-development-$date.zip \n \n"
+ },
+ "2": {
+ "action": "Checkout correct tag. \n\n - git checkout <eclipse-version>/<yocto-version> \n\n",
+ "expected_results": "After plugin is build you must have 4 archive in foder scripts from eclipse-poky: \n - org.yocto.bc - mars-master-$date.zip \n - org.yocto.doc - mars-master-$date.zip --> documentation \n - org.yocto.sdk - mars-master-$date.zip \n - org.yocto.sdk - mars-master-$date.-archive.zip --> plugin "
+ },
+ "3": {
+ "action": "Move to scripts/ folder. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Run ./setup.sh \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "When the script finishes, it prompts a command to issue to build the plugin. It should look similar to the following: \n\n$ ECLIPSE_HOME=/eclipse-poky/scripts/eclipse ./build.sh /&1 | tee -a build.log \n\nHere, the three arguments to the build script are tag name, branch for documentation and release name. \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "On an eclipse without the Yocto Plugin, select \"Install New Software\" from Help pull-down menu \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Select Add and from the dialog choose Archive... Look for the *archive.zip file that was built previously with the build.sh script. Click OK. \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Select all components and proceed with Installation of plugin. Restarting eclipse might be required.\n",
+ "expected_results": ""
+ }
+ },
+ "summary": "Build_Eclipse_Plugin_from_source"
+ }
+ },
+ {
+ "test": {
+ "@alias": "eclipse-plugin.eclipse-plugin.Eclipse_Poky_installation_and_setup",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Install SDK \n\ta)Download https://autobuilder.yocto.io/pub/releases//toolchain/x86_64/poky-glibc-x86_64-core-\timage-sato-i586-toolchain-.sh \n\tb)Run the SDK installer and accept the default installation directory ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Install \"Eclipse IDE for C/C++ Developers\" Oxygen release (4.7.0) \n\ta) Go to https://www.eclipse.org/downloads/packages/all, click \"Oxygen R\" \n\tb) Click to download the build for your OS \n\tc) Click \"Download\" button to download from a mirror \n\td) Run \"tar xf\" to extract the downloaded archive ",
+ "expected_result": ""
+ },
+ "3": {
+ "action": "Install \"Eclipse IDE for C/C++ Developers\" Oxygen release (4.7.0) (Continue) \n\te) Run \"eclipse/eclipse\" to start Eclipse \n\tf) Optional step for host machine within Intel network: In Eclipse workbench window, go to \"Window\" menu -> \"Preferences...\". \n\tg) In \"Preferences\" dialog, go to \"General\" -> \"Network Connections\", set \"Active Provider\" to \"Manual\". In \"Proxy \tentries\" table, select HTTP and click \"Edit\" and enter host \"proxy-chain.intel.com\" port 911, click OK. Repeat for HTTPS with port 912 \nClick OK to close \"Preferences\" dialog. \n\th) Go to \"File\" menu -> \"Restart\" to restart Eclipse for proxy settings to take effect. ",
+ "expected_result": ""
+ },
+ "4": {
+ "action": "Install Eclipse Poky plugins \n\ta) Download https://autobuilder.yocto.io/pub/releases/<yocto-version>/eclipse-plugin/<eclipse-version>/org.yocto.sdk-development-<date>-archive.zip \n\tb) In Eclipse workbench window, go to \"Help\" menu -> \"Install New Software...\" \n\tc) In \"Install\" dialog, click \"Add...\" button \n\td) In \"Add Repository\" dialog, enter \"Eclipse Poky\" for (repository) Name, click \"Archive...\" ",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "Install Eclipse Poky plugins (continue) \n\te) In \"Repository archive\" browse dialog, select the downloaded Eclipse Poky repository archive \n\tf) Back in \"Add Repository\" dialog, click \"OK\" \n\tg) Back in \"Install\" dialog, make sure \"Work with:\" is set to \"Eclipse Poky\" repository, tick \"Yocto Project \tDocumentation Plug-in\" and \"Yocto Project SDK Plug-in\", click \"Next >\" and verify plugins/features name/version, \tclick \"Next >\" and accept license agreement, click \"Finish\" \n\th) If \"Security Warning\" dialog appears, click \"OK\" to install unsigned content. \n\ti) In \"Software Updates\" dialog, click \"Yes\" to restart Eclipse to complete Eclipse Poky plugins installation. ",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "Setup Eclipse Poky to use SDK \n\ta) In Eclipse workbench window, go to \"Window\" menu -> \"Preferences\". \n\tb) In \"Preferences\" window, go to \"Yocto Project SDK\", in \"Cross Compiler Options\" frame, select \"Standalone pre-\tbuilt toolchain\". ",
+ "expected_results": "Eclipse Poky plugins installed and running successfully, e.g. observe that \"Yocto Project Tools\" menu is available on Eclipse workbench window."
+ }
+ },
+ "summary": "Eclipse_Poky_installation_and_setup"
+ }
+ }
+]
\ No newline at end of file
diff --git a/poky/meta/lib/oeqa/manual/kernel-dev.json b/poky/meta/lib/oeqa/manual/kernel-dev.json
new file mode 100644
index 0000000..c93b4dd
--- /dev/null
+++ b/poky/meta/lib/oeqa/manual/kernel-dev.json
@@ -0,0 +1,200 @@
+[
+ {
+ "test": {
+ "@alias": "kernel-configuration.kernel-configuration.TCTEMP_2.3_MANUAL_Kernel_dev_defconfig",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_7 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_7",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_7"
+ }
+ },
+ "summary": "TCTEMP_2.3_MANUAL_Kernel_dev_defconfig"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-configuration.kernel-configuration.TCTEMP_2.3_MANUAL_Kernel_dev_defconfig+fragments",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_8 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_8",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_8"
+ }
+ },
+ "summary": "TCTEMP_2.3_MANUAL_Kernel_dev_defconfig+fragments"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-configuration.kernel-configuration.TCTEMP_2.3_MANUAL_Kernel_dev_Applying_patches",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results"
+ }
+ },
+ "summary": "TCTEMP_2.3_MANUAL_Kernel_dev_Applying_patches"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-configuration.kernel-configuration.TCTEMP_2.3_MANUAL_Kernel_dev_linux-yocto-local-source",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_2 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_2",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_2"
+ }
+ },
+ "summary": "TCTEMP_2.3_MANUAL_Kernel_dev_linux-yocto-local-source"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-configuration.kernel-configuration.TCTEMP_2.3_MANUAL_Kernel_dev_linux-yocto-custom-local-source",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_3 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_3",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_3"
+ }
+ },
+ "summary": "TCTEMP_2.3_MANUAL_Kernel_dev_linux-yocto-custom-local-source"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-configuration.kernel-configuration.TCTEMP_2.3_MANUAL_Kernel_dev_recipe-space_meta",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_5 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_5",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_5"
+ }
+ },
+ "summary": "TCTEMP_2.3_MANUAL_Kernel_dev_recipe-space_meta"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-configuration.kernel-configuration.TCTEMP_2.3_MANUAL_Kernel_dev_External_source",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_6 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_6",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_6"
+ }
+ },
+ "summary": "TCTEMP_2.3_MANUAL_Kernel_dev_External_source"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-configuration.kernel-configuration.TCTEMP_2.3_MANUAL_Kernel_dev_building_external_modules(hello-mod)",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_10 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup_10",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_10"
+ }
+ },
+ "summary": "TCTEMP_2.3_MANUAL_Kernel_dev_building_external_modules(hello-mod)"
+ }
+ },
+ {
+ "test": {
+ "@alias": "kernel-configuration.kernel-configuration.TCTEMP_2.3_MANUAL_Kernel_dev_local_parallel_meta",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Follow the Set Up procedure to complete the common and specific prerequisites for this test case. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Setup https://wikioproject.org/wiki/Kernel_Development_Test_Cases#Prerequisites_4 ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Execute the test case steps asdocumented on the \"Kernel Development Test Cases\" wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Steps_4",
+ "expected_results": "Review expected results on thethe \"Kernel Development Test Cases\"wiki. https://wiki.yoctoproject.org/wiki/Kernel_Development_Test_Cases#Expected_Results_4"
+ }
+ },
+ "summary": "TCTEMP_2.3_MANUAL_Kernel_dev_local_parallel_meta"
+ }
+ }
+]
\ No newline at end of file
diff --git a/poky/meta/lib/oeqa/manual/oe-core.json b/poky/meta/lib/oeqa/manual/oe-core.json
new file mode 100644
index 0000000..d893d84
--- /dev/null
+++ b/poky/meta/lib/oeqa/manual/oe-core.json
@@ -0,0 +1,260 @@
+[
+ {
+ "test": {
+ "@alias": "oe-core.scripts.Use_scripts/pybootchartgui/pybootchartgui.py_to_generate_build_profiles",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Run a build for a recipe (e.g. core-image-minimal)",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Run the profiling script, ../scripts/pybootchartgui/pybootchartgui.py tmp/buildstats/ \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Verify the results ",
+ "expected_results": "The scripts generates svg files with the profiling results . "
+ }
+ },
+ "summary": "Use_scripts/pybootchartgui/pybootchartgui.py_to_generate_build_profiles"
+ }
+ },
+ {
+ "test": {
+ "@alias": "oe-core.scripts.Crosstap_script_check",
+ "author": [
+ {
+ "email": "alexandru.c.georgescu@intel.com",
+ "name": "alexandru.c.georgescu@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Create the trace_open.stp script as follows in the host machine: \n\n\nprobe syscall.open \n\n{ \n\n\n printf (\"%s(%d) open (%s)\\n\", execname(), pid(), argstr) \n\n} \n\n\n\nif the above failed, then create the below instead. \n\nprobe syscall.open \n{ \n printf (\"%s(%d) open\\n\", execname(), pid()) \n\n} \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Add 'tools-profile' and 'ssh-server-openssh' to EXTRA_IMAGE_FEATURES in local.conf \n\n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Build a core-image-minimal image, build systemtap-native. Start the image under qemu. \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "Make sure that the ssh service is started on the Qemu machine. \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "From the host machine poky build_dir, run \"crosstap root@192.168.7.2 trace_open.stp\".",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "In QEMU, try to open some applications, such as open a terminal, input some command, \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Check the host machine, \"crosstap\" has related output. \n\n\n\nNOTE: Do not build the kernel from shared state(sstate-cache) for this to work.",
+ "expected_results": "The script should successfully connect to the qemu machine and there \nshould be presented a list of services(pid, process name) which run on \nthe qemu machine. "
+ }
+ },
+ "summary": "Crosstap_script_check"
+ }
+ },
+ {
+ "test": {
+ "@alias": "oe-core.scripts.List_all_the_PACKAGECONFIG's_flags",
+ "author": [
+ {
+ "email": "yi.zhao@windriver.com",
+ "name": "yi.zhao@windriver.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": " Download the poky source and setup the environment. ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Run \"../scripts/contrib/list-packageconfig-flags.py\" ",
+ "expected_results": "In step 2, will list available pkgs which have PACKAGECONFIG flags: \nPACKAGE NAME (or RECIPE NAME) PACKAGECONFIG FLAGS \n============================================================== \nalsa-tools-1.0.26.1 defaultval gtk+ \navahi-ui-0.6.31 defaultval python \nbluez4-4.101 alsa defaultval pie \n"
+ },
+ "3": {
+ "action": "Run \"../scripts/contrib/list-packageconfig-flags.py -f\" ",
+ "expected_results": "In step 3, will list available PACKAGECONFIG flags and all affected pkgs \nPACKAGECONFIG FLAG PACKAGE NAMES (or RECIPE NAMES) \n==================================== \n3g connman-1.16 \n \navahi cups-1.6.3 pulseaudio-4.0 \nbeecrypt rpm-5.4.9 rpm-native-5.4.9 \n"
+ },
+ "4": {
+ "action": "Run \"../scripts/contrib/list-packageconfig-flags.py -a\" ",
+ "expected_results": "In step 4, will list all pkgs and PACKAGECONFIG information: \n================================================== \ngtk+-2.24.18 \n/home/jiahongxu/yocto/poky/meta/recipes-gnome/gtk+/gtk+_2.24.18.bb \nPACKAGECONFIG x11 \nPACKAGECONFIG[x11] --with-x=yes --with-gdktarget=x11,--with-x=no,${X11DEPENDS} \nxf86-video-intel-2.21.9 \n/home/jiahongxu/yocto/poky/meta/recipes-graphics/xorg-driver/xf86-video-intel_2.21.9.bb \nPACKAGECONFIG None \nPACKAGECONFIG[xvmc] --enable-xvmc,--disable-xvmc,libxvmc \nPACKAGECONFIG[sna] --enable-sna,--disable-sna \n"
+ },
+ "5": {
+ "action": "Run \"../scripts/contrib/list-packageconfig-flags.py -p\" ",
+ "expected_results": "In step 5, will list pkgs with preferred version: \nPACKAGE NAME (or RECIPE NAME) PACKAGECONFIG FLAGS \n=================================================== \nalsa-tools-1.0.26.1 defaultval gtk+ \navahi-ui-0.6.31 defaultval python \nbluez4-4.101 alsa defaultval pie \nbluez5-5.7 alsa defaultval obex-profiles \n\n\n\n "
+ }
+ },
+ "summary": "List_all_the_PACKAGECONFIG's_flags"
+ }
+ },
+ {
+ "test": {
+ "@alias": "oe-core.bitbake.Test_bitbake_menuconfig",
+ "author": [
+ {
+ "email": "jose.perez.carranza@intel.com",
+ "name": "jose.perez.carranza@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "clone poky \n \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "cd poky \n\n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "source oe-init-build-env && cd build \n\n",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "set below in local.conf \n\n \tMACHINE = \"qemux86\" \n\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "bitbake linux-yocto -c kernel_configme -f \n\n",
+ "expected_results": ""
+ },
+ "6": {
+ "action": "bitbake linux-yocto -c menuconfig \n\n",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Once menuconfig launches, use the interface to navigate through the selections and \n enable option \"64-bit kernel\" \n\n",
+ "expected_results": ""
+ },
+ "8": {
+ "action": "Save changes and set name of the file as \"test.config\" ",
+ "expected_results": "Open file: \n \npoky/build//tmp/work/qemux86-poky-linux/linux-yocto/4.X.X+*/linux-qemux86-standard-build/test.config \n \n \n\nand verify that changes are present in the file as follows: \n \nCONFIG_64BIT=y \n \nCONFIG_X86_64=y"
+ }
+ },
+ "summary": "Test_bitbake_menuconfig"
+ }
+ },
+ {
+ "test": {
+ "@alias": "oe-core.bitbake.test_bitbake_devshell",
+ "author": [
+ {
+ "email": "jose.perez.carranza@intel.com",
+ "name": "jose.perez.carranza@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "clone poky ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "cd poky ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "source oe-init-build-env && cd build ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "bitbake matchbox-desktop ",
+ "expected_results": "Package was build correctly "
+ },
+ "5": {
+ "action": "bitbake matchbox-desktop -c devshell ",
+ "expected_results": "A terminal with a shell prompt within the OpenEmbedded build environment is opened "
+ },
+ "6": {
+ "action": "Verify that \"matchbox-desktop\" binary file is not created under\"src\" directory ",
+ "expected_results": ""
+ },
+ "7": {
+ "action": "Run command:./configure && make ",
+ "expected_results": "Verify that \"matchbox-desktop\" binary file was created successfully under \"src/\" directory "
+ },
+ "8": {
+          "action": "Exit from the devshell terminal by running \"exit\" ",
+          "expected_results": "Terminal returns to the build directory"
+ }
+ },
+ "summary": "test_bitbake_devshell"
+ }
+ },
+ {
+ "test": {
+ "@alias": "oe-core.bitbake.test_dependency_explorer_is_launched",
+ "author": [
+ {
+ "email": "jose.perez.carranza@intel.com",
+ "name": "jose.perez.carranza@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "clone poky ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "cd poky ",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "source oe-init-build-env ",
+ "expected_results": ""
+ },
+ "4": {
+ "action": "bitbake -u taskexp -g core-image-full-cmdline \n\nNOTE: To execute the last command of this test, it's necessary that the machine is executing an X11 server, or if that's not the case (for example, if running the test on a headless server), it is required to enable ssh X11 forwarding on both, the server and the client, and have the X11 server running on the client. \n\nThe instructions to enable X11 forwarding vary between distributions. But for example, these are the steps to enable it between a server running openSUSE Leap 42.1 and a client with Fedora 24: \nA. On the server, make sure /home//.ssh/config contains the line: \n ForwardX11 yes \nB. On the server, make sure xauth is installed by running: \n which xauth \nC. On the client, connect to the server, enabling X11 forwarding, for example by using: \n ssh -X user@server \nNOTE 2: depexp was renamed to taskexp on 2.3 M4",
+ "expected_results": "Verify that a \"dependency explorer\" is opened and file \n dependencies are listed "
+ }
+ },
+ "summary": "test_dependency_explorer_is_launched"
+ }
+ },
+ {
+ "test": {
+ "@alias": "oe-core.bitbake.test_bitbake_sane_error_for_invalid_layer",
+ "author": [
+ {
+ "email": "jose.perez.carranza@intel.com",
+ "name": "jose.perez.carranza@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "clone poky \n",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "cd poky \n \n",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "source oe-init-build-env && cd build \n \n",
+ "expected_results": ""
+ },
+ "4": {
+          "action": "Add an invalid layer to conf/bblayers.conf \"<poky dir>/my-invalid-layer\" \n\t\n",
+ "expected_results": ""
+ },
+ "5": {
+ "action": "bitbake core-image-minimal",
+        "expected_results": "The error below should be displayed:\n\"ERROR: Layer directory does not exist! Please check BBLAYERS in <poky dir>/<build dir>/conf/bblayers.conf\""
+ }
+ },
+ "summary": "test_bitbake_sane_error_for_invalid_layer"
+ }
+ }
+]
\ No newline at end of file
diff --git a/poky/meta/lib/oeqa/manual/sdk.json b/poky/meta/lib/oeqa/manual/sdk.json
new file mode 100644
index 0000000..6475586
--- /dev/null
+++ b/poky/meta/lib/oeqa/manual/sdk.json
@@ -0,0 +1,32 @@
+[
+ {
+ "test": {
+ "@alias": "sdk.sdk_runqemu.test_sdk_toolchain_can_run_multiple_QEMU_machines_under_UNFS",
+ "author": [
+ {
+ "email": "ee.peng.yeoh@intel.com",
+ "name": "ee.peng.yeoh@intel.com"
+ }
+ ],
+ "execution": {
+ "1": {
+ "action": "Prepare kernel, rootfs tar.bz2 image, and qemu configuration \n \ta. Download kernel, rootfs tar.bz2 image and qemu configuration from public autobuilder webpage \n \tb. Goto https://autobuilder.yocto.io/pub/releases/<target_release>/machines/qemu/qemux86/ \n \tc. Download \n \t \ti. rootfs tar.bz2: core-image-sato-sdk-qemux86.tar.bz2 \n \t\tii. kernel: bzImage-qemux86.bin \n \t\tiii. qemu configuration: core-image-sato-sdk-qemux86.qemuboot.conf ",
+ "expected_results": ""
+ },
+ "2": {
+ "action": "Download & install sdk toolchain from public autobuilder \n \ta. Goto https://autobuilder.yocto.io/pub/releases/<target_release>/toolchain/x86_64/ \n \tb. Download poky-glibc-x86_64-core-image-sato-sdk-<type-arch>-toolchain-<release-version>.sh \n \tc. Run command: poky-glibc-x86_64-core-image-sato-sdk-<type-arch>-toolchain-<release-version>.sh",
+ "expected_results": ""
+ },
+ "3": {
+ "action": "Extract rootfs twice into two images \n \ta. Run 2 commands below: \n runqemu-extract-sdk core-image-sato-sdk-qemux86.tar.bz2 qemux86_rootfs_image1 \n runqemu-extract-sdk core-image-sato-sdk-qemux86.tar.bz2 qemux86_rootfs_image2",
+ "expected_results": ""
+ },
+ "4": {
+        "action": "From two terminals, start qemu to boot both images \n \ta. Run 2 commands below: \n runqemu core-image-sato-sdk-qemux86.qemuboot.conf qemux86_rootfs_image1 \n runqemu core-image-sato-sdk-qemux86.qemuboot.conf qemux86_rootfs_image2 ",
+ "expected_results": "Expect both qemu to boot up successfully."
+ }
+ },
+ "summary": "test_sdk_toolchain_can_run_multiple_QEMU_machines_under_UNFS"
+ }
+ }
+]
\ No newline at end of file
diff --git a/poky/meta/lib/oeqa/runtime/cases/apt.py b/poky/meta/lib/oeqa/runtime/cases/apt.py
index 8d4dd35..793143f 100644
--- a/poky/meta/lib/oeqa/runtime/cases/apt.py
+++ b/poky/meta/lib/oeqa/runtime/cases/apt.py
@@ -18,7 +18,7 @@
@classmethod
def setUpClass(cls):
service_repo = os.path.join(cls.tc.td['DEPLOY_DIR_DEB'], 'all')
- cls.repo_server = HTTPService(service_repo, cls.tc.target.server_ip)
+ cls.repo_server = HTTPService(service_repo, cls.tc.target.server_ip, logger=cls.tc.logger)
cls.repo_server.start()
@classmethod
diff --git a/poky/meta/lib/oeqa/runtime/cases/dnf.py b/poky/meta/lib/oeqa/runtime/cases/dnf.py
index 2f87296..c1ed39d 100644
--- a/poky/meta/lib/oeqa/runtime/cases/dnf.py
+++ b/poky/meta/lib/oeqa/runtime/cases/dnf.py
@@ -55,7 +55,7 @@
@classmethod
def setUpClass(cls):
cls.repo_server = HTTPService(os.path.join(cls.tc.td['WORKDIR'], 'oe-testimage-repo'),
- cls.tc.target.server_ip)
+ cls.tc.target.server_ip, logger=cls.tc.logger)
cls.repo_server.start()
@classmethod
@@ -67,7 +67,8 @@
deploy_url = 'http://%s:%s/' %(self.target.server_ip, self.repo_server.port)
cmdlinerepoopts = ["--repofrompath=oe-testimage-repo-%s,%s%s" %(arch, deploy_url, arch) for arch in pkgarchs]
- self.dnf(" ".join(cmdlinerepoopts) + " --nogpgcheck " + command)
+ output = self.dnf(" ".join(cmdlinerepoopts) + " --nogpgcheck " + command)
+ return output
@OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
@OETestID(1744)
@@ -88,6 +89,9 @@
@OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
@OETestID(1740)
def test_dnf_install(self):
+ output = self.dnf_with_repo('list run-postinsts-dev')
+ if 'Installed Packages' in output:
+ self.dnf_with_repo('remove -y run-postinsts-dev')
self.dnf_with_repo('install -y run-postinsts-dev')
@OETestDepends(['dnf.DnfRepoTest.test_dnf_install'])
@@ -120,4 +124,44 @@
def test_dnf_reinstall(self):
self.dnf_with_repo('reinstall -y run-postinsts-dev')
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
+ @OETestID(1771)
+ def test_dnf_installroot(self):
+ rootpath = '/home/root/chroot/test'
+        #Copy necessary files to avoid errors from tools that are not yet
+        #installed in the installroot directory.
+ self.target.run('mkdir -p %s/etc' % rootpath, 1500)
+ self.target.run('mkdir -p %s/bin %s/sbin %s/usr/bin %s/usr/sbin' % (rootpath, rootpath, rootpath, rootpath), 1500)
+ self.target.run('mkdir -p %s/dev' % rootpath, 1500)
+        #Handle lib dirs for different architectures
+ self.target.run('mkdir -p %s/lib' % rootpath, 1500)
+ self.target.run('mkdir -p %s/libx32' % rootpath, 1500)
+ self.target.run('mkdir -p %s/lib64' % rootpath, 1500)
+ self.target.run('cp /lib/libtinfo.so.5 %s/lib' % rootpath, 1500)
+ self.target.run('cp /libx32/libtinfo.so.5 %s/libx32' % rootpath, 1500)
+ self.target.run('cp /lib64/libtinfo.so.5 %s/lib64' % rootpath, 1500)
+ self.target.run('cp -r /etc/rpm %s/etc' % rootpath, 1500)
+ self.target.run('cp -r /etc/dnf %s/etc' % rootpath, 1500)
+ self.target.run('cp /bin/sh %s/bin' % rootpath, 1500)
+ self.target.run('mount -o bind /dev %s/dev/' % rootpath, 1500)
+ self.dnf_with_repo('install --installroot=%s -v -y --rpmverbosity=debug busybox run-postinsts' % rootpath)
+ status, output = self.target.run('test -e %s/var/cache/dnf' % rootpath, 1500)
+ self.assertEqual(0, status, output)
+ status, output = self.target.run('test -e %s/bin/busybox' % rootpath, 1500)
+ self.assertEqual(0, status, output)
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
+ @OETestID(1772)
+ def test_dnf_exclude(self):
+ excludepkg = 'curl-dev'
+ self.dnf_with_repo('install -y curl*')
+ self.dnf('list %s' % excludepkg, 0)
+        #Avoid removing dependencies, to skip some errors on different archs and images
+ self.dnf_with_repo('remove --setopt=clean_requirements_on_remove=0 -y curl*')
+        #check curl-dev is not installed after removing all curl occurrences
+ status, output = self.target.run('dnf list --installed | grep %s'% excludepkg, 1500)
+ self.assertEqual(1, status, "%s was not removed, is listed as installed"%excludepkg)
+ self.dnf_with_repo('install -y --exclude=%s --exclude=curl-staticdev curl*' % excludepkg)
+ #check curl-dev is not installed after being excluded
+ status, output = self.target.run('dnf list --installed | grep %s'% excludepkg , 1500)
+ self.assertEqual(1, status, "%s was not excluded, is listed as installed"%excludepkg)
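
For context, the dnf tests above point dnf at the deployed package feed with --repofrompath and --nogpgcheck rather than writing .repo files onto the target. A minimal standalone sketch of how that command line is assembled is below; the architectures and feed URL are illustrative values, not ones taken from a real test run.

    # Sketch only: pkgarchs and deploy_url are hypothetical example values.
    pkgarchs = ["all", "noarch", "core2_64"]
    deploy_url = "http://192.168.7.1:8080/"
    repoopts = ["--repofrompath=oe-testimage-repo-%s,%s%s" % (arch, deploy_url, arch)
                for arch in pkgarchs]
    cmd = "dnf " + " ".join(repoopts) + " --nogpgcheck " + "install -y run-postinsts-dev"
    print(cmd)
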
diff --git a/poky/meta/lib/oeqa/runtime/cases/ksample.py b/poky/meta/lib/oeqa/runtime/cases/ksample.py
new file mode 100644
index 0000000..354cc97
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/ksample.py
@@ -0,0 +1,221 @@
+import os
+import time
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotFeature
+
+# need some kernel fragments
+# echo "KERNEL_FEATURES_append += \" features\/kernel\-sample\/kernel\-sample.scc\"" >> local.conf
+class KSample(OERuntimeTestCase):
+ def cmd_and_check(self, cmd='', match_string=''):
+ status, output = self.target.run(cmd)
+ if not match_string:
+ # send cmd
+ msg = '%s failed, %s' % (cmd, output)
+ self.assertEqual(status, 0, msg=msg)
+ else:
+ # check result
+ result = ("%s" % match_string) in output
+ msg = output
+ self.assertTrue(result, msg)
+ self.assertEqual(status, 0, cmd)
+
+ def check_config(self, config_opt=''):
+ cmd = "zcat /proc/config.gz | grep %s" % config_opt
+ status, output = self.target.run(cmd)
+ result = ("%s=y" % config_opt) in output
+ if not result:
+ self.skipTest("%s is not set" % config_opt)
+
+ def check_module_exist(self, path='', module_name=''):
+ status, output = self.target.run("uname -r")
+ cmd = "ls " + "/lib/modules/" + output + "/kernel/samples/" + path + module_name
+ status, output = self.target.run(cmd)
+ if status != 0:
+ error_info = module_name + " doesn't exist"
+ self.skipTest(error_info)
+
+    def kfifo_func(self, name=''):
+        module_prename = name + "-example"
+        module_name = name + "-example.ko"
+        symbol_name = name + "_example"
+
+        # make sure the module exists
+        self.check_module_exist("kfifo/", module_name)
+        # modprobe
+        self.cmd_and_check("modprobe %s" % module_prename)
+        # lsmod
+        self.cmd_and_check("lsmod | grep %s | cut -d\' \' -f1" % symbol_name, symbol_name)
+        # check result
+        self.cmd_and_check("dmesg | grep \"test passed\" ", "test passed")
+        # rmmod
+        self.cmd_and_check("rmmod %s" % module_prename)
+
+    def kprobe_func(self, name=''):
+        # check config
+        self.check_config("CONFIG_KPROBES")
+
+        module_prename = name + "_example"
+        module_name = name + "_example.ko"
+        symbol_name = module_prename
+
+        # make sure the module exists
+        self.check_module_exist("kprobes/", module_name)
+        # modprobe
+        self.cmd_and_check("modprobe %s" % module_prename)
+        # lsmod
+        self.cmd_and_check("lsmod | grep %s | cut -d\' \' -f1" % symbol_name, symbol_name)
+        # check result
+        self.cmd_and_check("dmesg | grep Planted | head -n10", "Planted")
+        # rmmod
+        self.cmd_and_check("rmmod %s" % module_prename)
+
+    def kobject_func(self, name=''):
+        module_prename = name + "_example"
+        module_name = name + "-example.ko"
+        symbol_name = module_prename
+
+        # make sure the module exists
+        self.check_module_exist("kobject/", module_name)
+        # modprobe
+        self.cmd_and_check("modprobe %s" % module_prename)
+        # lsmod
+        self.cmd_and_check("lsmod | grep %s | cut -d\' \' -f1" % symbol_name, symbol_name)
+        # check result
+        self.cmd_and_check("ls /sys/kernel/%s/" % symbol_name, "bar")
+        # rmmod
+        self.cmd_and_check("rmmod %s" % module_prename)
+
+class KSampleTest(KSample):
+ # kfifo
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_kfifo_test(self):
+ index = ["dma", "bytestream", "inttype", "record"]
+ for i in index:
+ self.kfifo_func(i)
+
+ # kprobe
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_kprobe_test(self):
+ index = ["kprobe", "kretprobe"]
+ for i in index:
+ self.kprobe_func(i)
+
+ # kobject
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_kobject_test(self):
+ index = ["kobject", "kset"]
+ for i in index:
+ self.kobject_func(i)
+
+ #trace
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_trace_events(self):
+ # check config
+ self.check_config("CONFIG_TRACING_SUPPORT")
+        # make sure the module exists
+ self.check_module_exist("trace_events/", "trace-events-sample.ko")
+ # modprobe
+ self.cmd_and_check("modprobe trace-events-sample")
+ # lsmod
+ self.cmd_and_check("lsmod | grep trace_events_sample | cut -d\' \' -f1", "trace_events_sample")
+ # check dir
+ self.cmd_and_check("ls /sys/kernel/debug/tracing/events/ | grep sample-trace", "sample-trace")
+ # enable trace
+ self.cmd_and_check("echo 1 > /sys/kernel/debug/tracing/events/sample-trace/enable")
+ self.cmd_and_check("cat /sys/kernel/debug/tracing/events/sample-trace/enable")
+ # check result
+ status = 1
+ count = 0
+ while status != 0:
+ time.sleep(1)
+ status, output = self.target.run('cat /sys/kernel/debug/tracing/trace | grep hello | head -n1 | cut -d\':\' -f2')
+ if " foo_bar" in output:
+ break
+ count = count + 1
+ if count > 5:
+                self.fail("Timed out while checking the result")
+ # disable trace
+ self.cmd_and_check("echo 0 > /sys/kernel/debug/tracing/events/sample-trace/enable")
+ # clean up trace
+ self.cmd_and_check("echo > /sys/kernel/debug/tracing/trace")
+ # rmmod
+ self.cmd_and_check("rmmod trace-events-sample")
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_trace_printk(self):
+ # check config
+ self.check_config("CONFIG_TRACING_SUPPORT")
+        # make sure the module exists
+ self.check_module_exist("trace_printk/", "trace-printk.ko")
+ # modprobe
+ self.cmd_and_check("modprobe trace-printk")
+ # lsmod
+ self.cmd_and_check("lsmod | grep trace_printk | cut -d\' \' -f1", "trace_printk")
+ # check result
+ self.cmd_and_check("cat /sys/kernel/debug/tracing/trace | grep trace_printk_irq_work | head -n1 | cut -d\':\' -f2", " trace_printk_irq_work")
+ # clean up trace
+ self.cmd_and_check("echo > /sys/kernel/debug/tracing/trace")
+ # rmmod
+ self.cmd_and_check("rmmod trace-printk")
+
+ # hw breakpoint
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_hw_breakpoint_example(self):
+ # check arch
+ status, output = self.target.run("uname -m")
+ result = ("x86" in output) or ("aarch64" in output)
+ if not result:
+            self.skipTest("arch %s doesn't support hw breakpoint" % output)
+ # check config
+ self.check_config("CONFIG_KALLSYMS_ALL")
+        # make sure the module exists
+ self.check_module_exist("hw_breakpoint/", "data_breakpoint.ko")
+ # modprobe
+ self.cmd_and_check("modprobe data_breakpoint")
+ # lsmod
+ self.cmd_and_check("lsmod | grep data_breakpoint | cut -d\' \' -f1", "data_breakpoint")
+ # check result
+ self.cmd_and_check("cat /var/log/messages | grep sample_hbp_handler", "sample_hbp_handler")
+ # rmmod
+ self.cmd_and_check("rmmod data_breakpoint")
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_configfs_sample(self):
+ # check config
+ status, ret = self.target.run('zcat /proc/config.gz | grep CONFIG_CONFIGFS_FS')
+        if not ("CONFIG_CONFIGFS_FS=m" in ret or "CONFIG_CONFIGFS_FS=y" in ret):
+            self.skipTest("CONFIG_CONFIGFS_FS is not set")
+        # make sure the module exists
+ self.check_module_exist("configfs/", "configfs_sample.ko")
+ # modprobe
+ self.cmd_and_check("modprobe configfs_sample")
+ # lsmod
+ self.cmd_and_check("lsmod | grep configfs_sample | cut -d\' \' -f1 | head -n1", "configfs_sample")
+
+ status = 1
+ count = 0
+ while status != 0:
+ time.sleep(1)
+ status, ret = self.target.run('cat /sys/kernel/config/01-childless/description')
+ count = count + 1
+ if count > 200:
+                self.skipTest("Timed out waiting for the config dir")
+
+ # rmmod
+ self.cmd_and_check("rmmod configfs_sample")
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_cn_test(self):
+        # make sure the module exists
+ self.check_module_exist("connector/", "cn_test.ko")
+ # modprobe
+ self.cmd_and_check("modprobe cn_test")
+ # lsmod
+ self.cmd_and_check("lsmod | grep cn_test | cut -d\' \' -f1", "cn_test")
+ # check result
+ self.cmd_and_check("cat /proc/net/connector | grep cn_test | head -n1 | cut -d\' \' -f1", "cn_test")
+ # rmmod
+ self.cmd_and_check("rmmod cn_test")
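
The KSample helpers above all follow the same run-then-verify pattern: execute a command on the target, assert it exited 0, and optionally assert that the output contains a marker string. A self-contained sketch of that pattern, using subprocess locally as a stand-in for self.target.run() (the echo command is just a placeholder), might look like this:

    import subprocess

    def run(cmd):
        # Local stand-in for self.target.run(): returns (status, output)
        proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
        return proc.returncode, proc.stdout.strip()

    def cmd_and_check(cmd, match_string=''):
        status, output = run(cmd)
        assert status == 0, "%s failed: %s" % (cmd, output)
        if match_string:
            assert match_string in output, output

    cmd_and_check("echo test passed", "test passed")
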
diff --git a/poky/meta/lib/oeqa/runtime/cases/ldd.py b/poky/meta/lib/oeqa/runtime/cases/ldd.py
index c6d92fd..5bde184 100644
--- a/poky/meta/lib/oeqa/runtime/cases/ldd.py
+++ b/poky/meta/lib/oeqa/runtime/cases/ldd.py
@@ -2,24 +2,23 @@
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.oeid import OETestID
from oeqa.core.decorator.data import skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
class LddTest(OERuntimeTestCase):
@OETestID(962)
- @skipIfNotFeature('tools-sdk',
- 'Test requires tools-sdk to be in IMAGE_FEATURES')
+ @OEHasPackage(["ldd"])
@OETestDepends(['ssh.SSHTest.test_ssh'])
- def test_ldd_exists(self):
+ def test_ldd(self):
status, output = self.target.run('which ldd')
msg = 'ldd does not exist in PATH: which ldd: %s' % output
self.assertEqual(status, 0, msg=msg)
- @OETestID(239)
- @OETestDepends(['ldd.LddTest.test_ldd_exists'])
- def test_ldd_rtldlist_check(self):
cmd = ('for i in $(which ldd | xargs cat | grep "^RTLDLIST"| '
'cut -d\'=\' -f2|tr -d \'"\'); '
'do test -f $i && echo $i && break; done')
status, output = self.target.run(cmd)
- msg = "ldd path not correct or RTLDLIST files don't exist."
- self.assertEqual(status, 0, msg=msg)
+ self.assertEqual(status, 0, msg="ldd path not correct or RTLDLIST files don't exist.")
+
+ status, output = self.target.run("ldd /bin/true")
+ self.assertEqual(status, 0, msg="ldd failed to execute: %s" % output)
diff --git a/poky/meta/lib/oeqa/runtime/cases/logrotate.py b/poky/meta/lib/oeqa/runtime/cases/logrotate.py
index 992fef2..db6e695 100644
--- a/poky/meta/lib/oeqa/runtime/cases/logrotate.py
+++ b/poky/meta/lib/oeqa/runtime/cases/logrotate.py
@@ -21,9 +21,9 @@
self.assertEqual(status, 0, msg = msg)
cmd = ('sed -i "s#wtmp {#wtmp {\\n olddir $HOME/logrotate_dir#"'
- ' /etc/logrotate.conf')
+ ' /etc/logrotate.d/wtmp')
status, output = self.target.run(cmd)
- msg = ('Could not write to logrotate.conf file. Status and output: '
+ msg = ('Could not write to logrotate.d/wtmp file. Status and output: '
' %s and %s' % (status, output))
self.assertEqual(status, 0, msg = msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/oe_syslog.py b/poky/meta/lib/oeqa/runtime/cases/oe_syslog.py
index 005b697..a92a1f2 100644
--- a/poky/meta/lib/oeqa/runtime/cases/oe_syslog.py
+++ b/poky/meta/lib/oeqa/runtime/cases/oe_syslog.py
@@ -8,12 +8,14 @@
@OETestID(201)
@OETestDepends(['ssh.SSHTest.test_ssh'])
- @OEHasPackage(["busybox-syslog", "sysklogd"])
+ @OEHasPackage(["busybox-syslog", "sysklogd", "rsyslog", "syslog-ng"])
def test_syslog_running(self):
- cmd = '%s | grep -i [s]yslogd' % self.tc.target_cmds['ps']
- status, output = self.target.run(cmd)
- msg = "No syslogd process; ps output: %s" % output
+ status, output = self.target.run(self.tc.target_cmds['ps'])
+ msg = "Failed to execute %s" % self.tc.target_cmds['ps']
self.assertEqual(status, 0, msg=msg)
+ msg = "No syslog daemon process; %s output:\n%s" % (self.tc.target_cmds['ps'], output)
+ hasdaemon = "syslogd" in output or "syslog-ng" in output
+ self.assertTrue(hasdaemon, msg=msg)
class SyslogTestConfig(OERuntimeTestCase):
@@ -45,7 +47,7 @@
@OETestID(202)
@OETestDepends(['oe_syslog.SyslogTestConfig.test_syslog_logger'])
- @OEHasPackage(["!sysklogd", "busybox"])
+ @OEHasPackage(["busybox-syslog"])
@skipIfDataVar('VIRTUAL-RUNTIME_init_manager', 'systemd',
'Not appropiate for systemd image')
def test_syslog_startup_config(self):
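
The relaxed check above accepts any of the listed syslog implementations and then looks for a daemon name in the full ps listing instead of grepping for a single process. A minimal sketch of that match, with made-up ps output:

    # ps_output is a fabricated example, not captured from a target
    ps_output = """  612 root      2936 S    /sbin/syslogd -n
  613 root      2936 S    /sbin/klogd -n"""
    has_daemon = "syslogd" in ps_output or "syslog-ng" in ps_output
    assert has_daemon, "No syslog daemon process; ps output:\n%s" % ps_output
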
diff --git a/poky/meta/lib/oeqa/runtime/cases/opkg.py b/poky/meta/lib/oeqa/runtime/cases/opkg.py
index 671ee06..29e9902 100644
--- a/poky/meta/lib/oeqa/runtime/cases/opkg.py
+++ b/poky/meta/lib/oeqa/runtime/cases/opkg.py
@@ -17,8 +17,11 @@
@classmethod
def setUpClass(cls):
- service_repo = os.path.join(cls.tc.td['DEPLOY_DIR_IPK'], 'all')
- cls.repo_server = HTTPService(service_repo, cls.tc.target.server_ip)
+ allarchfeed = 'all'
+ if cls.tc.td["MULTILIB_VARIANTS"]:
+ allarchfeed = cls.tc.td["TUNE_PKGARCH"]
+ service_repo = os.path.join(cls.tc.td['DEPLOY_DIR_IPK'], allarchfeed)
+ cls.repo_server = HTTPService(service_repo, cls.tc.target.server_ip, logger=cls.tc.logger)
cls.repo_server.start()
@classmethod
diff --git a/poky/meta/lib/oeqa/runtime/cases/parselogs.py b/poky/meta/lib/oeqa/runtime/cases/parselogs.py
index 1f36c61..f6e9820 100644
--- a/poky/meta/lib/oeqa/runtime/cases/parselogs.py
+++ b/poky/meta/lib/oeqa/runtime/cases/parselogs.py
@@ -49,6 +49,10 @@
"error: couldn\'t mount because of unsupported optional features",
"GPT: Use GNU Parted to correct GPT errors",
"Cannot set xattr user.Librepo.DownloadInProgress",
+ "Failed to read /var/lib/nfs/statd/state: Success",
+ "error retry time-out =",
+ "logind: cannot setup systemd-logind helper (-61), using legacy fallback",
+ "Error changing net interface name 'eth0' to "
]
video_related = [
@@ -120,15 +124,6 @@
'dmi: Firmware registration failed.',
'irq: type mismatch, failed to map hwirq-27 for /intc',
] + common_errors,
- 'emenlow' : [
- '[Firmware Bug]: ACPI: No _BQC method, cannot determine initial brightness',
- '(EE) Failed to load module "psb"',
- '(EE) Failed to load module psb',
- '(EE) Failed to load module "psbdrv"',
- '(EE) Failed to load module psbdrv',
- '(EE) open /dev/fb0: No such file or directory',
- '(EE) AIGLX: reverting to software rendering',
- ] + x86_common,
'intel-core2-32' : [
'ACPI: No _BQC method, cannot determine initial brightness',
'[Firmware Bug]: ACPI: No _BQC method, cannot determine initial brightness',
@@ -156,7 +151,6 @@
'Bluetooth: hci0: Failed to send firmware data (-38)',
'atkbd serio0: Failed to enable keyboard on isa0060/serio0',
] + x86_common,
- 'crownbay' : x86_common,
'genericx86' : x86_common,
'genericx86-64' : [
'Direct firmware load for i915',
@@ -169,10 +163,6 @@
'edgerouter' : [
'Fatal server error:',
] + common_errors,
- 'jasperforest' : [
- 'Activated service \'org.bluez\' failed:',
- 'Unable to find NFC netlink family',
- ] + common_errors,
}
log_locations = ["/var/log/","/var/log/dmesg", "/tmp/dmesg_output.log"]
@@ -323,7 +313,7 @@
pass
if result is not None:
- results[log.replace('target_logs/','')] = {}
+ results[log] = {}
rez = result.splitlines()
for xrez in rez:
@@ -333,7 +323,7 @@
grep_output = check_output(cmd).decode('utf-8')
except:
pass
- results[log.replace('target_logs/','')][xrez]=grep_output
+ results[log][xrez]=grep_output
return results
diff --git a/poky/meta/lib/oeqa/runtime/cases/perl.py b/poky/meta/lib/oeqa/runtime/cases/perl.py
index d0b7e8e..afeeb18 100644
--- a/poky/meta/lib/oeqa/runtime/cases/perl.py
+++ b/poky/meta/lib/oeqa/runtime/cases/perl.py
@@ -1,37 +1,13 @@
import os
from oeqa.runtime.case import OERuntimeTestCase
-from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.oeid import OETestID
from oeqa.runtime.decorator.package import OEHasPackage
class PerlTest(OERuntimeTestCase):
-
- @classmethod
- def setUpClass(cls):
- src = os.path.join(cls.tc.files_dir, 'test.pl')
- dst = '/tmp/test.pl'
- cls.tc.target.copyTo(src, dst)
-
- @classmethod
- def tearDownClass(cls):
- dst = '/tmp/test.pl'
- cls.tc.target.run('rm %s' % dst)
-
- @OETestID(1141)
- @OETestDepends(['ssh.SSHTest.test_ssh'])
- @OEHasPackage(['perl'])
- def test_perl_exists(self):
- status, output = self.target.run('which perl')
- msg = 'Perl binary not in PATH or not on target.'
- self.assertEqual(status, 0, msg=msg)
-
@OETestID(208)
- @OETestDepends(['perl.PerlTest.test_perl_exists'])
+ @OEHasPackage(['perl'])
def test_perl_works(self):
- status, output = self.target.run('perl /tmp/test.pl')
- msg = 'Exit status was not 0. Output: %s' % output
- self.assertEqual(status, 0, msg=msg)
-
- msg = 'Incorrect output: %s' % output
- self.assertEqual(output, "the value of a is 0.01", msg=msg)
+ status, output = self.target.run("perl -e '$_=\"Uryyb, jbeyq\"; tr/a-zA-Z/n-za-mN-ZA-M/;print'")
+ self.assertEqual(status, 0)
+ self.assertEqual(output, "Hello, world")
diff --git a/poky/meta/lib/oeqa/runtime/cases/python.py b/poky/meta/lib/oeqa/runtime/cases/python.py
index bf3e179..4419a9f 100644
--- a/poky/meta/lib/oeqa/runtime/cases/python.py
+++ b/poky/meta/lib/oeqa/runtime/cases/python.py
@@ -1,43 +1,21 @@
-import os
-
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.oeid import OETestID
-from oeqa.runtime.decorator.package import OEHasPackage
class PythonTest(OERuntimeTestCase):
-
@classmethod
def setUpClass(cls):
- src = os.path.join(cls.tc.files_dir, 'test.py')
- dst = '/tmp/test.py'
- cls.tc.target.copyTo(src, dst)
-
- @classmethod
- def tearDownClass(cls):
- dst = '/tmp/test.py'
- cls.tc.target.run('rm %s' % dst)
-
- @OETestID(1145)
- @OETestDepends(['ssh.SSHTest.test_ssh'])
- @OEHasPackage(['python-core'])
- def test_python_exists(self):
- status, output = self.target.run('which python')
- msg = 'Python binary not in PATH or not on target.'
- self.assertEqual(status, 0, msg=msg)
+ import unittest
+ if "python3-core" not in cls.tc.image_packages:
+ raise unittest.SkipTest("Python3 not on target")
@OETestID(965)
- @OETestDepends(['python.PythonTest.test_python_exists'])
- def test_python_stdout(self):
- status, output = self.target.run('python /tmp/test.py')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_python3(self):
+ cmd = "python3 -c \"import codecs; print(codecs.encode('Uryyb, jbeyq', 'rot13'))\""
+ status, output = self.target.run(cmd)
msg = 'Exit status was not 0. Output: %s' % output
self.assertEqual(status, 0, msg=msg)
msg = 'Incorrect output: %s' % output
- self.assertEqual(output, "the value of a is 0.01", msg=msg)
-
- @OETestID(1146)
- @OETestDepends(['python.PythonTest.test_python_stdout'])
- def test_python_testfile(self):
- status, output = self.target.run('ls /tmp/testfile.python')
- self.assertEqual(status, 0, msg='Python test file generate failed.')
+ self.assertEqual(output, "Hello, world", msg=msg)
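
Both the perl and python runtime tests above now verify the interpreter with a one-liner instead of copying a test script to the target: they decode the rot13 string "Uryyb, jbeyq" and expect "Hello, world". The round trip can be checked locally:

    import codecs
    encoded = codecs.encode("Hello, world", "rot13")
    assert encoded == "Uryyb, jbeyq"
    assert codecs.encode(encoded, "rot13") == "Hello, world"
    print(encoded)
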
diff --git a/poky/meta/lib/oeqa/runtime/cases/rpm.py b/poky/meta/lib/oeqa/runtime/cases/rpm.py
index 84c59a6..1e5e463 100644
--- a/poky/meta/lib/oeqa/runtime/cases/rpm.py
+++ b/poky/meta/lib/oeqa/runtime/cases/rpm.py
@@ -39,31 +39,31 @@
pkgarch = cls.td['TUNE_PKGARCH'].replace('-', '_')
rpmdir = os.path.join(cls.tc.td['DEPLOY_DIR'], 'rpm', pkgarch)
- # Pick rpm-doc as a test file to get installed, because it's small
+ # Pick base-passwd-doc as a test file to get installed, because it's small
# and it will always be built for standard targets
- rpm_doc = 'rpm-doc-*.%s.rpm' % pkgarch
+ rpm_doc = 'base-passwd-doc-*.%s.rpm' % pkgarch
for f in fnmatch.filter(os.listdir(rpmdir), rpm_doc):
test_file = os.path.join(rpmdir, f)
- dst = '/tmp/rpm-doc.rpm'
+ dst = '/tmp/base-passwd-doc.rpm'
cls.tc.target.copyTo(test_file, dst)
@classmethod
def tearDownClass(cls):
- dst = '/tmp/rpm-doc.rpm'
+ dst = '/tmp/base-passwd-doc.rpm'
cls.tc.target.run('rm -f %s' % dst)
@OETestID(192)
@OETestDepends(['rpm.RpmBasicTest.test_rpm_help'])
def test_rpm_install(self):
- status, output = self.target.run('rpm -ivh /tmp/rpm-doc.rpm')
- msg = 'Failed to install rpm-doc package: %s' % output
+ status, output = self.target.run('rpm -ivh /tmp/base-passwd-doc.rpm')
+ msg = 'Failed to install base-passwd-doc package: %s' % output
self.assertEqual(status, 0, msg=msg)
@OETestID(194)
@OETestDepends(['rpm.RpmInstallRemoveTest.test_rpm_install'])
def test_rpm_remove(self):
- status,output = self.target.run('rpm -e rpm-doc')
- msg = 'Failed to remove rpm-doc package: %s' % output
+ status,output = self.target.run('rpm -e base-passwd-doc')
+ msg = 'Failed to remove base-passwd-doc package: %s' % output
self.assertEqual(status, 0, msg=msg)
@OETestID(1096)
@@ -107,7 +107,7 @@
Expected: There should be some RPM prefixed entries in the above file.
Product: BSPs
Author: Alexandru Georgescu <alexandru.c.georgescu@intel.com>
- Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
"""
db_files_cmd = 'ls /var/lib/rpm/__db.*'
@@ -119,16 +119,16 @@
self.assertEqual(0, status, msg=msg)
# Remove the package just in case
- self.target.run('rpm -e rpm-doc')
+ self.target.run('rpm -e base-passwd-doc')
# Install/Remove a package 10 times
for i in range(10):
- status, output = self.target.run('rpm -ivh /tmp/rpm-doc.rpm')
- msg = 'Failed to install rpm-doc package. Reason: {}'.format(output)
+ status, output = self.target.run('rpm -ivh /tmp/base-passwd-doc.rpm')
+ msg = 'Failed to install base-passwd-doc package. Reason: {}'.format(output)
self.assertEqual(0, status, msg=msg)
- status, output = self.target.run('rpm -e rpm-doc')
- msg = 'Failed to remove rpm-doc package. Reason: {}'.format(output)
+ status, output = self.target.run('rpm -e base-passwd-doc')
+ msg = 'Failed to remove base-passwd-doc package. Reason: {}'.format(output)
self.assertEqual(0, status, msg=msg)
# if using systemd this should ensure all entries are flushed to /var
diff --git a/poky/meta/lib/oeqa/runtime/cases/scanelf.py b/poky/meta/lib/oeqa/runtime/cases/scanelf.py
deleted file mode 100644
index 3ba1f78..0000000
--- a/poky/meta/lib/oeqa/runtime/cases/scanelf.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from oeqa.runtime.case import OERuntimeTestCase
-from oeqa.core.decorator.depends import OETestDepends
-from oeqa.core.decorator.oeid import OETestID
-from oeqa.runtime.decorator.package import OEHasPackage
-
-class ScanelfTest(OERuntimeTestCase):
- scancmd = 'scanelf --quiet --recursive --mount --ldpath --path'
-
- @OETestID(966)
- @OETestDepends(['ssh.SSHTest.test_ssh'])
- @OEHasPackage(['pax-utils'])
- def test_scanelf_textrel(self):
- # print TEXTREL information
- cmd = '%s --textrel' % self.scancmd
- status, output = self.target.run(cmd)
- msg = '\n'.join([cmd, output])
- self.assertEqual(output.strip(), '', msg=msg)
-
- @OETestID(967)
- @OETestDepends(['scanelf.ScanelfTest.test_scanelf_textrel'])
- def test_scanelf_rpath(self):
- # print RPATH information
- cmd = '%s --textrel --rpath' % self.scancmd
- status, output = self.target.run(cmd)
- msg = '\n'.join([cmd, output])
- self.assertEqual(output.strip(), '', msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/stap.py b/poky/meta/lib/oeqa/runtime/cases/stap.py
index fc728bf..96e197a 100644
--- a/poky/meta/lib/oeqa/runtime/cases/stap.py
+++ b/poky/meta/lib/oeqa/runtime/cases/stap.py
@@ -25,7 +25,7 @@
def test_stap(self):
cmds = [
'cd /usr/src/kernel && make scripts prepare',
- 'cd /lib/modules/`uname -r` && (if [ ! -L build ]; then ln -s /usr/src/kernel build; fi)',
+ 'cd /lib/modules/`uname -r` && (if [ ! -e build ]; then ln -s /usr/src/kernel build; fi)',
'stap --disable-cache -DSTP_NO_VERREL_CHECK /tmp/hello.stp'
]
for cmd in cmds:
diff --git a/poky/meta/lib/oeqa/runtime/cases/systemd.py b/poky/meta/lib/oeqa/runtime/cases/systemd.py
index db69384..460b8fc 100644
--- a/poky/meta/lib/oeqa/runtime/cases/systemd.py
+++ b/poky/meta/lib/oeqa/runtime/cases/systemd.py
@@ -11,11 +11,11 @@
class SystemdTest(OERuntimeTestCase):
def systemctl(self, action='', target='', expected=0, verbose=False):
- command = 'systemctl %s %s' % (action, target)
+ command = 'SYSTEMD_BUS_TIMEOUT=240s systemctl %s %s' % (action, target)
status, output = self.target.run(command)
message = '\n'.join([command, output])
if status != expected and verbose:
- cmd = 'systemctl status --full %s' % target
+ cmd = 'SYSTEMD_BUS_TIMEOUT=240s systemctl status --full %s' % target
message += self.target.run(cmd)[1]
self.assertEqual(status, expected, message)
return output
@@ -63,7 +63,7 @@
"""
endtime = time.time() + (60 * 2)
while True:
- status, output = self.target.run('systemctl --state=activating')
+ status, output = self.target.run('SYSTEMD_BUS_TIMEOUT=240s systemctl --state=activating')
if "0 loaded units listed" in output:
return (True, '')
if time.time() >= endtime:
diff --git a/poky/meta/lib/oeqa/runtime/context.py b/poky/meta/lib/oeqa/runtime/context.py
index 0294003..a7f3823 100644
--- a/poky/meta/lib/oeqa/runtime/context.py
+++ b/poky/meta/lib/oeqa/runtime/context.py
@@ -112,12 +112,9 @@
# XXX: Don't base your targets on this code it will be refactored
# in the near future.
# Custom target module loading
- try:
- target_modules_path = kwargs.get('target_modules_path', '')
- controller = OERuntimeTestContextExecutor.getControllerModule(target_type, target_modules_path)
- target = controller(logger, target_ip, server_ip, **kwargs)
- except ImportError as e:
- raise TypeError("Failed to import %s from available controller modules" % target_type)
+ target_modules_path = kwargs.get('target_modules_path', '')
+ controller = OERuntimeTestContextExecutor.getControllerModule(target_type, target_modules_path)
+ target = controller(logger, target_ip, server_ip, **kwargs)
return target
@@ -173,10 +170,7 @@
def _loadControllerFromModule(target, modulename):
obj = None
# import module, allowing it to raise import exception
- try:
- module = __import__(modulename, globals(), locals(), [target])
- except Exception as e:
- return obj
+ module = __import__(modulename, globals(), locals(), [target])
# look for target class in the module, catching any exceptions as it
# is valid that a module may not have the target class.
try:
diff --git a/poky/meta/lib/oeqa/sdk/cases/assimp.py b/poky/meta/lib/oeqa/sdk/cases/assimp.py
new file mode 100644
index 0000000..26c1df0
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/cases/assimp.py
@@ -0,0 +1,63 @@
+import os, subprocess, unittest
+import bb
+from oeqa.sdk.case import OESDKTestCase
+
+from oeqa.utils.subprocesstweak import errors_have_output
+errors_have_output()
+
+class BuildAssimp(OESDKTestCase):
+ """
+ Test case to build a project using cmake.
+ """
+
+ td_vars = ['DATETIME', 'TARGET_OS', 'TARGET_ARCH']
+
+ @classmethod
+ def setUpClass(self):
+ if not (self.tc.hasHostPackage("nativesdk-cmake") or
+ self.tc.hasHostPackage("cmake-native")):
+ raise unittest.SkipTest("Needs cmake")
+
+ def fetch(self, workdir, dl_dir, url, archive=None):
+ if not archive:
+ from urllib.parse import urlparse
+ archive = os.path.basename(urlparse(url).path)
+
+ if dl_dir:
+ tarball = os.path.join(dl_dir, archive)
+ if os.path.exists(tarball):
+ return tarball
+
+ tarball = os.path.join(workdir, archive)
+ subprocess.check_output(["wget", "-O", tarball, url])
+ return tarball
+
+ def test_assimp(self):
+ import tempfile
+ import oe.qa, oe.elf
+
+ with tempfile.TemporaryDirectory(prefix="assimp", dir=self.tc.sdk_dir) as testdir:
+ dl_dir = self.td.get('DL_DIR', None)
+ tarball = self.fetch(testdir, dl_dir, "https://github.com/assimp/assimp/archive/v4.1.0.tar.gz")
+ subprocess.check_output(["tar", "xf", tarball, "-C", testdir])
+
+ sourcedir = os.path.join(testdir, "assimp-4.1.0")
+ builddir = os.path.join(testdir, "build")
+ installdir = os.path.join(testdir, "install")
+ bb.utils.mkdirhier(builddir)
+
+ self._run("cd %s && cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON %s " % (builddir, sourcedir))
+ self._run("cmake --build %s -- -j" % builddir)
+ self._run("cmake --build %s --target install -- DESTDIR=%s" % (builddir, installdir))
+
+ elf = oe.qa.ELFFile(os.path.join(installdir, "usr", "local", "lib", "libassimp.so.4.1.0"))
+ elf.open()
+
+ output = self._run("echo $OECORE_TARGET_OS:$OECORE_TARGET_ARCH")
+ target_os, target_arch = output.strip().split(":")
+ machine_data = oe.elf.machine_dict(None)[target_os][target_arch]
+ (machine, osabi, abiversion, endian, bits) = machine_data
+
+ self.assertEqual(machine, elf.machine())
+ self.assertEqual(bits, elf.abiSize())
+ self.assertEqual(endian, elf.isLittleEndian())
diff --git a/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py b/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py
index 780afcc..050d1b3 100644
--- a/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py
+++ b/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py
@@ -8,9 +8,11 @@
@classmethod
def setUpClass(self):
- if not (self.tc.hasTargetPackage("gtk\+3") or\
- self.tc.hasTargetPackage("libgtk-3.0")):
+ if not (self.tc.hasTargetPackage("gtk+3", multilib=True) or \
+ self.tc.hasTargetPackage("libgtk-3.0", multilib=True)):
raise unittest.SkipTest("GalculatorTest class: SDK don't support gtk+3")
+ if not (self.tc.hasHostPackage("nativesdk-gettext-dev")):
+ raise unittest.SkipTest("GalculatorTest class: SDK doesn't contain gettext")
def test_galculator(self):
dl_dir = self.td.get('DL_DIR', None)
diff --git a/poky/meta/lib/oeqa/sdk/cases/buildlzip.py b/poky/meta/lib/oeqa/sdk/cases/buildlzip.py
index 3a89ce8..b28cc3a 100644
--- a/poky/meta/lib/oeqa/sdk/cases/buildlzip.py
+++ b/poky/meta/lib/oeqa/sdk/cases/buildlzip.py
@@ -17,8 +17,8 @@
machine = self.td.get("MACHINE")
- if not (self.tc.hasTargetPackage("packagegroup-cross-canadian-%s" % machine) or
- self.tc.hasTargetPackage("gcc")):
+ if not (self.tc.hasHostPackage("packagegroup-cross-canadian-%s" % machine) or
+ self.tc.hasHostPackage("^gcc-", regex=True)):
raise unittest.SkipTest("SDK doesn't contain a cross-canadian toolchain")
def test_lzip(self):
diff --git a/poky/meta/lib/oeqa/sdk/cases/gcc.py b/poky/meta/lib/oeqa/sdk/cases/gcc.py
index d11f4b6..b32b01f 100644
--- a/poky/meta/lib/oeqa/sdk/cases/gcc.py
+++ b/poky/meta/lib/oeqa/sdk/cases/gcc.py
@@ -18,8 +18,8 @@
def setUp(self):
machine = self.td.get("MACHINE")
- if not (self.tc.hasTargetPackage("packagegroup-cross-canadian-%s" % machine) or
- self.tc.hasTargetPackage("gcc")):
+ if not (self.tc.hasHostPackage("packagegroup-cross-canadian-%s" % machine) or
+ self.tc.hasHostPackage("^gcc-", regex=True)):
raise unittest.SkipTest("GccCompileTest class: SDK doesn't contain a cross-canadian toolchain")
def test_gcc_compile(self):
diff --git a/poky/meta/lib/oeqa/sdk/cases/perl.py b/poky/meta/lib/oeqa/sdk/cases/perl.py
index 8085678..ff50b46 100644
--- a/poky/meta/lib/oeqa/sdk/cases/perl.py
+++ b/poky/meta/lib/oeqa/sdk/cases/perl.py
@@ -1,8 +1,5 @@
-import os
-import shutil
+import subprocess
 import unittest
-
-from oeqa.core.utils.path import remove_safe
from oeqa.sdk.case import OESDKTestCase
class PerlTest(OESDKTestCase):
@@ -12,17 +8,10 @@
self.tc.hasHostPackage("perl-native")):
raise unittest.SkipTest("No perl package in the SDK")
- for f in ['test.pl']:
- shutil.copyfile(os.path.join(self.tc.files_dir, f),
- os.path.join(self.tc.sdk_dir, f))
- self.testfile = os.path.join(self.tc.sdk_dir, "test.pl")
-
- def test_perl_exists(self):
- self._run('which perl')
-
- def test_perl_works(self):
- self._run('perl %s' % self.testfile)
-
- @classmethod
- def tearDownClass(self):
- remove_safe(self.testfile)
+ def test_perl(self):
+ try:
+ cmd = "perl -e '$_=\"Uryyb, jbeyq\"; tr/a-zA-Z/n-za-mN-ZA-M/;print'"
+ output = self._run(cmd)
+ self.assertEqual(output, "Hello, world")
+ except subprocess.CalledProcessError as e:
+ self.fail("Unexpected exit %d (output %s)" % (e.returncode, e.output))
diff --git a/poky/meta/lib/oeqa/sdk/cases/python.py b/poky/meta/lib/oeqa/sdk/cases/python.py
index 72dfcc7..bd5f1f6 100644
--- a/poky/meta/lib/oeqa/sdk/cases/python.py
+++ b/poky/meta/lib/oeqa/sdk/cases/python.py
@@ -1,32 +1,17 @@
-import os
-import shutil
-import unittest
-
-from oeqa.core.utils.path import remove_safe
+import subprocess, unittest
from oeqa.sdk.case import OESDKTestCase
class PythonTest(OESDKTestCase):
@classmethod
def setUpClass(self):
- if not (self.tc.hasHostPackage("nativesdk-python") or
- self.tc.hasHostPackage("python-native")):
+ if not (self.tc.hasHostPackage("nativesdk-python3") or
+ self.tc.hasHostPackage("python3-native")):
raise unittest.SkipTest("No python package in the SDK")
- for f in ['test.py']:
- shutil.copyfile(os.path.join(self.tc.files_dir, f),
- os.path.join(self.tc.sdk_dir, f))
-
- def test_python_exists(self):
- self._run('which python')
-
- def test_python_stdout(self):
- output = self._run('python %s/test.py' % self.tc.sdk_dir)
- self.assertEqual(output.strip(), "the value of a is 0.01", msg="Incorrect output: %s" % output)
-
- def test_python_testfile(self):
- self._run('ls /tmp/testfile.python')
-
- @classmethod
- def tearDownClass(self):
- remove_safe("%s/test.py" % self.tc.sdk_dir)
- remove_safe("/tmp/testfile.python")
+ def test_python3(self):
+ try:
+ cmd = "python3 -c \"import codecs; print(codecs.encode('Uryyb, jbeyq', 'rot13'))\""
+ output = self._run(cmd)
+ self.assertEqual(output, "Hello, world\n")
+ except subprocess.CalledProcessError as e:
+ self.fail("Unexpected exit %d (output %s)" % (e.returncode, e.output))
diff --git a/poky/meta/lib/oeqa/sdk/context.py b/poky/meta/lib/oeqa/sdk/context.py
index 82e4c19b..adc4166 100644
--- a/poky/meta/lib/oeqa/sdk/context.py
+++ b/poky/meta/lib/oeqa/sdk/context.py
@@ -20,17 +20,30 @@
self.target_pkg_manifest = target_pkg_manifest
self.host_pkg_manifest = host_pkg_manifest
- def _hasPackage(self, manifest, pkg):
- for host_pkg in manifest.keys():
- if re.search(pkg, host_pkg):
+ def _hasPackage(self, manifest, pkg, regex=False):
+ if regex:
+ # do regex match
+ pat = re.compile(pkg)
+ for p in manifest.keys():
+ if pat.search(p):
+ return True
+ else:
+ # do exact match
+ if pkg in manifest.keys():
return True
return False
- def hasHostPackage(self, pkg):
- return self._hasPackage(self.host_pkg_manifest, pkg)
+ def hasHostPackage(self, pkg, regex=False):
+ return self._hasPackage(self.host_pkg_manifest, pkg, regex=regex)
- def hasTargetPackage(self, pkg):
- return self._hasPackage(self.target_pkg_manifest, pkg)
+ def hasTargetPackage(self, pkg, multilib=False, regex=False):
+ if multilib:
+ # match multilib according to sdk_env
+ mls = self.td.get('MULTILIB_VARIANTS', '').split()
+ for ml in mls:
+ if ('ml'+ml) in self.sdk_env:
+ pkg = ml + '-' + pkg
+ return self._hasPackage(self.target_pkg_manifest, pkg, regex=regex)
class OESDKTestContextExecutor(OETestContextExecutor):
_context_class = OESDKTestContext
@@ -65,6 +78,9 @@
sdk_rgroup.add_argument('--sdk-dir', required=False, action='store',
help='sdk installed directory')
+ self.parser.add_argument('-j', '--num-processes', dest='processes', action='store',
+ type=int, help="number of processes to execute in parallel with")
+
@staticmethod
def _load_manifest(manifest):
pkg_manifest = {}
@@ -85,6 +101,7 @@
OESDKTestContextExecutor._load_manifest(args.target_manifest)
self.tc_kwargs['init']['host_pkg_manifest'] = \
OESDKTestContextExecutor._load_manifest(args.host_manifest)
+ self.tc_kwargs['run']['processes'] = args.processes
@staticmethod
def _get_sdk_environs(sdk_dir):
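
The manifest matching above now distinguishes exact package names from regex patterns, which is why callers such as the gcc and lzip cases pass "^gcc-" with regex=True. A small sketch of the lookup logic, with a made-up manifest:

    import re

    def has_package(manifest, pkg, regex=False):
        if regex:
            pat = re.compile(pkg)
            return any(pat.search(name) for name in manifest)
        return pkg in manifest

    manifest = {"gcc-cross-canadian-x86-64": "8.2.0", "nativesdk-python3": "3.5.6"}
    print(has_package(manifest, "^gcc-", regex=True))   # True
    print(has_package(manifest, "gcc"))                 # False: exact match only
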
diff --git a/poky/meta/lib/oeqa/sdk/utils/sdkbuildproject.py b/poky/meta/lib/oeqa/sdk/utils/sdkbuildproject.py
index 4e25114..6fed73e 100644
--- a/poky/meta/lib/oeqa/sdk/utils/sdkbuildproject.py
+++ b/poky/meta/lib/oeqa/sdk/utils/sdkbuildproject.py
@@ -20,10 +20,9 @@
BuildProject.__init__(self, uri, foldername, tmpdir=testpath, dl_dir=dl_dir)
def download_archive(self):
-
self._download_archive()
- cmd = 'tar xf %s%s -C %s' % (self.targetdir, self.archive, self.targetdir)
+ cmd = 'tar xf %s -C %s' % (os.path.join(self.targetdir, self.archive), self.targetdir)
subprocess.check_output(cmd, shell=True)
#Change targetdir to project folder
@@ -42,4 +41,9 @@
def _run(self, cmd):
self.log("Running . %s; " % self.sdkenv + cmd)
- return subprocess.call(". %s; " % self.sdkenv + cmd, shell=True)
+ try:
+ output = subprocess.check_output(". %s; " % self.sdkenv + cmd, shell=True, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as exc:
+ print(exc.output.decode('utf-8'))
+ return exc.returncode
+ return 0
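
The reworked _run above captures combined stdout/stderr with check_output and only prints it when the command fails, so successful build output no longer floods the log while the exit code is still returned. A standalone sketch of that behaviour; /dev/null stands in for the real environment-setup script:

    import subprocess

    def run_in_env(envfile, cmd):
        try:
            subprocess.check_output(". %s; %s" % (envfile, cmd), shell=True,
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as exc:
            print(exc.output.decode('utf-8'))
            return exc.returncode
        return 0

    print(run_in_env("/dev/null", "echo hello"))  # prints 0
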
diff --git a/poky/meta/lib/oeqa/sdkext/cases/devtool.py b/poky/meta/lib/oeqa/sdkext/cases/devtool.py
index ea90517..0860e8d 100644
--- a/poky/meta/lib/oeqa/sdkext/cases/devtool.py
+++ b/poky/meta/lib/oeqa/sdkext/cases/devtool.py
@@ -6,7 +6,6 @@
import subprocess
from oeqa.sdkext.case import OESDKExtTestCase
-from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.oeid import OETestID
from oeqa.utils.httpserver import HTTPService
@@ -45,28 +44,23 @@
self.assertEqual(output.startswith(self.tc.sdk_dir), True, \
msg="Seems that devtool isn't the eSDK one: %s" % output)
- @OETestDepends(['test_devtool_location'])
def test_devtool_add_reset(self):
self._run('devtool add myapp %s' % self.myapp_dst)
self._run('devtool reset myapp')
@OETestID(1605)
- @OETestDepends(['test_devtool_location'])
def test_devtool_build_make(self):
self._test_devtool_build(self.myapp_dst)
@OETestID(1606)
- @OETestDepends(['test_devtool_location'])
def test_devtool_build_esdk_package(self):
self._test_devtool_build_package(self.myapp_dst)
@OETestID(1607)
- @OETestDepends(['test_devtool_location'])
def test_devtool_build_cmake(self):
self._test_devtool_build(self.myapp_cmake_dst)
@OETestID(1608)
- @OETestDepends(['test_devtool_location'])
def test_extend_autotools_recipe_creation(self):
req = 'https://github.com/rdfa/librdfa'
recipe = "librdfa"
@@ -78,7 +72,6 @@
self._run('devtool reset %s' % recipe)
@OETestID(1609)
- @OETestDepends(['test_devtool_location'])
def test_devtool_kernelmodule(self):
docfile = 'https://github.com/umlaeute/v4l2loopback.git'
recipe = 'v4l2loopback-driver'
@@ -89,7 +82,6 @@
self._run('devtool reset %s' % recipe)
@OETestID(1610)
- @OETestDepends(['test_devtool_location'])
def test_recipes_for_nodejs(self):
package_nodejs = "npm://registry.npmjs.org;name=winston;version=2.2.0"
self._run('devtool add %s ' % package_nodejs)
diff --git a/poky/meta/lib/oeqa/selftest/case.py b/poky/meta/lib/oeqa/selftest/case.py
index 2e446b0..9c08d59 100644
--- a/poky/meta/lib/oeqa/selftest/case.py
+++ b/poky/meta/lib/oeqa/selftest/case.py
@@ -215,31 +215,31 @@
ftools.remove_from_file(self.testinc_path, data)
def recipeinc(self, recipe):
- """Return absolute path of meta-sefltest/recipes-test/<recipe>/test_recipe.inc"""
+ """Return absolute path of meta-selftest/recipes-test/<recipe>/test_recipe.inc"""
return os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
def write_recipeinc(self, recipe, data):
- """Write to meta-sefltest/recipes-test/<recipe>/test_recipe.inc"""
+ """Write to meta-selftest/recipes-test/<recipe>/test_recipe.inc"""
inc_file = self.recipeinc(recipe)
self.logger.debug("Writing to: %s\n%s\n" % (inc_file, data))
ftools.write_file(inc_file, data)
return inc_file
def append_recipeinc(self, recipe, data):
- """Append data to meta-sefltest/recipes-test/<recipe>/test_recipe.inc"""
+ """Append data to meta-selftest/recipes-test/<recipe>/test_recipe.inc"""
inc_file = self.recipeinc(recipe)
self.logger.debug("Appending to: %s\n%s\n" % (inc_file, data))
ftools.append_file(inc_file, data)
return inc_file
def remove_recipeinc(self, recipe, data):
- """Remove data from meta-sefltest/recipes-test/<recipe>/test_recipe.inc"""
+ """Remove data from meta-selftest/recipes-test/<recipe>/test_recipe.inc"""
inc_file = self.recipeinc(recipe)
self.logger.debug("Removing from: %s\n%s\n" % (inc_file, data))
ftools.remove_from_file(inc_file, data)
def delete_recipeinc(self, recipe):
- """Delete meta-sefltest/recipes-test/<recipe>/test_recipe.inc file"""
+ """Delete meta-selftest/recipes-test/<recipe>/test_recipe.inc file"""
inc_file = self.recipeinc(recipe)
self.logger.debug("Deleting file: %s" % inc_file)
try:
diff --git a/poky/meta/lib/oeqa/selftest/cases/bblayers.py b/poky/meta/lib/oeqa/selftest/cases/bblayers.py
index 90a2249..447c54b 100644
--- a/poky/meta/lib/oeqa/selftest/cases/bblayers.py
+++ b/poky/meta/lib/oeqa/selftest/cases/bblayers.py
@@ -2,7 +2,7 @@
import re
import oeqa.utils.ftools as ftools
-from oeqa.utils.commands import runCmd, get_bb_var
+from oeqa.utils.commands import runCmd, get_bb_var, get_bb_vars
from oeqa.selftest.case import OESelftestTestCase
from oeqa.core.decorator.oeid import OETestID
@@ -85,6 +85,31 @@
self.assertNotEqual(result.status, 0, 'bitbake-layers show-recipes -i nonexistentclass should have failed')
self.assertIn('ERROR:', result.output)
+ def test_bitbakelayers_createlayer(self):
+ priority = 10
+ layername = 'test-bitbakelayer-layercreate'
+ layerpath = os.path.join(self.builddir, layername)
+ self.assertFalse(os.path.exists(layerpath), '%s should not exist at this point in time' % layerpath)
+ result = runCmd('bitbake-layers create-layer --priority=%d %s' % (priority, layerpath))
+ self.track_for_cleanup(layerpath)
+ result = runCmd('bitbake-layers add-layer %s' % layerpath)
+ self.add_command_to_tearDown('bitbake-layers remove-layer %s' % layerpath)
+ result = runCmd('bitbake-layers show-layers')
+ find_in_contents = re.search(re.escape(layername) + r'\s+' + re.escape(layerpath) + r'\s+' + re.escape(str(priority)), result.output)
+ self.assertTrue(find_in_contents, "%s not found in layers\n%s" % (layername, result.output))
+
+ layervars = ['BBFILE_PRIORITY', 'BBFILE_PATTERN', 'LAYERDEPENDS', 'LAYERSERIES_COMPAT']
+ bb_vars = get_bb_vars(['BBFILE_COLLECTIONS'] + ['%s_%s' % (v, layername) for v in layervars])
+
+ for v in layervars:
+ varname = '%s_%s' % (v, layername)
+ self.assertIsNotNone(bb_vars[varname], "%s not found" % varname)
+
+ find_in_contents = re.search(r'(^|\s)' + re.escape(layername) + r'($|\s)', bb_vars['BBFILE_COLLECTIONS'])
+ self.assertTrue(find_in_contents, "%s not in BBFILE_COLLECTIONS" % layername)
+
+ self.assertEqual(bb_vars['BBFILE_PRIORITY_%s' % layername], str(priority), 'BBFILE_PRIORITY_%s != %d' % (layername, priority))
+
def get_recipe_basename(self, recipe):
recipe_file = ""
result = runCmd("bitbake-layers show-recipes -f %s" % recipe)
diff --git a/poky/meta/lib/oeqa/selftest/cases/bbtests.py b/poky/meta/lib/oeqa/selftest/cases/bbtests.py
index 3506149..005fdd0 100644
--- a/poky/meta/lib/oeqa/selftest/cases/bbtests.py
+++ b/poky/meta/lib/oeqa/selftest/cases/bbtests.py
@@ -15,16 +15,26 @@
return l
@OETestID(789)
+ # Test bitbake can run from the <builddir>/conf directory
def test_run_bitbake_from_dir_1(self):
os.chdir(os.path.join(self.builddir, 'conf'))
self.assertEqual(bitbake('-e').status, 0, msg = "bitbake couldn't run from \"conf\" dir")
@OETestID(790)
+ # Test bitbake can run from the <builddir>'s parent directory
def test_run_bitbake_from_dir_2(self):
my_env = os.environ.copy()
my_env['BBPATH'] = my_env['BUILDDIR']
os.chdir(os.path.dirname(os.environ['BUILDDIR']))
- self.assertEqual(bitbake('-e', env=my_env).status, 0, msg = "bitbake couldn't run from builddir")
+ self.assertEqual(bitbake('-e', env=my_env).status, 0, msg = "bitbake couldn't run from builddir's parent directory")
+
+ # Test bitbake can run from some other random system location (we use /tmp/)
+ def test_run_bitbake_from_dir_3(self):
+ my_env = os.environ.copy()
+ my_env['BBPATH'] = my_env['BUILDDIR']
+ os.chdir("/tmp/")
+ self.assertEqual(bitbake('-e', env=my_env).status, 0, msg = "bitbake couldn't run from /tmp/")
+
@OETestID(806)
def test_event_handler(self):
diff --git a/poky/meta/lib/oeqa/selftest/cases/buildoptions.py b/poky/meta/lib/oeqa/selftest/cases/buildoptions.py
index 24597ac..f234bac 100644
--- a/poky/meta/lib/oeqa/selftest/cases/buildoptions.py
+++ b/poky/meta/lib/oeqa/selftest/cases/buildoptions.py
@@ -180,3 +180,26 @@
bitbake('gcc-runtime libgfortran')
+class SourceMirroring(OESelftestTestCase):
+ # Can we download everything from the Yocto Sources Mirror over http only
+ def test_yocto_source_mirror(self):
+ self.write_config("""
+BB_ALLOWED_NETWORKS = "downloads.yoctoproject.org"
+MIRRORS = ""
+DL_DIR = "${TMPDIR}/test_downloads"
+PREMIRRORS = "\\
+ bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ gitsm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ http://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n \\
+ https://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \\n"
+""")
+
+ bitbake("world --runall fetch")
+
diff --git a/poky/meta/lib/oeqa/selftest/cases/devtool.py b/poky/meta/lib/oeqa/selftest/cases/devtool.py
index d5b6a46..9eb9bad 100644
--- a/poky/meta/lib/oeqa/selftest/cases/devtool.py
+++ b/poky/meta/lib/oeqa/selftest/cases/devtool.py
@@ -11,9 +11,124 @@
from oeqa.utils.commands import get_bb_vars, runqemu, get_test_layer
from oeqa.core.decorator.oeid import OETestID
+oldmetapath = None
+
+def setUpModule():
+ import bb.utils
+
+ global templayerdir
+ templayerdir = tempfile.mkdtemp(prefix='devtoolqa')
+ corecopydir = os.path.join(templayerdir, 'core-copy')
+ bblayers_conf = os.path.join(os.environ['BUILDDIR'], 'conf', 'bblayers.conf')
+ edited_layers = []
+
+    # We need to take a copy of the meta layer so we can modify it and not
+    # have any races against other tests that might be running in parallel;
+    # however, things like COREBASE mean that you can't just copy meta, you
+    # need the whole repository.
+ def bblayers_edit_cb(layerpath, canonical_layerpath):
+ global oldmetapath
+ if not canonical_layerpath.endswith('/'):
+ # This helps us match exactly when we're using this path later
+ canonical_layerpath += '/'
+ if not edited_layers and canonical_layerpath.endswith('/meta/'):
+ canonical_layerpath = os.path.realpath(canonical_layerpath) + '/'
+ edited_layers.append(layerpath)
+ oldmetapath = os.path.realpath(layerpath)
+ result = runCmd('git rev-parse --show-toplevel', cwd=canonical_layerpath)
+ oldreporoot = result.output.rstrip()
+ newmetapath = os.path.join(corecopydir, os.path.relpath(oldmetapath, oldreporoot))
+ runCmd('git clone %s %s' % (oldreporoot, corecopydir), cwd=templayerdir)
+ # Now we need to copy any modified files
+ # You might ask "why not just copy the entire tree instead of
+ # cloning and doing this?" - well, the problem with that is
+ # TMPDIR or an equally large subdirectory might exist
+ # under COREBASE and we don't want to copy that, so we have
+ # to be selective.
+ result = runCmd('git status --porcelain', cwd=oldreporoot)
+ for line in result.output.splitlines():
+ if line.startswith(' M ') or line.startswith('?? '):
+ relpth = line.split()[1]
+ pth = os.path.join(oldreporoot, relpth)
+ if pth.startswith(canonical_layerpath):
+ if relpth.endswith('/'):
+ destdir = os.path.join(corecopydir, relpth)
+ shutil.copytree(pth, destdir)
+ else:
+ destdir = os.path.join(corecopydir, os.path.dirname(relpth))
+ bb.utils.mkdirhier(destdir)
+ shutil.copy2(pth, destdir)
+ return newmetapath
+ else:
+ return layerpath
+ bb.utils.edit_bblayers_conf(bblayers_conf, None, None, bblayers_edit_cb)
+
+def tearDownModule():
+ if oldmetapath:
+ edited_layers = []
+ def bblayers_edit_cb(layerpath, canonical_layerpath):
+ if not edited_layers and canonical_layerpath.endswith('/meta'):
+ edited_layers.append(layerpath)
+ return oldmetapath
+ else:
+ return layerpath
+ bblayers_conf = os.path.join(os.environ['BUILDDIR'], 'conf', 'bblayers.conf')
+ bb.utils.edit_bblayers_conf(bblayers_conf, None, None, bblayers_edit_cb)
+ shutil.rmtree(templayerdir)
+
class DevtoolBase(OESelftestTestCase):
- buffer = True
+ @classmethod
+ def setUpClass(cls):
+ super(DevtoolBase, cls).setUpClass()
+ bb_vars = get_bb_vars(['TOPDIR', 'SSTATE_DIR'])
+ cls.original_sstate = bb_vars['SSTATE_DIR']
+ cls.devtool_sstate = os.path.join(bb_vars['TOPDIR'], 'sstate_devtool')
+ cls.sstate_conf = 'SSTATE_DIR = "%s"\n' % cls.devtool_sstate
+ cls.sstate_conf += ('SSTATE_MIRRORS += "file://.* file:///%s/PATH"\n'
+ % cls.original_sstate)
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.logger.debug('Deleting devtool sstate cache on %s' % cls.devtool_sstate)
+ runCmd('rm -rf %s' % cls.devtool_sstate)
+ super(DevtoolBase, cls).tearDownClass()
+
+ def setUp(self):
+ """Test case setup function"""
+ super(DevtoolBase, self).setUp()
+ self.workspacedir = os.path.join(self.builddir, 'workspace')
+ self.assertTrue(not os.path.exists(self.workspacedir),
+ 'This test cannot be run with a workspace directory '
+ 'under the build directory')
+ self.append_config(self.sstate_conf)
+
+ def _check_src_repo(self, repo_dir):
+ """Check srctree git repository"""
+ self.assertTrue(os.path.isdir(os.path.join(repo_dir, '.git')),
+ 'git repository for external source tree not found')
+ result = runCmd('git status --porcelain', cwd=repo_dir)
+ self.assertEqual(result.output.strip(), "",
+ 'Created git repo is not clean')
+ result = runCmd('git symbolic-ref HEAD', cwd=repo_dir)
+ self.assertEqual(result.output.strip(), "refs/heads/devtool",
+ 'Wrong branch in git repo')
+
+ def _check_repo_status(self, repo_dir, expected_status):
+ """Check the worktree status of a repository"""
+ result = runCmd('git status . --porcelain',
+ cwd=repo_dir)
+ for line in result.output.splitlines():
+ for ind, (f_status, fn_re) in enumerate(expected_status):
+ if re.match(fn_re, line[3:]):
+ if f_status != line[:2]:
+ self.fail('Unexpected status in line: %s' % line)
+ expected_status.pop(ind)
+ break
+ else:
+ self.fail('Unexpected modified file in line: %s' % line)
+ if expected_status:
+ self.fail('Missing file changes: %s' % expected_status)
def _test_recipe_contents(self, recipefile, checkvars, checkinherits):
with open(recipefile, 'r') as f:
@@ -118,58 +233,6 @@
class DevtoolTests(DevtoolBase):
- @classmethod
- def setUpClass(cls):
- super(DevtoolTests, cls).setUpClass()
- bb_vars = get_bb_vars(['TOPDIR', 'SSTATE_DIR'])
- cls.original_sstate = bb_vars['SSTATE_DIR']
- cls.devtool_sstate = os.path.join(bb_vars['TOPDIR'], 'sstate_devtool')
- cls.sstate_conf = 'SSTATE_DIR = "%s"\n' % cls.devtool_sstate
- cls.sstate_conf += ('SSTATE_MIRRORS += "file://.* file:///%s/PATH"\n'
- % cls.original_sstate)
-
- @classmethod
- def tearDownClass(cls):
- cls.logger.debug('Deleting devtool sstate cache on %s' % cls.devtool_sstate)
- runCmd('rm -rf %s' % cls.devtool_sstate)
- super(DevtoolTests, cls).tearDownClass()
-
- def setUp(self):
- """Test case setup function"""
- super(DevtoolTests, self).setUp()
- self.workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(self.workspacedir),
- 'This test cannot be run with a workspace directory '
- 'under the build directory')
- self.append_config(self.sstate_conf)
-
- def _check_src_repo(self, repo_dir):
- """Check srctree git repository"""
- self.assertTrue(os.path.isdir(os.path.join(repo_dir, '.git')),
- 'git repository for external source tree not found')
- result = runCmd('git status --porcelain', cwd=repo_dir)
- self.assertEqual(result.output.strip(), "",
- 'Created git repo is not clean')
- result = runCmd('git symbolic-ref HEAD', cwd=repo_dir)
- self.assertEqual(result.output.strip(), "refs/heads/devtool",
- 'Wrong branch in git repo')
-
- def _check_repo_status(self, repo_dir, expected_status):
- """Check the worktree status of a repository"""
- result = runCmd('git status . --porcelain',
- cwd=repo_dir)
- for line in result.output.splitlines():
- for ind, (f_status, fn_re) in enumerate(expected_status):
- if re.match(fn_re, line[3:]):
- if f_status != line[:2]:
- self.fail('Unexpected status in line: %s' % line)
- expected_status.pop(ind)
- break
- else:
- self.fail('Unexpected modified file in line: %s' % line)
- if expected_status:
- self.fail('Missing file changes: %s' % expected_status)
-
@OETestID(1158)
def test_create_workspace(self):
# Check preconditions
@@ -191,6 +254,8 @@
self.assertNotIn(tempdir, result.output)
self.assertIn(self.workspacedir, result.output)
+class DevtoolAddTests(DevtoolBase):
+
@OETestID(1159)
def test_devtool_add(self):
# Fetch source
@@ -235,6 +300,8 @@
@OETestID(1423)
def test_devtool_add_git_local(self):
+ # We need dbus built so that DEPENDS recognition works
+ bitbake('dbus')
# Fetch source from a remote URL, but do it outside of devtool
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
@@ -444,6 +511,8 @@
checkvars['SRC_URI'] = url.replace(testver, '${PV}')
self._test_recipe_contents(recipefile, checkvars, [])
+class DevtoolModifyTests(DevtoolBase):
+
@OETestID(1164)
def test_devtool_modify(self):
import oe.path
@@ -689,6 +758,7 @@
self._check_src_repo(tempdir)
# This is probably sufficient
+class DevtoolUpdateTests(DevtoolBase):
@OETestID(1169)
def test_devtool_update_recipe(self):
@@ -1105,6 +1175,8 @@
expected_status = []
self._check_repo_status(os.path.dirname(recipefile), expected_status)
+class DevtoolExtractTests(DevtoolBase):
+
@OETestID(1163)
def test_devtool_extract(self):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
@@ -1274,6 +1346,8 @@
if reqpkgs:
self.fail('The following packages were not present in the image as expected: %s' % ', '.join(reqpkgs))
+class DevtoolUpgradeTests(DevtoolBase):
+
@OETestID(1367)
def test_devtool_upgrade(self):
# Check preconditions
diff --git a/poky/meta/lib/oeqa/selftest/cases/distrodata.py b/poky/meta/lib/oeqa/selftest/cases/distrodata.py
index 7b28004..e7b5e34 100644
--- a/poky/meta/lib/oeqa/selftest/cases/distrodata.py
+++ b/poky/meta/lib/oeqa/selftest/cases/distrodata.py
@@ -21,7 +21,7 @@
Summary: Test that upstream version checks do not regress
Expected: Upstream version checks should succeed except for the recipes listed in the exception list.
Product: oe-core
- Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
"""
checkpkg_result = open(os.path.join(get_bb_var("LOG_DIR"), "checkpkg.csv")).readlines()[1:]
regressed_failures = [pkg_data[0] for pkg_data in [pkg_line.split('\t') for pkg_line in checkpkg_result] if pkg_data[11] == 'UNKNOWN_BROKEN']
@@ -46,7 +46,7 @@
Summary: Test that oe-core recipes have a maintainer
Expected: All oe-core recipes (except a few special static/testing ones) should have a maintainer listed in maintainers.inc file.
Product: oe-core
- Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
"""
def is_exception(pkg):
exceptions = ["packagegroup-", "initramfs-", "systemd-machine-units", "target-sdk-provides-dummy"]
diff --git a/poky/meta/lib/oeqa/selftest/cases/efibootpartition.py b/poky/meta/lib/oeqa/selftest/cases/efibootpartition.py
index 0c83256..c6f39d5 100644
--- a/poky/meta/lib/oeqa/selftest/cases/efibootpartition.py
+++ b/poky/meta/lib/oeqa/selftest/cases/efibootpartition.py
@@ -11,7 +11,6 @@
class GenericEFITest(OESelftestTestCase):
"""EFI booting test class"""
- buffer = True
cmd_common = "runqemu nographic serial wic ovmf"
efi_provider = "systemd-boot"
image = "core-image-minimal"
diff --git a/poky/meta/lib/oeqa/selftest/cases/fetch.py b/poky/meta/lib/oeqa/selftest/cases/fetch.py
new file mode 100644
index 0000000..4acc8cd
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/fetch.py
@@ -0,0 +1,49 @@
+import oe.path
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake
+from oeqa.core.decorator.oeid import OETestID
+
+class Fetch(OESelftestTestCase):
+ @OETestID(1058)
+ def test_git_mirrors(self):
+ """
+ Verify that the git fetcher will fall back to the HTTP mirrors. The
+ recipe needs to be one that we have on the Yocto Project source mirror
+ and is hosted in git.
+ """
+
+ # TODO: mktempd instead of hardcoding
+ dldir = os.path.join(self.builddir, "download-git-mirrors")
+ self.track_for_cleanup(dldir)
+
+ # No mirrors, should use git to fetch successfully
+ features = """
+DL_DIR = "%s"
+MIRRORS_forcevariable = ""
+PREMIRRORS_forcevariable = ""
+""" % dldir
+ self.write_config(features)
+ oe.path.remove(dldir, recurse=True)
+ bitbake("dbus-wait -c fetch -f")
+
+ # No mirrors and broken git, should fail
+ features = """
+DL_DIR = "%s"
+GIT_PROXY_COMMAND = "false"
+MIRRORS_forcevariable = ""
+PREMIRRORS_forcevariable = ""
+""" % dldir
+ self.write_config(features)
+ oe.path.remove(dldir, recurse=True)
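+ # bitbake() asserts on a non-zero exit status, so the broken fetch should surface as an AssertionError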
+ with self.assertRaises(AssertionError):
+ bitbake("dbus-wait -c fetch -f")
+
+ # Broken git but a specific mirror configured, should fetch successfully from the mirror
+ features = """
+DL_DIR = "%s"
+GIT_PROXY_COMMAND = "false"
+MIRRORS_forcevariable = "git://.*/.* http://downloads.yoctoproject.org/mirror/sources/"
+""" % dldir
+ self.write_config(features)
+ oe.path.remove(dldir, recurse=True)
+ bitbake("dbus-wait -c fetch -f")
diff --git a/poky/meta/lib/oeqa/selftest/cases/image_typedep.py b/poky/meta/lib/oeqa/selftest/cases/image_typedep.py
index e678885..932c7f8 100644
--- a/poky/meta/lib/oeqa/selftest/cases/image_typedep.py
+++ b/poky/meta/lib/oeqa/selftest/cases/image_typedep.py
@@ -29,11 +29,14 @@
# like CONVERSION_DEPENDS_bz2="somedep"
result = bitbake('-e emptytest')
+ dep = None
for line in result.output.split('\n'):
if line.startswith('CONVERSION_DEPENDS_bz2'):
dep = line.split('=')[1].strip('"')
break
+ self.assertIsNotNone(dep, "CONVERSION_DEPENDS_bz2 dependency not found in bitbake -e output")
+
# Now get the dependency task list and check for the expected task
# dependency
bitbake('-g emptytest')
diff --git a/poky/meta/lib/oeqa/selftest/cases/imagefeatures.py b/poky/meta/lib/oeqa/selftest/cases/imagefeatures.py
index 09e0b20..8c95432 100644
--- a/poky/meta/lib/oeqa/selftest/cases/imagefeatures.py
+++ b/poky/meta/lib/oeqa/selftest/cases/imagefeatures.py
@@ -10,8 +10,6 @@
test_user = 'tester'
root_user = 'root'
- buffer = True
-
@OETestID(1107)
def test_non_root_user_can_connect_via_ssh_without_password(self):
"""
@@ -23,7 +21,7 @@
AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
"""
- features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh empty-root-password allow-empty-password"\n'
+ features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh empty-root-password allow-empty-password allow-root-login"\n'
features += 'INHERIT += "extrausers"\n'
features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
self.write_config(features)
@@ -49,7 +47,7 @@
AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
"""
- features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh allow-empty-password"\n'
+ features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh allow-empty-password allow-root-login"\n'
features += 'INHERIT += "extrausers"\n'
features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
self.write_config(features)
diff --git a/poky/meta/lib/oeqa/selftest/cases/lic_checksum.py b/poky/meta/lib/oeqa/selftest/cases/lic_checksum.py
index 3740715..f992b37 100644
--- a/poky/meta/lib/oeqa/selftest/cases/lic_checksum.py
+++ b/poky/meta/lib/oeqa/selftest/cases/lic_checksum.py
@@ -19,6 +19,8 @@
os.close(lic_file)
self.track_for_cleanup(lic_path)
+ self.write_config("INHERIT_remove = \"report-error\"")
+
self.write_recipeinc('emptytest', """
INHIBIT_DEFAULT_DEPS = "1"
LIC_FILES_CHKSUM = "file://%s;md5=d41d8cd98f00b204e9800998ecf8427e"
@@ -29,7 +31,6 @@
with open(lic_path, "w") as f:
f.write("data")
- self.write_config("INHERIT_remove = \"report-error\"")
result = bitbake(bitbake_cmd, ignore_status=True)
if error_msg not in result.output:
raise AssertionError(result.output)
diff --git a/poky/meta/lib/oeqa/selftest/cases/oelib/elf.py b/poky/meta/lib/oeqa/selftest/cases/oelib/elf.py
index 74ee6a1..15c03f4 100644
--- a/poky/meta/lib/oeqa/selftest/cases/oelib/elf.py
+++ b/poky/meta/lib/oeqa/selftest/cases/oelib/elf.py
@@ -15,6 +15,7 @@
self.assertEqual(oe.qa.elf_machine_to_string(0x32), "IA-64")
self.assertEqual(oe.qa.elf_machine_to_string(0x3E), "x86-64")
self.assertEqual(oe.qa.elf_machine_to_string(0xB7), "AArch64")
+ self.assertEqual(oe.qa.elf_machine_to_string(0xF7), "BPF")
self.assertEqual(oe.qa.elf_machine_to_string(0x00), "Unknown (0)")
self.assertEqual(oe.qa.elf_machine_to_string(0xDEADBEEF), "Unknown (3735928559)")
diff --git a/poky/meta/lib/oeqa/selftest/cases/oelib/utils.py b/poky/meta/lib/oeqa/selftest/cases/oelib/utils.py
index 9fb6c15..789c6f7 100644
--- a/poky/meta/lib/oeqa/selftest/cases/oelib/utils.py
+++ b/poky/meta/lib/oeqa/selftest/cases/oelib/utils.py
@@ -1,5 +1,8 @@
+import sys
from unittest.case import TestCase
-from oe.utils import packages_filter_out_system, trim_version
+from contextlib import contextmanager
+from io import StringIO
+from oe.utils import packages_filter_out_system, trim_version, multiprocess_launch
class TestPackagesFilterOutSystem(TestCase):
def test_filter(self):
@@ -49,3 +52,48 @@
self.assertEqual(trim_version("1.2.3", 2), "1.2")
self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
+
+
+class TestMultiprocessLaunch(TestCase):
+
+ def test_multiprocesslaunch(self):
+ import bb
+
+ def testfunction(item, d):
+ if item == "2" or item == "1":
+ raise KeyError("Invalid number %s" % item)
+ return "Found %s" % item
+
+ def dummyerror(msg):
+ print("ERROR: %s" % msg)
+ def dummyfatal(msg):
+ print("ERROR: %s" % msg)
+ raise bb.BBHandledException()
+
+ @contextmanager
+ def captured_output():
+ new_out, new_err = StringIO(), StringIO()
+ old_out, old_err = sys.stdout, sys.stderr
+ try:
+ sys.stdout, sys.stderr = new_out, new_err
+ yield sys.stdout, sys.stderr
+ finally:
+ sys.stdout, sys.stderr = old_out, old_err
+
+ d = bb.data_smart.DataSmart()
+ bb.error = dummyerror
+ bb.fatal = dummyfatal
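+ # multiprocess_launch reports worker exceptions via bb.error/bb.fatal, so stub them out to capture the messages here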
+
+ # Assert the function returns the right results
+ result = multiprocess_launch(testfunction, ["3", "4", "5", "6"], d, extraargs=(d,))
+ self.assertIn("Found 3", result)
+ self.assertIn("Found 4", result)
+ self.assertIn("Found 5", result)
+ self.assertIn("Found 6", result)
+ self.assertEqual(len(result), 4)
+
+ # Assert the function prints exceptions
+ with captured_output() as (out, err):
+ self.assertRaises(bb.BBHandledException, multiprocess_launch, testfunction, ["1", "2", "3", "4", "5", "6"], d, extraargs=(d,))
+ self.assertIn("KeyError: 'Invalid number 1'", out.getvalue())
+ self.assertIn("KeyError: 'Invalid number 2'", out.getvalue())
diff --git a/poky/meta/lib/oeqa/selftest/cases/oescripts.py b/poky/meta/lib/oeqa/selftest/cases/oescripts.py
index 1ee7537..bcdc2d5 100644
--- a/poky/meta/lib/oeqa/selftest/cases/oescripts.py
+++ b/poky/meta/lib/oeqa/selftest/cases/oescripts.py
@@ -10,6 +10,19 @@
target = 'xcursor-transparent-theme'
self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True)
self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True)
+ result = runCmd("oe-pkgdata-util read-value PKGV %s" % target)
+ pkgv = result.output.rstrip()
result = runCmd("buildhistory-diff -p %s" % get_bb_var('BUILDHISTORY_DIR'))
- expected_output = 'PR changed from "r1" to "r0"'
- self.assertTrue(expected_output in result.output, msg="Did not find expected output: %s" % result.output)
+ expected_endlines = [
+ "xcursor-transparent-theme-dev: RDEPENDS: removed \"xcursor-transparent-theme (['= %s-r1'])\", added \"xcursor-transparent-theme (['= %s-r0'])\"" % (pkgv, pkgv),
+ "xcursor-transparent-theme-staticdev: RDEPENDS: removed \"xcursor-transparent-theme-dev (['= %s-r1'])\", added \"xcursor-transparent-theme-dev (['= %s-r0'])\"" % (pkgv, pkgv)
+ ]
+ for line in result.output.splitlines():
+ for el in expected_endlines:
+ if line.endswith(el):
+ expected_endlines.remove(el)
+ break
+ else:
+ self.fail('Unexpected line:\n%s\nExpected line endings:\n %s' % (line, '\n '.join(expected_endlines)))
+ if expected_endlines:
+ self.fail('Missing expected line endings:\n %s' % '\n '.join(expected_endlines))
diff --git a/poky/meta/lib/oeqa/selftest/cases/package.py b/poky/meta/lib/oeqa/selftest/cases/package.py
index 169698f..0a88dc2 100644
--- a/poky/meta/lib/oeqa/selftest/cases/package.py
+++ b/poky/meta/lib/oeqa/selftest/cases/package.py
@@ -1,6 +1,7 @@
from oeqa.selftest.case import OESelftestTestCase
from oeqa.core.decorator.oeid import OETestID
-from oeqa.utils.commands import bitbake, get_bb_vars
+from oeqa.utils.commands import bitbake, get_bb_vars, get_bb_var, runqemu
+import stat
import subprocess, os
import oe.path
@@ -29,7 +30,7 @@
cls.bindir = oe.path.join(cls.staging, vars["bindir_native"])
cls.libdir = oe.path.join(cls.staging, vars["libdir_native"])
- def setUp(self):
+ def setUpLocal(self):
# Just for convenience
self.staging = type(self).staging
self.bindir = type(self).bindir
@@ -84,3 +85,65 @@
status = subprocess.call(command, env=env)
self.assertIn(status, (99, 100, 101))
self.assertEqual(status - 100, sort, "%s %s (%d) failed" % (ver1, ver2, sort))
+
+class PackageTests(OESelftestTestCase):
+ # Verify that a recipe which sets up hardlink files has those preserved into split packages
+ # Also test file sparseness is preserved
+ def test_preserve_sparse_hardlinks(self):
+ bitbake("selftest-hardlink -c package")
+
+ dest = get_bb_var('PKGDEST', 'selftest-hardlink')
+ bindir = get_bb_var('bindir', 'selftest-hardlink')
+
+ def checkfiles():
+ # Recipe creates 4 hardlinked files; there is a copy in package/ and a copy in packages-split/
+ # so expect 8 in total.
+ self.assertEqual(os.stat(dest + "/selftest-hardlink" + bindir + "/hello1").st_nlink, 8)
+
+ # Test a sparse file remains sparse
+ sparsestat = os.stat(dest + "/selftest-hardlink" + bindir + "/sparsetest")
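+ # Zero allocated blocks for a 1MiB file means the hole in the sparse file was preserved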
+ self.assertEqual(sparsestat.st_blocks, 0)
+ self.assertEqual(sparsestat.st_size, 1048576)
+
+ checkfiles()
+
+ # Clean and reinstall so it's now definitely from sstate, then retest.
+ bitbake("selftest-hardlink -c clean")
+ bitbake("selftest-hardlink -c package")
+
+ checkfiles()
+
+ # Verify that gdb can read symbols from the separated debug hardlink file correctly
+ def test_gdb_hardlink_debug(self):
+ features = 'IMAGE_INSTALL_append = " selftest-hardlink"\n'
+ features += 'IMAGE_INSTALL_append = " selftest-hardlink-dbg"\n'
+ features += 'IMAGE_INSTALL_append = " selftest-hardlink-gdb"\n'
+ self.write_config(features)
+ bitbake("core-image-minimal")
+
+ def gdbtest(qemu, binary):
+ """
+ Check that gdb can read symbols for ``binary`` from the separated debug file
+ """
+ self.logger.info("gdbtest %s" % binary)
+ status, output = qemu.run_serial('/usr/bin/gdb.sh %s' % binary, timeout=60)
+ for l in output.split('\n'):
+ # Check that debugging symbols exist
+ if '(no debugging symbols found)' in l:
+ self.logger.error("No debugging symbols found. GDB result:\n%s" % output)
+ return False
+
+ # Check that debugging symbols work correctly
+ elif "Breakpoint 1, main () at hello.c:4" in l:
+ return True
+
+ self.logger.error("GDB result:\n%s" % output)
+ return False
+
+ with runqemu('core-image-minimal') as qemu:
+ for binary in ['/usr/bin/hello1',
+ '/usr/bin/hello2',
+ '/usr/libexec/hello3',
+ '/usr/libexec/hello4']:
+ if not gdbtest(qemu, binary):
+ self.fail('GDB %s failed' % binary)
diff --git a/poky/meta/lib/oeqa/selftest/cases/recipetool.py b/poky/meta/lib/oeqa/selftest/cases/recipetool.py
index 437eb2a..06f980e 100644
--- a/poky/meta/lib/oeqa/selftest/cases/recipetool.py
+++ b/poky/meta/lib/oeqa/selftest/cases/recipetool.py
@@ -427,6 +427,8 @@
@OETestID(1418)
def test_recipetool_create_cmake(self):
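+ # recipetool resolves detected dependencies to recipe names via pkgdata, so gtk+ needs to have been packaged first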
+ bitbake('-c packagedata gtk+')
+
# Try adding a recipe
temprecipe = os.path.join(self.tempdir, 'recipe')
os.makedirs(temprecipe)
@@ -439,7 +441,7 @@
checkvars['SRC_URI'] = 'http://downloads.yoctoproject.org/mirror/sources/navit-${PV}.tar.gz'
checkvars['SRC_URI[md5sum]'] = '242f398e979a6b8c0f3c802b63435b68'
checkvars['SRC_URI[sha256sum]'] = '13353481d7fc01a4f64e385dda460b51496366bba0fd2cc85a89a0747910e94d'
- checkvars['DEPENDS'] = set(['freetype', 'zlib', 'openssl', 'glib-2.0', 'virtual/libgl', 'virtual/egl', 'gtk+', 'libpng', 'libsdl', 'freeglut', 'dbus-glib'])
+ checkvars['DEPENDS'] = set(['freetype', 'zlib', 'openssl', 'glib-2.0', 'virtual/libgl', 'virtual/egl', 'gtk+', 'libpng', 'libsdl', 'freeglut', 'dbus-glib', 'fribidi'])
inherits = ['cmake', 'python-dir', 'gettext', 'pkgconfig']
self._test_recipe_contents(recipefile, checkvars, inherits)
diff --git a/poky/meta/lib/oeqa/selftest/cases/runqemu.py b/poky/meta/lib/oeqa/selftest/cases/runqemu.py
index a758aaf..4e35bb9 100644
--- a/poky/meta/lib/oeqa/selftest/cases/runqemu.py
+++ b/poky/meta/lib/oeqa/selftest/cases/runqemu.py
@@ -14,8 +14,6 @@
image_is_ready = False
deploy_dir_image = ''
- # We only want to print runqemu stdout/stderr if there is a test case failure
- buffer = True
def setUpLocal(self):
super(RunqemuTests, self).setUpLocal()
@@ -176,14 +174,17 @@
# when qemu was shutdown by the above shutdown command
qemu.runner.stop_thread()
time_track = 0
- while True:
- is_alive = qemu.check()
- if not is_alive:
- return True
- if time_track > timeout:
- return False
- time.sleep(1)
- time_track += 1
+ try:
+ while True:
+ is_alive = qemu.check()
+ if not is_alive:
+ return True
+ if time_track > timeout:
+ return False
+ time.sleep(1)
+ time_track += 1
+ except SystemExit:
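+ # qemu.check() can raise SystemExit once the VM has already gone away; treat that as a clean shutdown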
+ return True
def test_qemu_can_shutdown(self):
self.assertExists(self.qemuboot_conf)
diff --git a/poky/meta/lib/oeqa/selftest/cases/runtime_test.py b/poky/meta/lib/oeqa/selftest/cases/runtime_test.py
index 146daf8..906e460 100644
--- a/poky/meta/lib/oeqa/selftest/cases/runtime_test.py
+++ b/poky/meta/lib/oeqa/selftest/cases/runtime_test.py
@@ -122,6 +122,7 @@
self.skipTest('core-image-full-cmdline not buildable for poky-tiny')
features = 'INHERIT += "testimage"\n'
+ features += 'IMAGE_INSTALL_append = " libssl"\n'
features += 'TEST_SUITES = "ping ssh selftest"\n'
self.write_config(features)
@@ -135,7 +136,7 @@
Summary: Check package feeds functionality for dnf
Expected: 1. Check that remote package feeds can be accessed
Product: oe-core
- Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
"""
if get_bb_var('DISTRO') == 'poky-tiny':
self.skipTest('core-image-full-cmdline not buildable for poky-tiny')
@@ -233,7 +234,7 @@
Expected: The scriptlet failure is properly reported.
The file that is created after the error in the scriptlet is not present.
Product: oe-core
- Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
"""
import oe.path
@@ -251,8 +252,8 @@
features = 'CORE_IMAGE_EXTRA_INSTALL = "postinst-rootfs-failing"\n'
features += 'PACKAGE_CLASSES = "%s"\n' % classes
self.write_config(features)
- bb_result = bitbake('core-image-minimal')
- self.assertGreaterEqual(bb_result.output.find("Intentionally failing postinstall scriptlets of ['postinst-rootfs-failing'] to defer them to first boot is deprecated."), 0,
+ bb_result = bitbake('core-image-minimal', ignore_status=True)
+ self.assertGreaterEqual(bb_result.output.find("Postinstall scriptlets of ['postinst-rootfs-failing'] have failed."), 0,
"Warning about a failed scriptlet not found in bitbake output: %s" %(bb_result.output))
self.assertTrue(os.path.isfile(os.path.join(hosttestdir, "rootfs-before-failure")),
diff --git a/poky/meta/lib/oeqa/selftest/cases/signing.py b/poky/meta/lib/oeqa/selftest/cases/signing.py
index 0edaf40..4fa99ac 100644
--- a/poky/meta/lib/oeqa/selftest/cases/signing.py
+++ b/poky/meta/lib/oeqa/selftest/cases/signing.py
@@ -59,7 +59,7 @@
Expected: Images can be created from signed packages
Product: oe-core
Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ Author: Alexander Kanavin <alex.kanavin@gmail.com>
AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
"""
import oe.packagedata
diff --git a/poky/meta/lib/oeqa/selftest/cases/sstatetests.py b/poky/meta/lib/oeqa/selftest/cases/sstatetests.py
index 7b008e4..077d6e5 100644
--- a/poky/meta/lib/oeqa/selftest/cases/sstatetests.py
+++ b/poky/meta/lib/oeqa/selftest/cases/sstatetests.py
@@ -262,6 +262,7 @@
self.write_config("""
MACHINE = "qemux86"
TMPDIR = "${TOPDIR}/tmp-sstatesamehash"
+TCLIBCAPPEND = ""
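+# Keep TMPDIR exactly as set above; TCLIBCAPPEND would otherwise append the libc name on some distros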
BUILD_ARCH = "x86_64"
BUILD_OS = "linux"
SDKMACHINE = "x86_64"
@@ -272,6 +273,7 @@
self.write_config("""
MACHINE = "qemux86"
TMPDIR = "${TOPDIR}/tmp-sstatesamehash2"
+TCLIBCAPPEND = ""
BUILD_ARCH = "i686"
BUILD_OS = "linux"
SDKMACHINE = "i686"
@@ -307,12 +309,14 @@
self.write_config("""
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+TCLIBCAPPEND = \"\"
NATIVELSBSTRING = \"DistroA\"
""")
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
bitbake("core-image-sato -S none")
self.write_config("""
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+TCLIBCAPPEND = \"\"
NATIVELSBSTRING = \"DistroB\"
""")
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
@@ -340,25 +344,25 @@
configA = """
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+TCLIBCAPPEND = \"\"
MACHINE = \"qemux86-64\"
"""
configB = """
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+TCLIBCAPPEND = \"\"
MACHINE = \"qemuarm\"
"""
self.sstate_allarch_samesigs(configA, configB)
@OETestID(1645)
- def test_sstate_allarch_samesigs_multilib(self):
+ def test_sstate_nativesdk_samesigs_multilib(self):
"""
- The sstate checksums of allarch multilib packages should be independent of whichever
- MACHINE is set. Check this using bitbake -S.
- Also, rather than duplicate the test, check nativesdk stamps are the same between
- the two MACHINE values.
+ Check nativesdk stamps are the same between the two MACHINE values.
"""
configA = """
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+TCLIBCAPPEND = \"\"
MACHINE = \"qemux86-64\"
require conf/multilib.conf
MULTILIBS = \"multilib:lib32\"
@@ -366,6 +370,7 @@
"""
configB = """
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+TCLIBCAPPEND = \"\"
MACHINE = \"qemuarm\"
require conf/multilib.conf
MULTILIBS = \"\"
@@ -392,10 +397,6 @@
(_, task, _, shash) = name.rsplit(".", 3)
f[os.path.join(os.path.basename(root), task)] = shash
return f
- files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps/all" + self.target_vendor + "-" + self.target_os)
- files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps/all" + self.target_vendor + "-" + self.target_os)
- self.maxDiff = None
- self.assertEqual(files1, files2)
nativesdkdir = os.path.basename(glob.glob(self.topdir + "/tmp-sstatesamehash/stamps/*-nativesdk*-linux")[0])
@@ -414,6 +415,7 @@
self.write_config("""
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+TCLIBCAPPEND = \"\"
MACHINE = \"qemux86\"
require conf/multilib.conf
MULTILIBS = "multilib:lib32"
@@ -423,6 +425,7 @@
bitbake("world meta-toolchain -S none")
self.write_config("""
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+TCLIBCAPPEND = \"\"
MACHINE = \"qemux86copy\"
require conf/multilib.conf
MULTILIBS = "multilib:lib32"
@@ -458,6 +461,7 @@
self.write_config("""
TMPDIR = "${TOPDIR}/tmp-sstatesamehash"
+TCLIBCAPPEND = ""
BB_NUMBER_THREADS = "${@oe.utils.cpu_count()}"
PARALLEL_MAKE = "-j 1"
DL_DIR = "${TOPDIR}/download1"
@@ -471,6 +475,7 @@
bitbake("world meta-toolchain -S none")
self.write_config("""
TMPDIR = "${TOPDIR}/tmp-sstatesamehash2"
+TCLIBCAPPEND = ""
BB_NUMBER_THREADS = "${@oe.utils.cpu_count()+1}"
PARALLEL_MAKE = "-j 2"
DL_DIR = "${TOPDIR}/download2"
diff --git a/poky/meta/lib/oeqa/selftest/cases/wic.py b/poky/meta/lib/oeqa/selftest/cases/wic.py
index baf3af6..36ee5e5 100644
--- a/poky/meta/lib/oeqa/selftest/cases/wic.py
+++ b/poky/meta/lib/oeqa/selftest/cases/wic.py
@@ -61,94 +61,103 @@
return wrapper
-class Wic(OESelftestTestCase):
+class WicTestCase(OESelftestTestCase):
"""Wic test class."""
image_is_ready = False
- native_sysroot = None
wicenv_cache = {}
def setUpLocal(self):
"""This code is executed before each test method."""
self.resultdir = self.builddir + "/wic-tmp/"
- super(Wic, self).setUpLocal()
- if not self.native_sysroot:
- Wic.native_sysroot = get_bb_var('STAGING_DIR_NATIVE', 'wic-tools')
+ super(WicTestCase, self).setUpLocal()
# Do this here instead of in setUpClass as the base setUp does some
# clean up which can result in the native tools built earlier in
# setUpClass being unavailable.
- if not Wic.image_is_ready:
+ if not WicTestCase.image_is_ready:
if get_bb_var('USE_NLS') == 'yes':
bitbake('wic-tools')
else:
self.skipTest('wic-tools cannot be built due its (intltool|gettext)-native dependency and NLS disable')
bitbake('core-image-minimal')
- Wic.image_is_ready = True
+ WicTestCase.image_is_ready = True
rmtree(self.resultdir, ignore_errors=True)
def tearDownLocal(self):
"""Remove resultdir as it may contain images."""
rmtree(self.resultdir, ignore_errors=True)
- super(Wic, self).tearDownLocal()
+ super(WicTestCase, self).tearDownLocal()
+
+ def _get_image_env_path(self, image):
+ """Generate and obtain the path to <image>.env"""
+ if image not in WicTestCase.wicenv_cache:
+ self.assertEqual(0, bitbake('%s -c do_rootfs_wicenv' % image).status)
+ bb_vars = get_bb_vars(['STAGING_DIR', 'MACHINE'], image)
+ stdir = bb_vars['STAGING_DIR']
+ machine = bb_vars['MACHINE']
+ WicTestCase.wicenv_cache[image] = os.path.join(stdir, machine, 'imgdata')
+ return WicTestCase.wicenv_cache[image]
+
+class Wic(WicTestCase):
@OETestID(1552)
def test_version(self):
"""Test wic --version"""
- self.assertEqual(0, runCmd('wic --version').status)
+ runCmd('wic --version')
@OETestID(1208)
def test_help(self):
"""Test wic --help and wic -h"""
- self.assertEqual(0, runCmd('wic --help').status)
- self.assertEqual(0, runCmd('wic -h').status)
+ runCmd('wic --help')
+ runCmd('wic -h')
@OETestID(1209)
def test_createhelp(self):
"""Test wic create --help"""
- self.assertEqual(0, runCmd('wic create --help').status)
+ runCmd('wic create --help')
@OETestID(1210)
def test_listhelp(self):
"""Test wic list --help"""
- self.assertEqual(0, runCmd('wic list --help').status)
+ runCmd('wic list --help')
@OETestID(1553)
def test_help_create(self):
"""Test wic help create"""
- self.assertEqual(0, runCmd('wic help create').status)
+ runCmd('wic help create')
@OETestID(1554)
def test_help_list(self):
"""Test wic help list"""
- self.assertEqual(0, runCmd('wic help list').status)
+ runCmd('wic help list')
@OETestID(1215)
def test_help_overview(self):
"""Test wic help overview"""
- self.assertEqual(0, runCmd('wic help overview').status)
+ runCmd('wic help overview')
@OETestID(1216)
def test_help_plugins(self):
"""Test wic help plugins"""
- self.assertEqual(0, runCmd('wic help plugins').status)
+ runCmd('wic help plugins')
@OETestID(1217)
def test_help_kickstart(self):
"""Test wic help kickstart"""
- self.assertEqual(0, runCmd('wic help kickstart').status)
+ runCmd('wic help kickstart')
@OETestID(1555)
def test_list_images(self):
"""Test wic list images"""
- self.assertEqual(0, runCmd('wic list images').status)
+ runCmd('wic list images')
@OETestID(1556)
def test_list_source_plugins(self):
"""Test wic list source-plugins"""
- self.assertEqual(0, runCmd('wic list source-plugins').status)
+ runCmd('wic list source-plugins')
@OETestID(1557)
def test_listed_images_help(self):
@@ -156,7 +165,7 @@
output = runCmd('wic list images').output
imagelist = [line.split()[0] for line in output.splitlines()]
for image in imagelist:
- self.assertEqual(0, runCmd('wic list %s help' % image).status)
+ runCmd('wic list %s help' % image)
@OETestID(1213)
def test_unsupported_subcommand(self):
@@ -172,7 +181,7 @@
def test_build_image_name(self):
"""Test wic create wictestdisk --image-name=core-image-minimal"""
cmd = "wic create wictestdisk --image-name=core-image-minimal -o %s" % self.resultdir
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
@OETestID(1157)
@@ -180,7 +189,7 @@
def test_gpt_image(self):
"""Test creation of core-image-minimal with gpt table and UUID boot"""
cmd = "wic create directdisk-gpt --image-name core-image-minimal -o %s" % self.resultdir
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
@OETestID(1346)
@@ -194,7 +203,7 @@
bitbake('core-image-minimal core-image-minimal-initramfs')
self.remove_config(config)
cmd = "wic create mkhybridiso --image-name core-image-minimal -o %s" % self.resultdir
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.direct")))
self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.iso")))
@@ -203,7 +212,7 @@
def test_qemux86_directdisk(self):
"""Test creation of qemux-86-directdisk image"""
cmd = "wic create qemux86-directdisk -e core-image-minimal -o %s" % self.resultdir
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
self.assertEqual(1, len(glob(self.resultdir + "qemux86-directdisk-*direct")))
@OETestID(1350)
@@ -211,7 +220,7 @@
def test_mkefidisk(self):
"""Test creation of mkefidisk image"""
cmd = "wic create mkefidisk -e core-image-minimal -o %s" % self.resultdir
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
self.assertEqual(1, len(glob(self.resultdir + "mkefidisk-*direct")))
@OETestID(1385)
@@ -223,7 +232,7 @@
bitbake('core-image-minimal')
self.remove_config(config)
cmd = "wic create directdisk-bootloader-config -e core-image-minimal -o %s" % self.resultdir
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-bootloader-config-*direct")))
@OETestID(1560)
@@ -235,7 +244,7 @@
bitbake('core-image-minimal')
self.remove_config(config)
cmd = "wic create systemd-bootdisk -e core-image-minimal -o %s" % self.resultdir
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
self.assertEqual(1, len(glob(self.resultdir + "systemd-bootdisk-*direct")))
@OETestID(1561)
@@ -244,7 +253,7 @@
cmd = "wic create sdimage-bootpart -e core-image-minimal -o %s" % self.resultdir
kimgtype = get_bb_var('KERNEL_IMAGETYPE', 'core-image-minimal')
self.write_config('IMAGE_BOOT_FILES = "%s"\n' % kimgtype)
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
self.assertEqual(1, len(glob(self.resultdir + "sdimage-bootpart-*direct")))
@OETestID(1562)
@@ -258,7 +267,7 @@
bitbake('core-image-minimal')
self.remove_config(config)
cmd = "wic create directdisk -e core-image-minimal"
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
self.assertEqual(1, len(glob("directdisk-*.direct")))
@OETestID(1212)
@@ -271,37 +280,36 @@
'core-image-minimal'))
bbvars = {key.lower(): value for key, value in bb_vars.items()}
bbvars['resultdir'] = self.resultdir
- status = runCmd("wic create directdisk "
+ runCmd("wic create directdisk "
"-b %(staging_datadir)s "
"-k %(deploy_dir_image)s "
"-n %(recipe_sysroot_native)s "
"-r %(image_rootfs)s "
- "-o %(resultdir)s" % bbvars).status
- self.assertEqual(0, status)
+ "-o %(resultdir)s" % bbvars)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
@OETestID(1264)
def test_compress_gzip(self):
"""Test compressing an image with gzip"""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name core-image-minimal "
- "-c gzip -o %s" % self.resultdir).status)
+ "-c gzip -o %s" % self.resultdir)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.gz")))
@OETestID(1265)
def test_compress_bzip2(self):
"""Test compressing an image with bzip2"""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=core-image-minimal "
- "-c bzip2 -o %s" % self.resultdir).status)
+ "-c bzip2 -o %s" % self.resultdir)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.bz2")))
@OETestID(1266)
def test_compress_xz(self):
"""Test compressing an image with xz"""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=core-image-minimal "
- "--compress-with=xz -o %s" % self.resultdir).status)
+ "--compress-with=xz -o %s" % self.resultdir)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.xz")))
@OETestID(1267)
@@ -315,63 +323,62 @@
@OETestID(1558)
def test_debug_short(self):
"""Test -D option"""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=core-image-minimal "
- "-D -o %s" % self.resultdir).status)
+ "-D -o %s" % self.resultdir)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
@OETestID(1658)
def test_debug_long(self):
"""Test --debug option"""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=core-image-minimal "
- "--debug -o %s" % self.resultdir).status)
+ "--debug -o %s" % self.resultdir)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
@OETestID(1563)
def test_skip_build_check_short(self):
"""Test -s option"""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=core-image-minimal "
- "-s -o %s" % self.resultdir).status)
+ "-s -o %s" % self.resultdir)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
@OETestID(1671)
def test_skip_build_check_long(self):
"""Test --skip-build-check option"""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=core-image-minimal "
"--skip-build-check "
- "--outdir %s" % self.resultdir).status)
+ "--outdir %s" % self.resultdir)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
@OETestID(1564)
def test_build_rootfs_short(self):
"""Test -f option"""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=core-image-minimal "
- "-f -o %s" % self.resultdir).status)
+ "-f -o %s" % self.resultdir)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
@OETestID(1656)
def test_build_rootfs_long(self):
"""Test --build-rootfs option"""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=core-image-minimal "
"--build-rootfs "
- "--outdir %s" % self.resultdir).status)
+ "--outdir %s" % self.resultdir)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
@OETestID(1268)
@only_for_arch(['i586', 'i686', 'x86_64'])
def test_rootfs_indirect_recipes(self):
"""Test usage of rootfs plugin with rootfs recipes"""
- status = runCmd("wic create directdisk-multi-rootfs "
+ runCmd("wic create directdisk-multi-rootfs "
"--image-name=core-image-minimal "
"--rootfs rootfs1=core-image-minimal "
"--rootfs rootfs2=core-image-minimal "
- "--outdir %s" % self.resultdir).status
- self.assertEqual(0, status)
+ "--outdir %s" % self.resultdir)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-multi-rootfs*.direct")))
@OETestID(1269)
@@ -385,14 +392,13 @@
bbvars = {key.lower(): value for key, value in bb_vars.items()}
bbvars['wks'] = "directdisk-multi-rootfs"
bbvars['resultdir'] = self.resultdir
- status = runCmd("wic create %(wks)s "
+ runCmd("wic create %(wks)s "
"--bootimg-dir=%(staging_datadir)s "
"--kernel-dir=%(deploy_dir_image)s "
"--native-sysroot=%(recipe_sysroot_native)s "
"--rootfs-dir rootfs1=%(image_rootfs)s "
"--rootfs-dir rootfs2=%(image_rootfs)s "
- "--outdir %(resultdir)s" % bbvars).status
- self.assertEqual(0, status)
+ "--outdir %(resultdir)s" % bbvars)
self.assertEqual(1, len(glob(self.resultdir + "%(wks)s-*.direct" % bbvars)))
@OETestID(1661)
@@ -411,8 +417,8 @@
part /usr --source rootfs --ondisk mmcblk0 --fstype=ext4 --rootfs-dir %s/usr
part /etc --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path bin/ --rootfs-dir %s/usr"""
% (rootfs_dir, rootfs_dir))
- self.assertEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \
- % (wks_file, self.resultdir)).status)
+ runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir))
os.remove(wks_file)
wicout = glob(self.resultdir + "%s-*direct" % 'temp')
@@ -422,7 +428,6 @@
# verify partition size with wic
res = runCmd("parted -m %s unit b p 2>/dev/null" % wicimg)
- self.assertEqual(0, res.status)
# parse parted output which looks like this:
# BYT;\n
@@ -438,8 +443,8 @@
self.assertEqual(7, len(partln))
start = int(partln[1].rstrip("B")) / 512
length = int(partln[3].rstrip("B")) / 512
- self.assertEqual(0, runCmd("dd if=%s of=%s skip=%d count=%d" %
- (wicimg, part_file, start, length)).status)
+ runCmd("dd if=%s of=%s skip=%d count=%d" %
+ (wicimg, part_file, start, length))
def extract_files(debugfs_output):
"""
@@ -463,7 +468,6 @@
# /usr.
res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \
os.path.join(self.resultdir, "selftest_img.part1"))
- self.assertEqual(0, res.status)
files = extract_files(res.output)
self.assertIn("etc", files)
self.assertNotIn("usr", files)
@@ -472,7 +476,6 @@
# directories.
res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \
os.path.join(self.resultdir, "selftest_img.part2"))
- self.assertEqual(0, res.status)
files = extract_files(res.output)
self.assertNotIn("etc", files)
self.assertNotIn("usr", files)
@@ -482,7 +485,6 @@
# directory, but not the files inside it.
res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \
os.path.join(self.resultdir, "selftest_img.part3"))
- self.assertEqual(0, res.status)
files = extract_files(res.output)
self.assertNotIn("etc", files)
self.assertNotIn("usr", files)
@@ -490,7 +492,6 @@
self.assertIn("bin", files)
res = runCmd("debugfs -R 'ls -p bin' %s 2>/dev/null" % \
os.path.join(self.resultdir, "selftest_img.part3"))
- self.assertEqual(0, res.status)
files = extract_files(res.output)
self.assertIn(".", files)
self.assertIn("..", files)
@@ -522,12 +523,13 @@
% (wks_file, self.resultdir), ignore_status=True).status)
os.remove(wks_file)
+class Wic2(WicTestCase):
+
@OETestID(1496)
def test_bmap_short(self):
"""Test generation of .bmap file -m option"""
cmd = "wic create wictestdisk -e core-image-minimal -m -o %s" % self.resultdir
- status = runCmd(cmd).status
- self.assertEqual(0, status)
+ runCmd(cmd)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct")))
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct.bmap")))
@@ -535,21 +537,10 @@
def test_bmap_long(self):
"""Test generation of .bmap file --bmap option"""
cmd = "wic create wictestdisk -e core-image-minimal --bmap -o %s" % self.resultdir
- status = runCmd(cmd).status
- self.assertEqual(0, status)
+ runCmd(cmd)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct")))
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct.bmap")))
- def _get_image_env_path(self, image):
- """Generate and obtain the path to <image>.env"""
- if image not in self.wicenv_cache:
- self.assertEqual(0, bitbake('%s -c do_rootfs_wicenv' % image).status)
- bb_vars = get_bb_vars(['STAGING_DIR', 'MACHINE'], image)
- stdir = bb_vars['STAGING_DIR']
- machine = bb_vars['MACHINE']
- self.wicenv_cache[image] = os.path.join(stdir, machine, 'imgdata')
- return self.wicenv_cache[image]
-
@OETestID(1347)
def test_image_env(self):
"""Test generation of <image>.env files."""
@@ -580,10 +571,10 @@
imgenvdir = self._get_image_env_path(image)
native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=%s -v %s -n %s -o %s"
% (image, imgenvdir, native_sysroot,
- self.resultdir)).status)
+ self.resultdir))
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct")))
@OETestID(1665)
@@ -593,13 +584,13 @@
imgenvdir = self._get_image_env_path(image)
native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=%s "
"--vars %s "
"--native-sysroot %s "
"--outdir %s"
% (image, imgenvdir, native_sysroot,
- self.resultdir)).status)
+ self.resultdir))
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct")))
@OETestID(1351)
@@ -679,19 +670,21 @@
Test creation of a simple image with partition size controlled through
--fixed-size flag
"""
- wkspath, wksname = Wic._make_fixed_size_wks(200)
+ wkspath, wksname = Wic2._make_fixed_size_wks(200)
- self.assertEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \
- % (wkspath, self.resultdir)).status)
+ runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wkspath, self.resultdir))
os.remove(wkspath)
wicout = glob(self.resultdir + "%s-*direct" % wksname)
self.assertEqual(1, len(wicout))
wicimg = wicout[0]
+ native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
+
# verify partition size with wic
res = runCmd("parted -m %s unit mib p 2>/dev/null" % wicimg,
- native_sysroot=self.native_sysroot)
+ native_sysroot=native_sysroot)
# parse parted output which looks like this:
# BYT;\n
@@ -709,7 +702,7 @@
--fixed-size flag. The size of partition is intentionally set to 1MiB
in order to trigger an error in wic.
"""
- wkspath, wksname = Wic._make_fixed_size_wks(1)
+ wkspath, wksname = Wic2._make_fixed_size_wks(1)
self.assertEqual(1, runCmd("wic create %s -e core-image-minimal -o %s" \
% (wkspath, self.resultdir), ignore_status=True).status)
@@ -746,7 +739,7 @@
'bootloader --timeout=0 --append="console=ttyS0,115200n8"\n'])
wks.flush()
cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir)
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
wksname = os.path.splitext(os.path.basename(wks.name))[0]
out = glob(self.resultdir + "%s-*direct" % wksname)
self.assertEqual(1, len(out))
@@ -763,10 +756,10 @@
'part emptyvfat --fstype vfat --size 1M\n',
'part emptymsdos --fstype msdos --size 1M\n',
'part emptyext2 --fstype ext2 --size 1M\n',
- 'part emptybtrfs --fstype btrfs --size 100M\n'])
+ 'part emptybtrfs --fstype btrfs --size 150M\n'])
wks.flush()
cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir)
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
wksname = os.path.splitext(os.path.basename(wks.name))[0]
out = glob(self.resultdir + "%s-*direct" % wksname)
self.assertEqual(1, len(out))
@@ -779,7 +772,7 @@
'--overhead-factor 1.2 --size 100k\n'])
wks.flush()
cmd = "wic create %s -e core-image-minimal -o %s" % (wks.name, self.resultdir)
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
wksname = os.path.splitext(os.path.basename(wks.name))[0]
out = glob(self.resultdir + "%s-*direct" % wksname)
self.assertEqual(1, len(out))
@@ -791,7 +784,7 @@
cmd = "wic create sdimage-bootpart -e %s -o %s" % (img, self.resultdir)
config = 'IMAGE_BOOT_FILES = "%s*"' % get_bb_var('KERNEL_IMAGETYPE', img)
self.append_config(config)
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
self.remove_config(config)
self.assertEqual(1, len(glob(self.resultdir + "sdimage-bootpart-*direct")))
@@ -827,9 +820,9 @@
@OETestID(1857)
def test_wic_ls(self):
"""Test listing image content using 'wic ls'"""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=core-image-minimal "
- "-D -o %s" % self.resultdir).status)
+ "-D -o %s" % self.resultdir)
images = glob(self.resultdir + "wictestdisk-*.direct")
self.assertEqual(1, len(images))
@@ -837,20 +830,18 @@
# list partitions
result = runCmd("wic ls %s -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
self.assertEqual(3, len(result.output.split('\n')))
# list directory content of the first partition
result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
self.assertEqual(6, len(result.output.split('\n')))
@OETestID(1856)
def test_wic_cp(self):
"""Test copy files and directories to the the wic image."""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=core-image-minimal "
- "-D -o %s" % self.resultdir).status)
+ "-D -o %s" % self.resultdir)
images = glob(self.resultdir + "wictestdisk-*.direct")
self.assertEqual(1, len(images))
@@ -858,19 +849,16 @@
# list directory content of the first partition
result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
self.assertEqual(6, len(result.output.split('\n')))
with NamedTemporaryFile("w", suffix=".wic-cp") as testfile:
testfile.write("test")
# copy file to the partition
- result = runCmd("wic cp %s %s:1/ -n %s" % (testfile.name, images[0], sysroot))
- self.assertEqual(0, result.status)
+ runCmd("wic cp %s %s:1/ -n %s" % (testfile.name, images[0], sysroot))
# check if file is there
result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
self.assertEqual(7, len(result.output.split('\n')))
self.assertTrue(os.path.basename(testfile.name) in result.output)
@@ -881,21 +869,19 @@
copy(testfile.name, testdir)
# copy directory to the partition
- result = runCmd("wic cp %s %s:1/ -n %s" % (testdir, images[0], sysroot))
- self.assertEqual(0, result.status)
+ runCmd("wic cp %s %s:1/ -n %s" % (testdir, images[0], sysroot))
# check if directory is there
result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
self.assertEqual(8, len(result.output.split('\n')))
self.assertTrue(os.path.basename(testdir) in result.output)
@OETestID(1858)
def test_wic_rm(self):
"""Test removing files and directories from the the wic image."""
- self.assertEqual(0, runCmd("wic create mkefidisk "
+ runCmd("wic create mkefidisk "
"--image-name=core-image-minimal "
- "-D -o %s" % self.resultdir).status)
+ "-D -o %s" % self.resultdir)
images = glob(self.resultdir + "mkefidisk-*.direct")
self.assertEqual(1, len(images))
@@ -903,21 +889,17 @@
# list directory content of the first partition
result = runCmd("wic ls %s:1 -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
self.assertIn('\nBZIMAGE ', result.output)
self.assertIn('\nEFI <DIR> ', result.output)
# remove file
- result = runCmd("wic rm %s:1/bzimage -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
+ runCmd("wic rm %s:1/bzimage -n %s" % (images[0], sysroot))
# remove directory
- result = runCmd("wic rm %s:1/efi -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
+ runCmd("wic rm %s:1/efi -n %s" % (images[0], sysroot))
# check if they're removed
result = runCmd("wic ls %s:1 -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
self.assertNotIn('\nBZIMAGE ', result.output)
self.assertNotIn('\nEFI <DIR> ', result.output)
@@ -936,7 +918,7 @@
'part emptybtrfs --fstype btrfs --size 100M --mkfs-extraopts "--mixed -K"\n'])
wks.flush()
cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir)
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
wksname = os.path.splitext(os.path.basename(wks.name))[0]
out = glob(self.resultdir + "%s-*direct" % wksname)
self.assertEqual(1, len(out))
@@ -966,7 +948,7 @@
sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
cmd = "wic write -n %s --expand 1:0 %s %s" % (sysroot, image_path, new_image_path)
- self.assertEqual(0, runCmd(cmd).status)
+ runCmd(cmd)
# check if partitions are expanded
orig = runCmd("wic ls %s -n %s" % (image_path, sysroot))
@@ -996,9 +978,9 @@
def test_wic_ls_ext(self):
"""Test listing content of the ext partition using 'wic ls'"""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=core-image-minimal "
- "-D -o %s" % self.resultdir).status)
+ "-D -o %s" % self.resultdir)
images = glob(self.resultdir + "wictestdisk-*.direct")
self.assertEqual(1, len(images))
@@ -1006,15 +988,14 @@
# list directory content of the second ext4 partition
result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
self.assertTrue(set(['bin', 'home', 'proc', 'usr', 'var', 'dev', 'lib', 'sbin']).issubset(
set(line.split()[-1] for line in result.output.split('\n') if line)))
def test_wic_cp_ext(self):
"""Test copy files and directories to the ext partition."""
- self.assertEqual(0, runCmd("wic create wictestdisk "
+ runCmd("wic create wictestdisk "
"--image-name=core-image-minimal "
- "-D -o %s" % self.resultdir).status)
+ "-D -o %s" % self.resultdir)
images = glob(self.resultdir + "wictestdisk-*.direct")
self.assertEqual(1, len(images))
@@ -1022,7 +1003,6 @@
# list directory content of the ext4 partition
result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
dirs = set(line.split()[-1] for line in result.output.split('\n') if line)
self.assertTrue(set(['bin', 'home', 'proc', 'usr', 'var', 'dev', 'lib', 'sbin']).issubset(dirs))
@@ -1030,20 +1010,18 @@
testfile.write("test")
# copy file to the partition
- result = runCmd("wic cp %s %s:2/ -n %s" % (testfile.name, images[0], sysroot))
- self.assertEqual(0, result.status)
+ runCmd("wic cp %s %s:2/ -n %s" % (testfile.name, images[0], sysroot))
# check if file is there
result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
newdirs = set(line.split()[-1] for line in result.output.split('\n') if line)
self.assertEqual(newdirs.difference(dirs), set([os.path.basename(testfile.name)]))
def test_wic_rm_ext(self):
"""Test removing files from the ext partition."""
- self.assertEqual(0, runCmd("wic create mkefidisk "
+ runCmd("wic create mkefidisk "
"--image-name=core-image-minimal "
- "-D -o %s" % self.resultdir).status)
+ "-D -o %s" % self.resultdir)
images = glob(self.resultdir + "mkefidisk-*.direct")
self.assertEqual(1, len(images))
@@ -1051,14 +1029,11 @@
# list directory content of the /etc directory on ext4 partition
result = runCmd("wic ls %s:2/etc/ -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
self.assertTrue('fstab' in [line.split()[-1] for line in result.output.split('\n') if line])
# remove file
- result = runCmd("wic rm %s:2/etc/fstab -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
+ runCmd("wic rm %s:2/etc/fstab -n %s" % (images[0], sysroot))
# check if it's removed
result = runCmd("wic ls %s:2/etc/ -n %s" % (images[0], sysroot))
- self.assertEqual(0, result.status)
self.assertTrue('fstab' not in [line.split()[-1] for line in result.output.split('\n') if line])
diff --git a/poky/meta/lib/oeqa/selftest/context.py b/poky/meta/lib/oeqa/selftest/context.py
index 9a56888..c521290 100644
--- a/poky/meta/lib/oeqa/selftest/context.py
+++ b/poky/meta/lib/oeqa/selftest/context.py
@@ -11,6 +11,7 @@
from random import choice
import oeqa
+import oe
from oeqa.core.context import OETestContext, OETestContextExecutor
from oeqa.core.exception import OEQAPreRun, OEQATestNotFound
@@ -25,14 +26,14 @@
self.custommachine = None
self.config_paths = config_paths
- def runTests(self, machine=None, skips=[]):
+ def runTests(self, processes=None, machine=None, skips=[]):
if machine:
self.custommachine = machine
if machine == 'random':
self.custommachine = choice(self.machines)
self.logger.info('Run tests with custom MACHINE set to: %s' % \
self.custommachine)
- return super(OESelftestTestContext, self).runTests(skips)
+ return super(OESelftestTestContext, self).runTests(processes, skips)
def listTests(self, display_type, machine=None):
return super(OESelftestTestContext, self).listTests(display_type)
@@ -68,6 +69,9 @@
action="store_true", default=False,
help='List all available tests.')
+ parser.add_argument('-j', '--num-processes', dest='processes', action='store',
+ type=int, help="number of processes to execute in parallel with")
+
parser.add_argument('--machine', required=False, choices=['random', 'all'],
help='Run tests on different machines (random/all).')
@@ -96,7 +100,6 @@
return cases_paths
def _process_args(self, logger, args):
-
args.test_start_time = time.strftime("%Y%m%d%H%M%S")
args.test_data_file = None
args.CASES_PATHS = None
@@ -143,6 +146,7 @@
self.tc_kwargs['init']['config_paths']['bblayers_backup'])
self.tc_kwargs['run']['skips'] = args.skips
+ self.tc_kwargs['run']['processes'] = args.processes
def _pre_run(self):
def _check_required_env_variables(vars):
@@ -158,7 +162,7 @@
os.chdir(builddir)
if not "meta-selftest" in self.tc.td["BBLAYERS"]:
- self.tc.logger.warn("meta-selftest layer not found in BBLAYERS, adding it")
+ self.tc.logger.warning("meta-selftest layer not found in BBLAYERS, adding it")
meta_selftestdir = os.path.join(
self.tc.td["BBLAYERS_FETCH_DIR"], 'meta-selftest')
if os.path.isdir(meta_selftestdir):
@@ -189,6 +193,10 @@
self.tc.logger.error("You have buildhistory enabled already and this isn't recommended for selftest, please disable it first.")
raise OEQAPreRun
+ if "rm_work.bbclass" in self.tc.td["BBINCLUDED"]:
+ self.tc.logger.error("You have rm_work enabled which isn't recommended while running oe-selftest. Please disable it before continuing.")
+ raise OEQAPreRun
+
if "PRSERV_HOST" in self.tc.td:
self.tc.logger.error("Please unset PRSERV_HOST in order to run oe-selftest")
raise OEQAPreRun
@@ -199,8 +207,8 @@
_add_layer_libs()
- self.tc.logger.info("Running bitbake -p")
- runCmd("bitbake -p")
+ self.tc.logger.info("Running bitbake -e to test the configuration is valid/parsable")
+ runCmd("bitbake -e")
def get_json_result_dir(self, args):
json_result_dir = os.path.join(self.tc.td["LOG_DIR"], 'oeqa')
@@ -216,7 +224,7 @@
configuration = {'TEST_TYPE': 'oeselftest',
'STARTTIME': args.test_start_time,
'MACHINE': self.tc.td["MACHINE"],
- 'HOST_DISTRO': ('-'.join(platform.linux_distribution())).replace(' ', '-'),
+ 'HOST_DISTRO': oe.lsb.distro_identifier().replace(' ', '-'),
'HOST_NAME': metadata['hostname'],
'LAYERS': metadata['layers']}
return configuration
diff --git a/poky/meta/lib/oeqa/utils/httpserver.py b/poky/meta/lib/oeqa/utils/httpserver.py
index 7d12331..a48d499 100644
--- a/poky/meta/lib/oeqa/utils/httpserver.py
+++ b/poky/meta/lib/oeqa/utils/httpserver.py
@@ -1,13 +1,13 @@
import http.server
import multiprocessing
import os
+import traceback
+import signal
from socketserver import ThreadingMixIn
class HTTPServer(ThreadingMixIn, http.server.HTTPServer):
- def server_start(self, root_dir):
- import signal
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ def server_start(self, root_dir, logger):
os.chdir(root_dir)
self.serve_forever()
@@ -18,19 +18,40 @@
class HTTPService(object):
- def __init__(self, root_dir, host=''):
+ def __init__(self, root_dir, host='', logger=None):
self.root_dir = root_dir
self.host = host
self.port = 0
+ self.logger = logger
def start(self):
+ if not os.path.exists(self.root_dir):
+     # logger defaults to None, so guard before logging the early return
+     if self.logger:
+         self.logger.info("Not starting HTTPService for directory %s which doesn't exist" % (self.root_dir))
+     return
+
self.server = HTTPServer((self.host, self.port), HTTPRequestHandler)
if self.port == 0:
self.port = self.server.server_port
- self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir])
+ self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir, self.logger])
+
+ # The signal handler from testimage.bbclass can cause deadlocks here
+ # if the HTTPServer is terminated before it can restore the standard
+ # signal behaviour
+ orig = signal.getsignal(signal.SIGTERM)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.process.start()
+ signal.signal(signal.SIGTERM, orig)
+
+ if self.logger:
+ self.logger.info("Started HTTPService on %s:%s" % (self.host, self.port))
+
def stop(self):
- self.server.server_close()
- self.process.terminate()
- self.process.join()
+ if hasattr(self, "server"):
+ self.server.server_close()
+ if hasattr(self, "process"):
+ self.process.terminate()
+ self.process.join()
+ if self.logger:
+ self.logger.info("Stopped HTTPService on %s:%s" % (self.host, self.port))
+
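
A standalone sketch of the SIGTERM save/restore pattern the hunk above relies on, using only the standard library: the forked HTTP server process must not inherit the parent's SIGTERM handler (installed e.g. by testimage.bbclass), so the default action is put in place around start() and the original handler restored afterwards:

import multiprocessing
import signal
import time

def serve():
    # stand-in for HTTPServer.server_start(); a child forked here inherits
    # whatever SIGTERM disposition the parent had at fork time
    time.sleep(1)

orig = signal.getsignal(signal.SIGTERM)        # remember the caller's handler
signal.signal(signal.SIGTERM, signal.SIG_DFL)  # child should get the default action
process = multiprocessing.Process(target=serve)
process.start()
signal.signal(signal.SIGTERM, orig)            # restore the handler in the parent
process.terminate()
process.join()
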
diff --git a/poky/meta/lib/oeqa/utils/package_manager.py b/poky/meta/lib/oeqa/utils/package_manager.py
index afd5b8e..1495f87 100644
--- a/poky/meta/lib/oeqa/utils/package_manager.py
+++ b/poky/meta/lib/oeqa/utils/package_manager.py
@@ -22,13 +22,15 @@
pm = OpkgPM(d,
root_path,
d.getVar("IPKGCONF_TARGET"),
- d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
+ d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"),
+ filterbydependencies=False)
elif pkg_class == "deb":
pm = DpkgPM(d,
root_path,
d.getVar('PACKAGE_ARCHS'),
- d.getVar('DPKG_ARCH'))
+ d.getVar('DPKG_ARCH'),
+ filterbydependencies=False)
pm.write_index()
pm.update()
diff --git a/poky/meta/lib/oeqa/utils/qemurunner.py b/poky/meta/lib/oeqa/utils/qemurunner.py
index b87d776..c7442a2 100644
--- a/poky/meta/lib/oeqa/utils/qemurunner.py
+++ b/poky/meta/lib/oeqa/utils/qemurunner.py
@@ -350,10 +350,10 @@
return True
def stop(self):
- self.stop_thread()
- self.stop_qemu_system()
if hasattr(self, "origchldhandler"):
signal.signal(signal.SIGCHLD, self.origchldhandler)
+ self.stop_thread()
+ self.stop_qemu_system()
if self.runqemu:
if hasattr(self, "monitorpid"):
os.kill(self.monitorpid, signal.SIGKILL)
@@ -392,7 +392,7 @@
# qemu-system behaves well and a SIGTERM is enough
os.kill(self.qemupid, signal.SIGTERM)
except ProcessLookupError as e:
- self.logger.warn('qemu-system ended unexpectedly')
+ self.logger.warning('qemu-system ended unexpectedly')
def stop_thread(self):
if self.thread and self.thread.is_alive():
@@ -408,7 +408,7 @@
return False
def is_alive(self):
- if not self.runqemu:
+ if not self.runqemu or self.runqemu.poll() is not None:
return False
if os.path.isfile(self.qemu_pidfile):
f = open(self.qemu_pidfile, 'r')
@@ -469,7 +469,7 @@
def _dump_host(self):
self.host_dumper.create_dir("qemu")
- self.logger.warn("Qemu ended unexpectedly, dump data from host"
+ self.logger.warning("Qemu ended unexpectedly, dump data from host"
" is in %s" % self.host_dumper.dump_dir)
self.host_dumper.dump_host()
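
A minimal sketch, not taken from the patch, of the strengthened liveness check in is_alive(): the runqemu handle must both exist and still be running according to poll() before the pidfile is consulted. Any subprocess.Popen handle behaves the same way (the 'sleep' command assumes a POSIX host):

import subprocess

def launcher_alive(runqemu):
    # poll() returns None while the child is still running and its exit
    # status once it has terminated, so this mirrors the new early return
    return runqemu is not None and runqemu.poll() is None

runqemu = subprocess.Popen(['sleep', '2'])
print(launcher_alive(runqemu))   # True while the child runs
runqemu.terminate()
runqemu.wait()
print(launcher_alive(runqemu))   # False once it has exited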