| #!/usr/bin/env python3 |
| |
| # Handle running OE images standalone with QEMU |
| # |
| # Copyright (C) 2006-2011 Linux Foundation |
| # Copyright (c) 2016 Wind River Systems, Inc. |
| # |
| # This program is free software; you can redistribute it and/or modify |
| # it under the terms of the GNU General Public License version 2 as |
| # published by the Free Software Foundation. |
| # |
| # This program is distributed in the hope that it will be useful, |
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| # GNU General Public License for more details. |
| # |
| # You should have received a copy of the GNU General Public License along |
| # with this program; if not, write to the Free Software Foundation, Inc., |
| # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |
| |
| import os |
| import sys |
| import logging |
| import subprocess |
| import re |
| import fcntl |
| import shutil |
| import glob |
| import configparser |
| import signal |
| |
| class RunQemuError(Exception): |
| """Custom exception to raise on known errors.""" |
| pass |
| |
| class OEPathError(RunQemuError): |
| """Custom Exception to give better guidance on missing binaries""" |
| def __init__(self, message): |
| super().__init__("In order for this script to dynamically infer paths to\n \ |
| kernels or filesystem images, you either need bitbake in your PATH\n \ |
| or to source oe-init-build-env before running this script.\n\n \ |
| Dynamic path inference can be avoided by passing a *.qemuboot.conf to\n \ |
| runqemu, i.e. `runqemu /path/to/my-image-name.qemuboot.conf`\n\n %s" % message) |
| |
| |
| def create_logger(): |
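| """Create the shared 'runqemu' logger with a console handler.""" |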
| logger = logging.getLogger('runqemu') |
| logger.setLevel(logging.INFO) |
| |
| # create console handler and set level to debug |
| ch = logging.StreamHandler() |
| ch.setLevel(logging.DEBUG) |
| |
| # create formatter |
| formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') |
| |
| # add formatter to ch |
| ch.setFormatter(formatter) |
| |
| # add ch to logger |
| logger.addHandler(ch) |
| |
| return logger |
| |
| logger = create_logger() |
| |
| def print_usage(): |
| print(""" |
| Usage: you can run this script with any valid combination |
| of the following environment variables (in any order): |
| KERNEL - the kernel image file to use |
| ROOTFS - the rootfs image file or nfsroot directory to use |
| DEVICE_TREE - the device tree blob to use |
| MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified) |
| Simplified QEMU command-line options can be passed with: |
| nographic - disable video console |
| serial - enable a serial console on /dev/ttyS0 |
| slirp - enable user networking, no root privileges are required |
| kvm - enable KVM when running x86/x86_64 (VT-capable CPU required) |
| kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required) |
| publicvnc - enable a VNC server open to all hosts |
| audio - enable audio |
| [*/]ovmf* - OVMF firmware file or base name for booting with UEFI |
| tcpserial=<port> - specify tcp serial port number |
| biosdir=<dir> - specify custom bios dir |
| biosfilename=<filename> - specify bios filename |
| qemuparams=<xyz> - specify custom parameters to QEMU |
| bootparams=<xyz> - specify custom kernel parameters during boot |
| help, -h, --help: print this text |
| -d, --debug: Enable debug output |
| -q, --quiet: Hide most output except error messages |
| |
| Examples: |
| runqemu |
| runqemu qemuarm |
| runqemu tmp/deploy/images/qemuarm |
| runqemu tmp/deploy/images/qemux86/<qemuboot.conf> |
| runqemu qemux86-64 core-image-sato ext4 |
| runqemu qemux86-64 wic-image-minimal wic |
| runqemu path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial |
| runqemu qemux86 iso/hddimg/wic.vmdk/wic.qcow2/wic.vdi/ramfs/cpio.gz... |
| runqemu qemux86 qemuparams="-m 256" |
| runqemu qemux86 bootparams="psplash=false" |
| runqemu path/to/<image>-<machine>.wic |
| runqemu path/to/<image>-<machine>.wic.vmdk |
| """) |
| |
| def check_tun(): |
| """Check /dev/net/tun""" |
| dev_tun = '/dev/net/tun' |
| if not os.path.exists(dev_tun): |
| raise RunQemuError("TUN control device %s is unavailable; you may need to enable TUN (e.g. sudo modprobe tun)" % dev_tun) |
| |
| if not os.access(dev_tun, os.W_OK): |
| raise RunQemuError("TUN control device %s is not writable, please fix (e.g. sudo chmod 666 %s)" % (dev_tun, dev_tun)) |
| |
| def check_libgl(qemu_bin): |
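| """If the QEMU binary is linked against libGLU, make sure libGL.so and libGLU.so are present on the host.""" |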
| cmd = 'ldd %s' % qemu_bin |
| logger.debug('Running %s...' % cmd) |
| need_gl = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8') |
| if re.search('libGLU', need_gl): |
| # We can't run without a libGL.so |
| libgl = False |
| check_files = (('/usr/lib/libGL.so', '/usr/lib/libGLU.so'), \ |
| ('/usr/lib64/libGL.so', '/usr/lib64/libGLU.so'), \ |
| ('/usr/lib/*-linux-gnu/libGL.so', '/usr/lib/*-linux-gnu/libGLU.so')) |
| |
| for (f1, f2) in check_files: |
| if re.search('\*', f1): |
| for g1 in glob.glob(f1): |
| if libgl: |
| break |
| if os.path.exists(g1): |
| for g2 in glob.glob(f2): |
| if os.path.exists(g2): |
| libgl = True |
| break |
| if libgl: |
| break |
| else: |
| if os.path.exists(f1) and os.path.exists(f2): |
| libgl = True |
| break |
| if not libgl: |
| logger.error("You need libGL.so and libGLU.so to exist in your library path to run the QEMU emulator.") |
| logger.error("Ubuntu package names are: libgl1-mesa-dev and libglu1-mesa-dev.") |
| logger.error("Fedora package names are: mesa-libGL-devel mesa-libGLU-devel.") |
| raise RunQemuError('%s requires libGLU, but not found' % qemu_bin) |
| |
| def get_first_file(cmds): |
| """Return first file found in wildcard cmds""" |
| for cmd in cmds: |
| all_files = glob.glob(cmd) |
| if all_files: |
| for f in all_files: |
| if not os.path.isdir(f): |
| return f |
| return '' |
| |
| def check_free_port(host, port): |
| """ Check whether the port is free or not """ |
| import socket |
| from contextlib import closing |
| |
| with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: |
| if sock.connect_ex((host, port)) == 0: |
| # Port is open, so not free |
| return False |
| else: |
| # Port is not open, so free |
| return True |
| |
| class BaseConfig(object): |
| def __init__(self): |
| # self.d holds the vars saved via self.set(); some of them come from qemuboot.conf |
| self.d = {'QB_KERNEL_ROOT': '/dev/vda'} |
| |
| # Supported env vars; add a var here if it may come from the environment, |
| # and don't call os.getenv() directly elsewhere in the code. |
| self.env_vars = ('MACHINE', |
| 'ROOTFS', |
| 'KERNEL', |
| 'DEVICE_TREE', |
| 'DEPLOY_DIR_IMAGE', |
| 'OE_TMPDIR', |
| 'OECORE_NATIVE_SYSROOT', |
| ) |
| |
| self.qemu_opt = '' |
| self.qemu_opt_script = '' |
| self.clean_nfs_dir = False |
| self.nfs_server = '' |
| self.rootfs = '' |
| # File name(s) of an OVMF firmware file or variable store, |
| # to be added with -drive if=pflash. |
| # Found in the same places as the rootfs, with or without one of |
| # these suffixes: qcow2, bin. |
| # Setting one also adds "-vga std" because that is all that |
| # OVMF supports. |
| self.ovmf_bios = [] |
| self.qemuboot = '' |
| self.qbconfload = False |
| self.kernel = '' |
| self.kernel_cmdline = '' |
| self.kernel_cmdline_script = '' |
| self.bootparams = '' |
| self.dtb = '' |
| self.fstype = '' |
| self.kvm_enabled = False |
| self.vhost_enabled = False |
| self.slirp_enabled = False |
| self.nfs_instance = 0 |
| self.nfs_running = False |
| self.serialstdio = False |
| self.cleantap = False |
| self.saved_stty = '' |
| self.audio_enabled = False |
| self.tcpserial_portnum = '' |
| self.custombiosdir = '' |
| self.lock = '' |
| self.lock_descriptor = None |
| self.bitbake_e = '' |
| self.snapshot = False |
| self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs', |
| 'cpio.gz', 'cpio', 'ramfs', 'tar.bz2', 'tar.gz') |
| self.vmtypes = ('hddimg', 'hdddirect', 'wic', 'wic.vmdk', |
| 'wic.qcow2', 'wic.vdi', 'iso') |
| self.network_device = "-device e1000,netdev=net0,mac=@MAC@" |
| # Use different MAC prefixes for tap and slirp so they don't |
| # conflict, e.g., when one instance is running with tap and |
| # another with slirp. |
| # The last octet is assigned dynamically to avoid conflicts when |
| # multiple QEMU instances (tap or slirp) are running at the same |
| # time. |
| self.mac_tap = "52:54:00:12:34:" |
| self.mac_slirp = "52:54:00:12:35:" |
| # pid of the actual qemu process |
| self.qemupid = None |
| # avoid cleanup twice |
| self.cleaned = False |
| |
| def acquire_lock(self, error=True): |
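| """Try to take an exclusive, non-blocking flock on self.lock; return True on success, False otherwise.""" |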
| logger.debug("Acquiring lockfile %s..." % self.lock) |
| try: |
| self.lock_descriptor = open(self.lock, 'w') |
| fcntl.flock(self.lock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB) |
| except Exception as e: |
| msg = "Acquiring lockfile %s failed: %s" % (self.lock, e) |
| if error: |
| logger.error(msg) |
| else: |
| logger.info(msg) |
| if self.lock_descriptor: |
| self.lock_descriptor.close() |
| self.lock_descriptor = None |
| return False |
| return True |
| |
| def release_lock(self): |
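| """Release the tap device lock and remove the lock file.""" |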
| if self.lock_descriptor: |
| logger.debug("Releasing lockfile for tap device '%s'" % self.tap) |
| fcntl.flock(self.lock_descriptor, fcntl.LOCK_UN) |
| self.lock_descriptor.close() |
| os.remove(self.lock) |
| self.lock_descriptor = None |
| |
| def get(self, key): |
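| """Return a saved variable, falling back to the environment, or '' if unset.""" |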
| if key in self.d: |
| return self.d.get(key) |
| elif os.getenv(key): |
| return os.getenv(key) |
| else: |
| return '' |
| |
| def set(self, key, value): |
| self.d[key] = value |
| |
| def is_deploy_dir_image(self, p): |
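| """Return True when p is a directory containing *.qemuboot.conf and *-image-* files.""" |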
| if os.path.isdir(p): |
| if not re.search(r'\.qemuboot\.conf$', '\n'.join(os.listdir(p)), re.M): |
| logger.debug("Can't find required *.qemuboot.conf in %s" % p) |
| return False |
| if not any(map(lambda name: '-image-' in name, os.listdir(p))): |
| logger.debug("Can't find *-image-* in %s" % p) |
| return False |
| return True |
| else: |
| return False |
| |
| def check_arg_fstype(self, fst): |
| """Check and set FSTYPE""" |
| if fst not in self.fstypes + self.vmtypes: |
| logger.warn("Maybe unsupported FSTYPE: %s" % fst) |
| if not self.fstype or self.fstype == fst: |
| if fst == 'ramfs': |
| fst = 'cpio.gz' |
| if fst in ('tar.bz2', 'tar.gz'): |
| fst = 'nfs' |
| self.fstype = fst |
| else: |
| raise RunQemuError("Conflicting: FSTYPE %s and %s" % (self.fstype, fst)) |
| |
| def set_machine_deploy_dir(self, machine, deploy_dir_image): |
| """Set MACHINE and DEPLOY_DIR_IMAGE""" |
| logger.debug('MACHINE: %s' % machine) |
| self.set("MACHINE", machine) |
| logger.debug('DEPLOY_DIR_IMAGE: %s' % deploy_dir_image) |
| self.set("DEPLOY_DIR_IMAGE", deploy_dir_image) |
| |
| def check_arg_nfs(self, p): |
| if os.path.isdir(p): |
| self.rootfs = p |
| else: |
| m = re.match('(.*):(.*)', p) |
| self.nfs_server = m.group(1) |
| self.rootfs = m.group(2) |
| self.check_arg_fstype('nfs') |
| |
| def check_arg_path(self, p): |
| """ |
| - Check whether it is <image>.qemuboot.conf or contains <image>.qemuboot.conf |
| - Check whether it is a kernel file |
| - Check whether it is an image file |
| - Check whether it is an nfs dir |
| - Check whether it is an OVMF flash file |
| """ |
| if p.endswith('.qemuboot.conf'): |
| self.qemuboot = p |
| self.qbconfload = True |
| elif re.search('\.bin$', p) or re.search('bzImage', p) or \ |
| re.search('zImage', p) or re.search('vmlinux', p) or \ |
| re.search('fitImage', p) or re.search('uImage', p): |
| self.kernel = p |
| elif os.path.exists(p) and (not os.path.isdir(p)) and '-image-' in os.path.basename(p): |
| self.rootfs = p |
| # Check the filename against self.fstypes so that <file>.cpio.gz is |
| # handled correctly; otherwise its type would be detected as "gz", |
| # which is incorrect. |
| fst = "" |
| for t in self.fstypes: |
| if p.endswith(t): |
| fst = t |
| break |
| if not fst: |
| m = re.search('.*\.(.*)$', self.rootfs) |
| if m: |
| fst = m.group(1) |
| if fst: |
| self.check_arg_fstype(fst) |
| qb = re.sub('\.' + fst + "$", '', self.rootfs) |
| qb = '%s%s' % (re.sub('\.rootfs$', '', qb), '.qemuboot.conf') |
| if os.path.exists(qb): |
| self.qemuboot = qb |
| self.qbconfload = True |
| else: |
| logger.warn("%s doesn't exist" % qb) |
| else: |
| raise RunQemuError("Can't find FSTYPE from: %s" % p) |
| |
| elif os.path.isdir(p) or (re.search(':', p) and re.search('/', p)): |
| if self.is_deploy_dir_image(p): |
| logger.debug('DEPLOY_DIR_IMAGE: %s' % p) |
| self.set("DEPLOY_DIR_IMAGE", p) |
| else: |
| logger.debug("Assuming %s is an nfs rootfs" % p) |
| self.check_arg_nfs(p) |
| elif os.path.basename(p).startswith('ovmf'): |
| self.ovmf_bios.append(p) |
| else: |
| raise RunQemuError("Unknown path arg %s" % p) |
| |
| def check_arg_machine(self, arg): |
| """Check whether it is a machine""" |
| if self.get('MACHINE') == arg: |
| return |
| elif self.get('MACHINE') and self.get('MACHINE') != arg: |
| raise RunQemuError("Maybe conflicted MACHINE: %s vs %s" % (self.get('MACHINE'), arg)) |
| elif re.search('/', arg): |
| raise RunQemuError("Unknown arg: %s" % arg) |
| |
| logger.debug('Assuming MACHINE = %s' % arg) |
| |
| # if we're running under testimage, or similarly as a child |
| # of an existing bitbake invocation, we can't invoke bitbake |
| # to validate the MACHINE setting and must assume it's correct... |
| # FIXME: testimage.bbclass exports these two variables into env, |
| # are there other scenarios in which we need to support being |
| # invoked by bitbake? |
| deploy = self.get('DEPLOY_DIR_IMAGE') |
| bbchild = deploy and self.get('OE_TMPDIR') |
| if bbchild: |
| self.set_machine_deploy_dir(arg, deploy) |
| return |
| # also check whether we're running under a sourced toolchain |
| # environment file |
| if self.get('OECORE_NATIVE_SYSROOT'): |
| self.set("MACHINE", arg) |
| return |
| |
| cmd = 'MACHINE=%s bitbake -e' % arg |
| logger.info('Running %s...' % cmd) |
| self.bitbake_e = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8') |
| # bitbake -e doesn't report invalid MACHINE as an error, so |
| # let's check DEPLOY_DIR_IMAGE to make sure that it is a valid |
| # MACHINE. |
| s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M) |
| if s: |
| deploy_dir_image = s.group(1) |
| else: |
| raise RunQemuError("bitbake -e %s" % self.bitbake_e) |
| if self.is_deploy_dir_image(deploy_dir_image): |
| self.set_machine_deploy_dir(arg, deploy_dir_image) |
| else: |
| logger.error("%s not a directory valid DEPLOY_DIR_IMAGE" % deploy_dir_image) |
| self.set("MACHINE", arg) |
| |
| def check_args(self): |
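| """Parse the command line arguments into fstype, QEMU options, paths and MACHINE.""" |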
| for debug in ("-d", "--debug"): |
| if debug in sys.argv: |
| logger.setLevel(logging.DEBUG) |
| sys.argv.remove(debug) |
| |
| for quiet in ("-q", "--quiet"): |
| if quiet in sys.argv: |
| logger.setLevel(logging.ERROR) |
| sys.argv.remove(quiet) |
| |
| unknown_arg = "" |
| for arg in sys.argv[1:]: |
| if arg in self.fstypes + self.vmtypes: |
| self.check_arg_fstype(arg) |
| elif arg == 'nographic': |
| self.qemu_opt_script += ' -nographic' |
| self.kernel_cmdline_script += ' console=ttyS0' |
| elif arg == 'serial': |
| self.kernel_cmdline_script += ' console=ttyS0' |
| self.serialstdio = True |
| elif arg == 'audio': |
| logger.info("Enabling audio in qemu") |
| logger.info("Please install sound drivers in linux host") |
| self.audio_enabled = True |
| elif arg == 'kvm': |
| self.kvm_enabled = True |
| elif arg == 'kvm-vhost': |
| self.vhost_enabled = True |
| elif arg == 'slirp': |
| self.slirp_enabled = True |
| elif arg == 'snapshot': |
| self.snapshot = True |
| elif arg == 'publicvnc': |
| self.qemu_opt_script += ' -vnc :0' |
| elif arg.startswith('tcpserial='): |
| self.tcpserial_portnum = arg[len('tcpserial='):] |
| elif arg.startswith('biosdir='): |
| self.custombiosdir = arg[len('biosdir='):] |
| elif arg.startswith('biosfilename='): |
| self.qemu_opt_script += ' -bios %s' % arg[len('biosfilename='):] |
| elif arg.startswith('qemuparams='): |
| self.qemu_opt_script += ' %s' % arg[len('qemuparams='):] |
| elif arg.startswith('bootparams='): |
| self.bootparams = arg[len('bootparams='):] |
| elif os.path.exists(arg) or (re.search(':', arg) and re.search('/', arg)): |
| self.check_arg_path(os.path.abspath(arg)) |
| elif re.search(r'-image-|-image$', arg): |
| # Lazy rootfs |
| self.rootfs = arg |
| elif arg.startswith('ovmf'): |
| self.ovmf_bios.append(arg) |
| else: |
| # At last, assume it is the MACHINE |
| if (not unknown_arg) or unknown_arg == arg: |
| unknown_arg = arg |
| else: |
| raise RunQemuError("Can't handle two unknown args: %s %s\n" |
| "Try 'runqemu help' on how to use it" % \ |
| (unknown_arg, arg)) |
| # Check to make sure it is a valid machine |
| if unknown_arg: |
| if self.get('MACHINE') == unknown_arg: |
| return |
| if self.get('DEPLOY_DIR_IMAGE'): |
| machine = os.path.basename(self.get('DEPLOY_DIR_IMAGE')) |
| if unknown_arg == machine: |
| self.set("MACHINE", machine) |
| return |
| |
| self.check_arg_machine(unknown_arg) |
| |
| if not (self.get('DEPLOY_DIR_IMAGE') or self.qbconfload): |
| self.load_bitbake_env() |
| s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M) |
| if s: |
| self.set("DEPLOY_DIR_IMAGE", s.group(1)) |
| |
| def check_kvm(self): |
| """Check kvm and kvm-host""" |
| if not (self.kvm_enabled or self.vhost_enabled): |
| self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU')) |
| return |
| |
| if not self.get('QB_CPU_KVM'): |
| raise RunQemuError("QB_CPU_KVM is NULL, this board doesn't support kvm") |
| |
| self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU_KVM')) |
| yocto_kvm_wiki = "https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu" |
| yocto_paravirt_kvm_wiki = "https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM" |
| dev_kvm = '/dev/kvm' |
| dev_vhost = '/dev/vhost-net' |
| with open('/proc/cpuinfo', 'r') as f: |
| kvm_cap = re.search('vmx|svm', "".join(f.readlines())) |
| if not kvm_cap: |
| logger.error("You are trying to enable KVM on a cpu without VT support.") |
| logger.error("Remove kvm from the command-line, or refer:") |
| raise RunQemuError(yocto_kvm_wiki) |
| |
| if not os.path.exists(dev_kvm): |
| logger.error("Missing KVM device. Have you inserted kvm modules?") |
| logger.error("For further help see:") |
| raise RunQemuError(yocto_kvm_wiki) |
| |
| if os.access(dev_kvm, os.W_OK|os.R_OK): |
| self.qemu_opt_script += ' -enable-kvm' |
| if self.get('MACHINE') == "qemux86": |
| # Workaround for broken APIC window on pre 4.15 host kernels which causes boot hangs |
| # See YOCTO #12301 |
| # On 64 bit we use x2apic |
| self.kernel_cmdline_script += " clocksource=kvm-clock hpet=disable noapic nolapic" |
| else: |
| logger.error("You have no read or write permission on /dev/kvm.") |
| logger.error("Please change the ownership of this file as described at:") |
| raise RunQemuError(yocto_kvm_wiki) |
| |
| if self.vhost_enabled: |
| if not os.path.exists(dev_vhost): |
| logger.error("Missing virtio net device. Have you inserted vhost-net module?") |
| logger.error("For further help see:") |
| raise RunQemuError(yocto_paravirt_kvm_wiki) |
| |
| if not os.access(dev_vhost, os.W_OK|os.R_OK): |
| logger.error("You have no read or write permission on /dev/vhost-net.") |
| logger.error("Please change the ownership of this file as described at:") |
| raise RunQemuError(yocto_kvm_wiki) |
| |
| def check_fstype(self): |
| """Check and setup FSTYPE""" |
| if not self.fstype: |
| fstype = self.get('QB_DEFAULT_FSTYPE') |
| if fstype: |
| self.fstype = fstype |
| else: |
| raise RunQemuError("FSTYPE is NULL!") |
| |
| def check_rootfs(self): |
| """Check and set rootfs""" |
| |
| if self.fstype == "none": |
| return |
| |
| if self.get('ROOTFS'): |
| if not self.rootfs: |
| self.rootfs = self.get('ROOTFS') |
| elif self.get('ROOTFS') != self.rootfs: |
| raise RunQemuError("Maybe conflicted ROOTFS: %s vs %s" % (self.get('ROOTFS'), self.rootfs)) |
| |
| if self.fstype == 'nfs': |
| return |
| |
| if self.rootfs and not os.path.exists(self.rootfs): |
| # Lazy rootfs |
| self.rootfs = "%s/%s-%s.%s" % (self.get('DEPLOY_DIR_IMAGE'), |
| self.rootfs, self.get('MACHINE'), |
| self.fstype) |
| elif not self.rootfs: |
| cmd_name = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_NAME'), self.fstype) |
| cmd_link = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'), self.fstype) |
| cmds = (cmd_name, cmd_link) |
| self.rootfs = get_first_file(cmds) |
| if not self.rootfs: |
| raise RunQemuError("Failed to find rootfs: %s or %s" % cmds) |
| |
| if not os.path.exists(self.rootfs): |
| raise RunQemuError("Can't find rootfs: %s" % self.rootfs) |
| |
| def check_ovmf(self): |
| """Check and set full path for OVMF firmware and variable file(s).""" |
| |
| for index, ovmf in enumerate(self.ovmf_bios): |
| if os.path.exists(ovmf): |
| continue |
| for suffix in ('qcow2', 'bin'): |
| path = '%s/%s.%s' % (self.get('DEPLOY_DIR_IMAGE'), ovmf, suffix) |
| if os.path.exists(path): |
| self.ovmf_bios[index] = path |
| break |
| else: |
| raise RunQemuError("Can't find OVMF firmware: %s" % ovmf) |
| |
| def check_kernel(self): |
| """Check and set kernel""" |
| # The vm image doesn't need a kernel |
| if self.fstype in self.vmtypes: |
| return |
| |
| # See if the user supplied a KERNEL option |
| if self.get('KERNEL'): |
| self.kernel = self.get('KERNEL') |
| |
| # QB_DEFAULT_KERNEL is always a full file path |
| kernel_name = os.path.basename(self.get('QB_DEFAULT_KERNEL')) |
| |
| # The user didn't want a kernel to be loaded |
| if kernel_name == "none" and not self.kernel: |
| return |
| |
| deploy_dir_image = self.get('DEPLOY_DIR_IMAGE') |
| if not self.kernel: |
| kernel_match_name = "%s/%s" % (deploy_dir_image, kernel_name) |
| kernel_match_link = "%s/%s" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE')) |
| kernel_startswith = "%s/%s*" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE')) |
| cmds = (kernel_match_name, kernel_match_link, kernel_startswith) |
| self.kernel = get_first_file(cmds) |
| if not self.kernel: |
| raise RunQemuError('KERNEL not found: %s, %s or %s' % cmds) |
| |
| if not os.path.exists(self.kernel): |
| raise RunQemuError("KERNEL %s not found" % self.kernel) |
| |
| def check_dtb(self): |
| """Check and set dtb""" |
| # Did the user specify a device tree? |
| if self.get('DEVICE_TREE'): |
| self.dtb = self.get('DEVICE_TREE') |
| if not os.path.exists(self.dtb): |
| raise RunQemuError('Specified DTB not found: %s' % self.dtb) |
| return |
| |
| dtb = self.get('QB_DTB') |
| if dtb: |
| deploy_dir_image = self.get('DEPLOY_DIR_IMAGE') |
| cmd_match = "%s/%s" % (deploy_dir_image, dtb) |
| cmd_startswith = "%s/%s*" % (deploy_dir_image, dtb) |
| cmd_wild = "%s/*.dtb" % deploy_dir_image |
| cmds = (cmd_match, cmd_startswith, cmd_wild) |
| self.dtb = get_first_file(cmds) |
| if not os.path.exists(self.dtb): |
| raise RunQemuError('DTB not found: %s, %s or %s' % cmds) |
| |
| def check_biosdir(self): |
| """Check custombiosdir""" |
| if not self.custombiosdir: |
| return |
| |
| biosdir = "" |
| biosdir_native = "%s/%s" % (self.get('STAGING_DIR_NATIVE'), self.custombiosdir) |
| biosdir_host = "%s/%s" % (self.get('STAGING_DIR_HOST'), self.custombiosdir) |
| for i in (self.custombiosdir, biosdir_native, biosdir_host): |
| if os.path.isdir(i): |
| biosdir = i |
| break |
| |
| if biosdir: |
| logger.debug("Assuming biosdir is: %s" % biosdir) |
| self.qemu_opt_script += ' -L %s' % biosdir |
| else: |
| logger.error("Custom BIOS directory not found. Tried: %s, %s, and %s" % (self.custombiosdir, biosdir_native, biosdir_host)) |
| raise RunQemuError("Invalid custombiosdir: %s" % self.custombiosdir) |
| |
| def check_mem(self): |
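| """Derive the memory size (QB_MEM) and apply it to both the QEMU options and the kernel command line.""" |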
| s = re.search('-m +([0-9]+)', self.qemu_opt_script) |
| if s: |
| self.set('QB_MEM', '-m %s' % s.group(1)) |
| elif not self.get('QB_MEM'): |
| logger.info('QB_MEM is not set, using 512M by default') |
| self.set('QB_MEM', '-m 512') |
| |
| self.kernel_cmdline_script += ' mem=%s' % self.get('QB_MEM').replace('-m','').strip() + 'M' |
| self.qemu_opt_script += ' %s' % self.get('QB_MEM') |
| |
| def check_tcpserial(self): |
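| """Add a TCP serial port to the QEMU options when tcpserial=<port> was given.""" |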
| if self.tcpserial_portnum: |
| if self.get('QB_TCPSERIAL_OPT'): |
| self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', self.tcpserial_portnum) |
| else: |
| self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % self.tcpserial_portnum |
| |
| def check_and_set(self): |
| """Check configs sanity and set when needed""" |
| self.validate_paths() |
| if not self.slirp_enabled: |
| check_tun() |
| # Check audio |
| if self.audio_enabled: |
| if not self.get('QB_AUDIO_DRV'): |
| raise RunQemuError("QB_AUDIO_DRV is NULL, this board doesn't support audio") |
| if not self.get('QB_AUDIO_OPT'): |
| logger.warn('QB_AUDIO_OPT is NULL, you may need to define it to make audio work') |
| else: |
| self.qemu_opt_script += ' %s' % self.get('QB_AUDIO_OPT') |
| os.putenv('QEMU_AUDIO_DRV', self.get('QB_AUDIO_DRV')) |
| else: |
| os.putenv('QEMU_AUDIO_DRV', 'none') |
| |
| self.check_kvm() |
| self.check_fstype() |
| self.check_rootfs() |
| self.check_ovmf() |
| self.check_kernel() |
| self.check_dtb() |
| self.check_biosdir() |
| self.check_mem() |
| self.check_tcpserial() |
| |
| def read_qemuboot(self): |
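| """Find a *.qemuboot.conf (when not given on the command line) and import its config_bsp section via set().""" |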
| if not self.qemuboot: |
| if self.get('DEPLOY_DIR_IMAGE'): |
| deploy_dir_image = self.get('DEPLOY_DIR_IMAGE') |
| else: |
| logger.warn("Can't find qemuboot conf file, DEPLOY_DIR_IMAGE is NULL!") |
| return |
| |
| if self.rootfs and not os.path.exists(self.rootfs): |
| # Lazy rootfs |
| machine = self.get('MACHINE') |
| if not machine: |
| machine = os.path.basename(deploy_dir_image) |
| self.qemuboot = "%s/%s-%s.qemuboot.conf" % (deploy_dir_image, |
| self.rootfs, machine) |
| else: |
| cmd = 'ls -t %s/*.qemuboot.conf' % deploy_dir_image |
| logger.debug('Running %s...' % cmd) |
| try: |
| qbs = subprocess.check_output(cmd, shell=True).decode('utf-8') |
| except subprocess.CalledProcessError as err: |
| raise RunQemuError(err) |
| if qbs: |
| for qb in qbs.split(): |
| # Don't pick an initramfs conf when other choices exist, unless the fstype is a ramfs (cpio.gz) |
| if '-initramfs-' in os.path.basename(qb) and self.fstype != 'cpio.gz': |
| continue |
| self.qemuboot = qb |
| break |
| if not self.qemuboot: |
| # Fall back to the first one when there is no other choice |
| self.qemuboot = qbs.split()[0] |
| self.qbconfload = True |
| |
| if not self.qemuboot: |
| # If we haven't found a .qemuboot.conf at this point it probably |
| # doesn't exist, continue without |
| return |
| |
| if not os.path.exists(self.qemuboot): |
| raise RunQemuError("Failed to find %s (wrong image name or BSP does not support running under qemu?)." % self.qemuboot) |
| |
| logger.debug('CONFFILE: %s' % self.qemuboot) |
| |
| cf = configparser.ConfigParser() |
| cf.read(self.qemuboot) |
| for k, v in cf.items('config_bsp'): |
| k_upper = k.upper() |
| if v.startswith("../"): |
| v = os.path.abspath(os.path.dirname(self.qemuboot) + "/" + v) |
| elif v == ".": |
| v = os.path.dirname(self.qemuboot) |
| self.set(k_upper, v) |
| |
| def validate_paths(self): |
| """Ensure all relevant path variables are set""" |
| # When we're started with a *.qemuboot.conf arg assume that image |
| # artefacts are relative to that file, rather than in whatever |
| # directory DEPLOY_DIR_IMAGE in the conf file points to. |
| if self.qbconfload: |
| imgdir = os.path.realpath(os.path.dirname(self.qemuboot)) |
| if imgdir != os.path.realpath(self.get('DEPLOY_DIR_IMAGE')): |
| logger.info('Setting DEPLOY_DIR_IMAGE to folder containing %s (%s)' % (self.qemuboot, imgdir)) |
| self.set('DEPLOY_DIR_IMAGE', imgdir) |
| |
| # If the STAGING_*_NATIVE directories from the config file don't exist |
| # and we're in a sourced OE build directory try to extract the paths |
| # from `bitbake -e` |
| havenative = os.path.exists(self.get('STAGING_DIR_NATIVE')) and \ |
| os.path.exists(self.get('STAGING_BINDIR_NATIVE')) |
| |
| if not havenative: |
| if not self.bitbake_e: |
| self.load_bitbake_env() |
| |
| if self.bitbake_e: |
| native_vars = ['STAGING_DIR_NATIVE'] |
| for nv in native_vars: |
| s = re.search('^%s="(.*)"' % nv, self.bitbake_e, re.M) |
| if s and s.group(1) != self.get(nv): |
| logger.info('Overriding conf file setting of %s to %s from Bitbake environment' % (nv, s.group(1))) |
| self.set(nv, s.group(1)) |
| else: |
| # when we're invoked from a running bitbake instance we won't |
| # be able to call `bitbake -e`, then try: |
| # - get OE_TMPDIR from environment and guess paths based on it |
| # - get OECORE_NATIVE_SYSROOT from environment (for sdk) |
| tmpdir = self.get('OE_TMPDIR') |
| oecore_native_sysroot = self.get('OECORE_NATIVE_SYSROOT') |
| if tmpdir: |
| logger.info('Setting STAGING_DIR_NATIVE and STAGING_BINDIR_NATIVE relative to OE_TMPDIR (%s)' % tmpdir) |
| hostos, _, _, _, machine = os.uname() |
| buildsys = '%s-%s' % (machine, hostos.lower()) |
| staging_dir_native = '%s/sysroots/%s' % (tmpdir, buildsys) |
| self.set('STAGING_DIR_NATIVE', staging_dir_native) |
| elif oecore_native_sysroot: |
| logger.info('Setting STAGING_DIR_NATIVE to OECORE_NATIVE_SYSROOT (%s)' % oecore_native_sysroot) |
| self.set('STAGING_DIR_NATIVE', oecore_native_sysroot) |
| if self.get('STAGING_DIR_NATIVE'): |
| # we have to assume that STAGING_BINDIR_NATIVE is at usr/bin |
| staging_bindir_native = '%s/usr/bin' % self.get('STAGING_DIR_NATIVE') |
| logger.info('Setting STAGING_BINDIR_NATIVE to %s' % staging_bindir_native) |
| self.set('STAGING_BINDIR_NATIVE', '%s/usr/bin' % self.get('STAGING_DIR_NATIVE')) |
| |
| def print_config(self): |
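| """Print the effective kernel, machine, fstype, rootfs and conf file settings before booting.""" |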
| logger.info('Continuing with the following parameters:\n') |
| if self.fstype not in self.vmtypes: |
| print('KERNEL: [%s]' % self.kernel) |
| if self.dtb: |
| print('DTB: [%s]' % self.dtb) |
| print('MACHINE: [%s]' % self.get('MACHINE')) |
| print('FSTYPE: [%s]' % self.fstype) |
| if self.fstype == 'nfs': |
| print('NFS_DIR: [%s]' % self.rootfs) |
| else: |
| print('ROOTFS: [%s]' % self.rootfs) |
| if self.ovmf_bios: |
| print('OVMF: %s' % self.ovmf_bios) |
| print('CONFFILE: [%s]' % self.qemuboot) |
| print('') |
| |
| def setup_nfs(self): |
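| """Pick a free unfsd instance, extract the rootfs tarball if necessary and start the userspace NFS server.""" |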
| if not self.nfs_server: |
| if self.slirp_enabled: |
| self.nfs_server = '10.0.2.2' |
| else: |
| self.nfs_server = '192.168.7.1' |
| |
| # Figure out a new nfs_instance to allow multiple qemus running. |
| # CentOS 7.1's ps doesn't print full command line without "ww" |
| # when invoked by subprocess.Popen(). |
| cmd = "ps auxww" |
| ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8') |
| pattern = '/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) ' |
| all_instances = re.findall(pattern, ps, re.M) |
| if all_instances: |
| all_instances.sort(key=int) |
| self.nfs_instance = int(all_instances.pop()) + 1 |
| |
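| # Give each unfsd instance its own pair of ports so several runqemu NFS servers can run in parallel. |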
| nfsd_port = 3049 + 2 * self.nfs_instance |
| mountd_port = 3048 + 2 * self.nfs_instance |
| |
| # Export vars for runqemu-export-rootfs |
| export_dict = { |
| 'NFS_INSTANCE': self.nfs_instance, |
| 'NFSD_PORT': nfsd_port, |
| 'MOUNTD_PORT': mountd_port, |
| } |
| for k, v in export_dict.items(): |
| # Use '%s' since they are integers |
| os.putenv(k, '%s' % v) |
| |
| self.unfs_opts="nfsvers=3,port=%s,udp,mountport=%s" % (nfsd_port, mountd_port) |
| |
| # Extract the .tar.bz2 or .tar.gz rootfs if there is no nfs dir yet |
| if not (self.rootfs and os.path.isdir(self.rootfs)): |
| src_prefix = '%s/%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME')) |
| dest = "%s-nfsroot" % src_prefix |
| if os.path.exists('%s.pseudo_state' % dest): |
| logger.info('Use %s as NFS_DIR' % dest) |
| self.rootfs = dest |
| else: |
| src = "" |
| src1 = '%s.tar.bz2' % src_prefix |
| src2 = '%s.tar.gz' % src_prefix |
| if os.path.exists(src1): |
| src = src1 |
| elif os.path.exists(src2): |
| src = src2 |
| if not src: |
| raise RunQemuError("No NFS_DIR is set, and can't find %s or %s to extract" % (src1, src2)) |
| logger.info('NFS_DIR not found, extracting %s to %s' % (src, dest)) |
| cmd = 'runqemu-extract-sdk %s %s' % (src, dest) |
| logger.info('Running %s...' % cmd) |
| if subprocess.call(cmd, shell=True) != 0: |
| raise RunQemuError('Failed to run %s' % cmd) |
| self.clean_nfs_dir = True |
| self.rootfs = dest |
| |
| # Start the userspace NFS server |
| cmd = 'runqemu-export-rootfs start %s' % self.rootfs |
| logger.info('Running %s...' % cmd) |
| if subprocess.call(cmd, shell=True) != 0: |
| raise RunQemuError('Failed to run %s' % cmd) |
| |
| self.nfs_running = True |
| |
| def setup_slirp(self): |
| """Setup user networking""" |
| |
| if self.fstype == 'nfs': |
| self.setup_nfs() |
| self.kernel_cmdline_script += ' ip=dhcp' |
| # Port mapping |
| hostfwd = ",hostfwd=tcp::2222-:22,hostfwd=tcp::2323-:23" |
| qb_slirp_opt_default = "-netdev user,id=net0%s,tftp=%s" % (hostfwd, self.get('DEPLOY_DIR_IMAGE')) |
| qb_slirp_opt = self.get('QB_SLIRP_OPT') or qb_slirp_opt_default |
| # Figure out the port |
| ports = re.findall('hostfwd=[^-]*:([0-9]+)-[^,-]*', qb_slirp_opt) |
| ports = [int(i) for i in ports] |
| mac = 2 |
| # Find a free port to avoid conflicts |
| for p in ports[:]: |
| p_new = p |
| while not check_free_port('localhost', p_new): |
| p_new += 1 |
| mac += 1 |
| while p_new in ports: |
| p_new += 1 |
| mac += 1 |
| if p != p_new: |
| ports.append(p_new) |
| qb_slirp_opt = re.sub(':%s-' % p, ':%s-' % p_new, qb_slirp_opt) |
| logger.info("Port forward changed: %s -> %s" % (p, p_new)) |
| mac = "%s%02x" % (self.mac_slirp, mac) |
| self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qb_slirp_opt)) |
| # Print out port forwards |
| hostfwd = re.findall('(hostfwd=[^,]*)', qb_slirp_opt) |
| if hostfwd: |
| logger.info('Port forward: %s' % ' '.join(hostfwd)) |
| |
| def setup_tap(self): |
| """Setup tap""" |
| |
| # This file is created when runqemu-gen-tapdevs creates a bank of tap |
| # devices, indicating that the user should not bring up new ones using |
| # sudo. |
| nosudo_flag = '/etc/runqemu-nosudo' |
| self.qemuifup = shutil.which('runqemu-ifup') |
| self.qemuifdown = shutil.which('runqemu-ifdown') |
| ip = shutil.which('ip') |
| lockdir = "/tmp/qemu-tap-locks" |
| |
| if not (self.qemuifup and self.qemuifdown and ip): |
| logger.error("runqemu-ifup: %s" % self.qemuifup) |
| logger.error("runqemu-ifdown: %s" % self.qemuifdown) |
| logger.error("ip: %s" % ip) |
| raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found") |
| |
| if not os.path.exists(lockdir): |
| # There might be a race when multiple runqemu processes are |
| # running at the same time. |
| try: |
| os.mkdir(lockdir) |
| os.chmod(lockdir, 0o777) |
| except FileExistsError: |
| pass |
| |
| cmd = '%s link' % ip |
| logger.debug('Running %s...' % cmd) |
| ip_link = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8') |
| # Matches line like: 6: tap0: <foo> |
| possibles = re.findall('^[0-9]+: +(tap[0-9]+): <.*', ip_link, re.M) |
| tap = "" |
| for p in possibles: |
| lockfile = os.path.join(lockdir, p) |
| if os.path.exists('%s.skip' % lockfile): |
| logger.info('Found %s.skip, skipping %s' % (lockfile, p)) |
| continue |
| self.lock = lockfile + '.lock' |
| if self.acquire_lock(error=False): |
| tap = p |
| logger.info("Using preconfigured tap device %s" % tap) |
| logger.info("If this is not intended, touch %s.skip to make runqemu skip %s." %(lockfile, tap)) |
| break |
| |
| if not tap: |
| if os.path.exists(nosudo_flag): |
| logger.error("Error: There are no available tap devices to use for networking,") |
| logger.error("and I see %s exists, so I am not going to try creating" % nosudo_flag) |
| raise RunQemuError("a new one with sudo.") |
| |
| gid = os.getgid() |
| uid = os.getuid() |
| logger.info("Setting up tap interface under sudo") |
| cmd = 'sudo %s %s %s %s' % (self.qemuifup, uid, gid, self.bindir_native) |
| tap = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8').rstrip('\n') |
| lockfile = os.path.join(lockdir, tap) |
| self.lock = lockfile + '.lock' |
| self.acquire_lock() |
| self.cleantap = True |
| logger.debug('Created tap: %s' % tap) |
| |
| if not tap: |
| logger.error("Failed to setup tap device. Run runqemu-gen-tapdevs to manually create.") |
| return 1 |
| self.tap = tap |
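| # Derive addresses from the tap index: tapN uses 192.168.7.<2N+1> as the gateway and 192.168.7.<2N+2> as the client address. |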
| tapnum = int(tap[3:]) |
| gateway = tapnum * 2 + 1 |
| client = gateway + 1 |
| if self.fstype == 'nfs': |
| self.setup_nfs() |
| netconf = "192.168.7.%s::192.168.7.%s:255.255.255.0" % (client, gateway) |
| logger.info("Network configuration: %s", netconf) |
| self.kernel_cmdline_script += " ip=%s" % netconf |
| mac = "%s%02x" % (self.mac_tap, client) |
| qb_tap_opt = self.get('QB_TAP_OPT') |
| if qb_tap_opt: |
| qemu_tap_opt = qb_tap_opt.replace('@TAP@', tap) |
| else: |
| qemu_tap_opt = "-netdev tap,id=net0,ifname=%s,script=no,downscript=no" % (self.tap) |
| |
| if self.vhost_enabled: |
| qemu_tap_opt += ',vhost=on' |
| |
| self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qemu_tap_opt)) |
| |
| def setup_network(self): |
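| """Set up slirp or tap networking unless QB_NET is 'none', saving the stty state first.""" |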
| if self.get('QB_NET') == 'none': |
| return |
| if sys.stdin.isatty(): |
| self.saved_stty = subprocess.check_output("stty -g", shell=True).decode('utf-8') |
| self.network_device = self.get('QB_NETWORK_DEVICE') or self.network_device |
| if self.slirp_enabled: |
| self.setup_slirp() |
| else: |
| self.setup_tap() |
| |
| def setup_rootfs(self): |
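| """Translate the rootfs image and fstype into QEMU drive options and kernel root= parameters.""" |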
| if self.get('QB_ROOTFS') == 'none': |
| return |
| if 'wic.' in self.fstype: |
| self.fstype = self.fstype[4:] |
| rootfs_format = self.fstype if self.fstype in ('vmdk', 'qcow2', 'vdi') else 'raw' |
| |
| qb_rootfs_opt = self.get('QB_ROOTFS_OPT') |
| if qb_rootfs_opt: |
| self.rootfs_options = qb_rootfs_opt.replace('@ROOTFS@', self.rootfs) |
| else: |
| self.rootfs_options = '-drive file=%s,if=virtio,format=%s' % (self.rootfs, rootfs_format) |
| |
| if self.fstype in ('cpio.gz', 'cpio'): |
| self.kernel_cmdline = 'root=/dev/ram0 rw debugshell' |
| self.rootfs_options = '-initrd %s' % self.rootfs |
| else: |
| vm_drive = '' |
| if self.fstype in self.vmtypes: |
| if self.fstype == 'iso': |
| vm_drive = '-drive file=%s,if=virtio,media=cdrom' % self.rootfs |
| elif self.get('QB_DRIVE_TYPE'): |
| drive_type = self.get('QB_DRIVE_TYPE') |
| if drive_type.startswith("/dev/sd"): |
| logger.info('Using scsi drive') |
| vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd' \ |
| % (self.rootfs, rootfs_format) |
| elif drive_type.startswith("/dev/hd"): |
| logger.info('Using ide drive') |
| vm_drive = "-drive file=%s,format=%s" % (self.rootfs, rootfs_format) |
| else: |
| # virtio might have been selected explicitly (just use it), or |
| # is used as fallback (then warn about that). |
| if not drive_type.startswith("/dev/vd"): |
| logger.warn("Unknown QB_DRIVE_TYPE: %s" % drive_type) |
| logger.warn("Failed to figure out drive type, consider define or fix QB_DRIVE_TYPE") |
| logger.warn('Trying to use virtio block drive') |
| vm_drive = '-drive if=virtio,file=%s,format=%s' % (self.rootfs, rootfs_format) |
| |
| # vm_drive may still be empty here, e.g. when QB_DRIVE_TYPE is unset for a non-iso VM image. |
| self.rootfs_options = '%s -no-reboot' % vm_drive |
| self.kernel_cmdline = 'root=%s rw highres=off' % (self.get('QB_KERNEL_ROOT')) |
| |
| if self.fstype == 'nfs': |
| self.rootfs_options = '' |
| k_root = '/dev/nfs nfsroot=%s:%s,%s' % (self.nfs_server, os.path.abspath(self.rootfs), self.unfs_opts) |
| self.kernel_cmdline = 'root=%s rw highres=off' % k_root |
| |
| if self.fstype == 'none': |
| self.rootfs_options = '' |
| |
| self.set('ROOTFS_OPTIONS', self.rootfs_options) |
| |
| def guess_qb_system(self): |
| """attempt to determine the appropriate qemu-system binary""" |
| mach = self.get('MACHINE') |
| if not mach: |
| # List longer machine names before their prefixes (e.g. qemumips64el |
| # before qemumips64) so the alternation picks the most specific match. |
| search = '.*(qemux86-64|qemux86|qemuarm64|qemuarm|qemumips64el|qemumips64|qemumipsel|qemumips|qemuppc|qemuriscv64|qemuriscv32).*' |
| if self.rootfs: |
| match = re.match(search, self.rootfs) |
| if match: |
| mach = match.group(1) |
| elif self.kernel: |
| match = re.match(search, self.kernel) |
| if match: |
| mach = match.group(1) |
| |
| if not mach: |
| return None |
| |
| if mach == 'qemuarm': |
| qbsys = 'arm' |
| elif mach == 'qemuarm64': |
| qbsys = 'aarch64' |
| elif mach == 'qemux86': |
| qbsys = 'i386' |
| elif mach == 'qemux86-64': |
| qbsys = 'x86_64' |
| elif mach == 'qemuppc': |
| qbsys = 'ppc' |
| elif mach == 'qemumips': |
| qbsys = 'mips' |
| elif mach == 'qemumips64': |
| qbsys = 'mips64' |
| elif mach == 'qemumipsel': |
| qbsys = 'mipsel' |
| elif mach == 'qemumips64el': |
| qbsys = 'mips64el' |
| elif mach == 'qemuriscv64': |
| qbsys = 'riscv64' |
| elif mach == 'qemuriscv32': |
| qbsys = 'riscv32' |
| else: |
| logger.error("Unable to determine QEMU PC System emulator for %s machine." % mach) |
| logger.error("As %s is not among valid QEMU machines such as," % mach) |
| logger.error("qemux86-64, qemux86, qemuarm64, qemuarm, qemumips64, qemumips64el, qemumipsel, qemumips, qemuppc") |
| raise RunQemuError("Set qb_system_name with suitable QEMU PC System emulator in .*qemuboot.conf.") |
| |
| return 'qemu-system-%s' % qbsys |
| |
| def setup_final(self): |
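| """Find the qemu-system binary and assemble the final QEMU option string.""" |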
| qemu_system = self.get('QB_SYSTEM_NAME') |
| if not qemu_system: |
| qemu_system = self.guess_qb_system() |
| if not qemu_system: |
| raise RunQemuError("Failed to boot, QB_SYSTEM_NAME is NULL!") |
| |
| qemu_bin = '%s/%s' % (self.bindir_native, qemu_system) |
| |
| # qemu-native may be in ASSUME_PROVIDED, in which case QEMU won't be |
| # found in the native sysroot and the host's QEMU has to be used. |
| if not os.path.exists(qemu_bin): |
| logger.info("QEMU binary not found in %s, trying host's QEMU" % qemu_bin) |
| for path in os.environ.get('PATH', '').split(':'): |
| qemu_bin_tmp = os.path.join(path, qemu_system) |
| logger.info("Trying: %s" % qemu_bin_tmp) |
| if os.path.exists(qemu_bin_tmp): |
| qemu_bin = qemu_bin_tmp |
| if not os.path.isabs(qemu_bin): |
| qemu_bin = os.path.abspath(qemu_bin) |
| logger.info("Using host's QEMU: %s" % qemu_bin) |
| break |
| |
| if not os.access(qemu_bin, os.X_OK): |
| raise OEPathError("No QEMU binary '%s' could be found" % qemu_bin) |
| |
| check_libgl(qemu_bin) |
| |
| self.qemu_opt = "%s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND')) |
| |
| for ovmf in self.ovmf_bios: |
| format = ovmf.rsplit('.', 1)[-1] |
| self.qemu_opt += ' -drive if=pflash,format=%s,file=%s' % (format, ovmf) |
| if self.ovmf_bios: |
| # OVMF only supports normal VGA, i.e. we need to override a -vga vmware |
| # that gets added for example for normal qemux86. |
| self.qemu_opt += ' -vga std' |
| |
| self.qemu_opt += ' ' + self.qemu_opt_script |
| |
| if self.snapshot: |
| self.qemu_opt += " -snapshot" |
| |
| if self.serialstdio: |
| if sys.stdin.isatty(): |
| subprocess.check_call("stty intr ^]", shell=True) |
| logger.info("Interrupt character is '^]'") |
| |
| first_serial = "" |
| if not re.search("-nographic", self.qemu_opt): |
| first_serial = "-serial mon:vc" |
| # We always want a ttyS1. Since qemu by default adds a serial |
| # port when nodefaults is not specified, it seems that all that |
| # would be needed is to make sure a "-serial" is there. However, |
| # it appears that when "-serial" is specified, it ignores the |
| # default serial port that is normally added. So here we make |
| # sure to add two -serial if there are none. And only one if |
| # there is one -serial already. |
| serial_num = len(re.findall("-serial", self.qemu_opt)) |
| if serial_num == 0: |
| self.qemu_opt += " %s %s" % (first_serial, self.get("QB_SERIAL_OPT")) |
| elif serial_num == 1: |
| self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT") |
| |
| # We always want ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES). |
| # If neither the serial nor the tcpserial option was specified, only |
| # ttyS0 is created and sysvinit shows an error trying to enable ttyS1: |
| # INIT: Id "S1" respawning too fast: disabled for 5 minutes |
| serial_num = len(re.findall("-serial", self.qemu_opt)) |
| if serial_num == 0: |
| if re.search("-nographic", self.qemu_opt): |
| self.qemu_opt += " -serial mon:stdio -serial null" |
| else: |
| self.qemu_opt += " -serial mon:vc -serial null" |
| |
| def start_qemu(self): |
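| """Compose the full command line (including kernel and dtb options) and run QEMU, waiting for it to exit.""" |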
| import shlex |
| if self.kernel: |
| kernel_opts = "-kernel %s -append '%s %s %s %s'" % (self.kernel, self.kernel_cmdline, |
| self.kernel_cmdline_script, self.get('QB_KERNEL_CMDLINE_APPEND'), |
| self.bootparams) |
| if self.dtb: |
| kernel_opts += " -dtb %s" % self.dtb |
| else: |
| kernel_opts = "" |
| cmd = "%s %s" % (self.qemu_opt, kernel_opts) |
| cmds = shlex.split(cmd) |
| logger.info('Running %s\n' % cmd) |
| pass_fds = [] |
| if self.lock_descriptor: |
| pass_fds = [self.lock_descriptor.fileno()] |
| process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds) |
| self.qemupid = process.pid |
| retcode = process.wait() |
| if retcode: |
| if retcode == -signal.SIGTERM: |
| logger.info("Qemu terminated by SIGTERM") |
| else: |
| logger.error("Failed to run qemu: %s", process.stderr.read().decode()) |
| |
| def cleanup(self): |
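| """Tear down the tap device, stop the NFS server, restore stty settings and remove any extracted NFS rootfs.""" |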
| if self.cleaned: |
| return |
| |
| # avoid dealing with SIGTERM when cleanup function is running |
| signal.signal(signal.SIGTERM, signal.SIG_IGN) |
| |
| logger.info("Cleaning up") |
| if self.cleantap: |
| cmd = 'sudo %s %s %s' % (self.qemuifdown, self.tap, self.bindir_native) |
| logger.debug('Running %s' % cmd) |
| subprocess.check_call(cmd, shell=True) |
| self.release_lock() |
| |
| if self.nfs_running: |
| logger.info("Shutting down the userspace NFS server...") |
| cmd = "runqemu-export-rootfs stop %s" % self.rootfs |
| logger.debug('Running %s' % cmd) |
| subprocess.check_call(cmd, shell=True) |
| |
| if self.saved_stty: |
| cmd = "stty %s" % self.saved_stty |
| subprocess.check_call(cmd, shell=True) |
| |
| if self.clean_nfs_dir: |
| logger.info('Removing %s' % self.rootfs) |
| shutil.rmtree(self.rootfs) |
| shutil.rmtree('%s.pseudo_state' % self.rootfs) |
| |
| self.cleaned = True |
| |
| def load_bitbake_env(self, mach=None): |
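| """Cache the output of 'bitbake -e' (optionally with MACHINE set) in self.bitbake_e.""" |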
| if self.bitbake_e: |
| return |
| |
| bitbake = shutil.which('bitbake') |
| if not bitbake: |
| return |
| |
| if not mach: |
| mach = self.get('MACHINE') |
| |
| if mach: |
| cmd = 'MACHINE=%s bitbake -e' % mach |
| else: |
| cmd = 'bitbake -e' |
| |
| logger.info('Running %s...' % cmd) |
| try: |
| self.bitbake_e = subprocess.check_output(cmd, shell=True).decode('utf-8') |
| except subprocess.CalledProcessError as err: |
| self.bitbake_e = '' |
| logger.warn("Couldn't run 'bitbake -e' to gather environment information:\n%s" % err.output.decode('utf-8')) |
| |
| def validate_combos(self): |
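| """Reject combinations that make no sense, e.g. a separate kernel together with a VM image type.""" |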
| if (self.fstype in self.vmtypes) and self.kernel: |
| raise RunQemuError("%s doesn't need kernel %s!" % (self.fstype, self.kernel)) |
| |
| @property |
| def bindir_native(self): |
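| """Return STAGING_BINDIR_NATIVE, querying 'bitbake qemu-helper-native -e' when it is not known yet.""" |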
| result = self.get('STAGING_BINDIR_NATIVE') |
| if result and os.path.exists(result): |
| return result |
| |
| cmd = 'bitbake qemu-helper-native -e' |
| logger.info('Running %s...' % cmd) |
| out = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) |
| out = out.stdout.read().decode('utf-8') |
| |
| match = re.search('^STAGING_BINDIR_NATIVE="(.*)"', out, re.M) |
| if match: |
| result = match.group(1) |
| if os.path.exists(result): |
| self.set('STAGING_BINDIR_NATIVE', result) |
| return result |
| raise RunQemuError("Native sysroot directory %s doesn't exist" % result) |
| else: |
| raise RunQemuError("Can't find STAGING_BINDIR_NATIVE in '%s' output" % cmd) |
| |
| |
| def main(): |
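| """Entry point: parse arguments, read the qemuboot config, set everything up and boot QEMU.""" |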
| if "help" in sys.argv or '-h' in sys.argv or '--help' in sys.argv: |
| print_usage() |
| return 0 |
| try: |
| config = BaseConfig() |
| |
| def sigterm_handler(signum, frame): |
| logger.info("SIGTERM received") |
| if config.qemupid: |
| os.kill(config.qemupid, signal.SIGTERM) |
| config.cleanup() |
| subprocess.check_call(["tput", "smam"]) |
| signal.signal(signal.SIGTERM, sigterm_handler) |
| |
| config.check_args() |
| config.read_qemuboot() |
| config.check_and_set() |
| # Check whether the combos is valid or not |
| config.validate_combos() |
| config.print_config() |
| config.setup_network() |
| config.setup_rootfs() |
| config.setup_final() |
| config.start_qemu() |
| except RunQemuError as err: |
| logger.error(err) |
| return 1 |
| except Exception as err: |
| import traceback |
| traceback.print_exc() |
| return 1 |
| finally: |
| config.cleanup() |
| subprocess.check_call(["tput", "smam"]) |
| |
| if __name__ == "__main__": |
| sys.exit(main()) |