Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/autobuilder-worker-prereq-tests  71
-rwxr-xr-x  scripts/bitbake-prserv-tool  3
-rwxr-xr-x  scripts/bitbake-whatchanged  12
-rwxr-xr-x  scripts/buildhistory-collect-srcrevs  14
-rwxr-xr-x  scripts/buildhistory-diff  126
-rwxr-xr-x  scripts/buildstats-diff  511
-rwxr-xr-x  scripts/cleanup-workdir  198
-rwxr-xr-x  scripts/combo-layer  24
-rwxr-xr-x  scripts/combo-layer-hook-default.sh  3
-rwxr-xr-x  scripts/contrib/bb-perf/bb-matrix-plot.sh  15
-rwxr-xr-x  scripts/contrib/bb-perf/bb-matrix.sh  15
-rwxr-xr-x  scripts/contrib/bb-perf/buildstats-plot.sh  144
-rwxr-xr-x  scripts/contrib/bb-perf/buildstats.sh  114
-rwxr-xr-x  scripts/contrib/bbvars.py  154
-rwxr-xr-x  scripts/contrib/build-perf-test-wrapper.sh  184
-rwxr-xr-x  scripts/contrib/build-perf-test.sh  400
-rwxr-xr-x  scripts/contrib/ddimage  92
-rwxr-xr-x  scripts/contrib/devtool-stress.py  13
-rwxr-xr-x  scripts/contrib/dialog-power-control  2
-rwxr-xr-x  scripts/contrib/documentation-audit.sh  3
-rwxr-xr-x  scripts/contrib/graph-tool  13
-rwxr-xr-x  scripts/contrib/list-packageconfig-flags.py  21
-rwxr-xr-x  scripts/contrib/mkefidisk.sh  459
-rwxr-xr-x  scripts/contrib/oe-build-perf-report-email.py  276
-rwxr-xr-x  scripts/contrib/patchreview.py  238
-rwxr-xr-x  scripts/contrib/patchtest.sh  104
-rwxr-xr-x  scripts/contrib/python/generate-manifest-2.7.py  397
-rwxr-xr-x  scripts/contrib/python/generate-manifest-3.5.py  396
-rwxr-xr-x  scripts/contrib/serdevtry  3
-rwxr-xr-x  scripts/contrib/test_build_time.sh  16
-rwxr-xr-x  scripts/contrib/test_build_time_worker.sh  4
-rwxr-xr-x  scripts/contrib/uncovered  15
-rwxr-xr-x  scripts/contrib/verify-homepage.py  8
-rwxr-xr-x  scripts/cp-noerror  2
-rwxr-xr-x  scripts/create-pull-request  53
-rwxr-xr-x  scripts/crosstap  592
-rwxr-xr-x  scripts/devtool  80
-rwxr-xr-x  scripts/gen-lockedsig-cache  58
-rwxr-xr-x  scripts/gen-site-config  12
-rw-r--r--  scripts/lib/argparse_oe.py  11
-rw-r--r--  scripts/lib/build_perf/__init__.py  24
-rw-r--r--  scripts/lib/build_perf/html.py  12
-rw-r--r--  scripts/lib/build_perf/html/measurement_chart.html  50
-rw-r--r--  scripts/lib/build_perf/html/report.html  289
-rw-r--r--  scripts/lib/build_perf/report.py  338
-rw-r--r--  scripts/lib/build_perf/scrape-html-report.js  56
-rw-r--r--  scripts/lib/buildstats.py  346
-rw-r--r--  scripts/lib/checklayer/__init__.py  398
-rw-r--r--  scripts/lib/checklayer/case.py  9
-rw-r--r--  scripts/lib/checklayer/cases/__init__.py (renamed from scripts/lib/wic/imager/__init__.py)  0
-rw-r--r--  scripts/lib/checklayer/cases/bsp.py  206
-rw-r--r--  scripts/lib/checklayer/cases/common.py  60
-rw-r--r--  scripts/lib/checklayer/cases/distro.py  28
-rw-r--r--  scripts/lib/checklayer/context.py  17
-rw-r--r--  scripts/lib/devtool/__init__.py  187
-rw-r--r--  scripts/lib/devtool/build.py  34
-rw-r--r--  scripts/lib/devtool/build_image.py  22
-rw-r--r--  scripts/lib/devtool/build_sdk.py  12
-rw-r--r--  scripts/lib/devtool/deploy.py  101
-rw-r--r--  scripts/lib/devtool/export.py  109
-rw-r--r--  scripts/lib/devtool/import.py  134
-rw-r--r--  scripts/lib/devtool/menuconfig.py  79
-rw-r--r--  scripts/lib/devtool/package.py  20
-rw-r--r--  scripts/lib/devtool/runqemu.py  25
-rw-r--r--  scripts/lib/devtool/sdk.py  31
-rw-r--r--  scripts/lib/devtool/search.py  114
-rw-r--r--  scripts/lib/devtool/standard.py  1446
-rw-r--r--  scripts/lib/devtool/upgrade.py  384
-rw-r--r--  scripts/lib/devtool/utilcmds.py  77
-rw-r--r--  scripts/lib/recipetool/append.py  75
-rw-r--r--  scripts/lib/recipetool/create.py  495
-rw-r--r--  scripts/lib/recipetool/create_buildsys.py  16
-rw-r--r--  scripts/lib/recipetool/create_buildsys_python.py  53
-rw-r--r--  scripts/lib/recipetool/create_kernel.py  16
-rw-r--r--  scripts/lib/recipetool/create_kmod.py  16
-rw-r--r--  scripts/lib/recipetool/create_npm.py  208
-rw-r--r--  scripts/lib/recipetool/edit.py  44
-rw-r--r--  scripts/lib/recipetool/newappend.py  34
-rw-r--r--  scripts/lib/recipetool/setvar.py  14
-rw-r--r--  scripts/lib/resulttool/__init__.py (renamed from scripts/lib/wic/utils/__init__.py)  0
-rw-r--r--  scripts/lib/resulttool/log.py  97
-rwxr-xr-x  scripts/lib/resulttool/manualexecution.py  235
-rw-r--r--  scripts/lib/resulttool/merge.py  46
-rw-r--r--  scripts/lib/resulttool/regression.py  186
-rw-r--r--  scripts/lib/resulttool/report.py  290
-rw-r--r--  scripts/lib/resulttool/resultutils.py  221
-rw-r--r--  scripts/lib/resulttool/store.py  104
-rw-r--r--  scripts/lib/resulttool/template/test_report_full_text.txt  79
-rw-r--r--  scripts/lib/scriptpath.py  12
-rw-r--r--  scripts/lib/scriptutils.py  282
-rw-r--r--  scripts/lib/wic/__init__.py  12
-rw-r--r--  scripts/lib/wic/__version__.py  1
-rw-r--r--  scripts/lib/wic/canned-wks/common.wks.inc  2
-rw-r--r--  scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg  28
-rw-r--r--  scripts/lib/wic/canned-wks/efi-bootdisk.wks.in  3
-rw-r--r--  scripts/lib/wic/canned-wks/mkefidisk.wks  4
-rw-r--r--  scripts/lib/wic/canned-wks/mkgummidisk.wks  11
-rw-r--r--  scripts/lib/wic/canned-wks/mkhybridiso.wks  2
-rw-r--r--  scripts/lib/wic/canned-wks/mksystemd-bootdisk.wks  11
-rw-r--r--  scripts/lib/wic/canned-wks/qemuriscv.wks  3
-rw-r--r--  scripts/lib/wic/canned-wks/qemux86-directdisk.wks  2
-rw-r--r--  scripts/lib/wic/canned-wks/sdimage-bootpart.wks  4
-rw-r--r--  scripts/lib/wic/canned-wks/systemd-bootdisk.wks  11
-rw-r--r--  scripts/lib/wic/conf.py  103
-rw-r--r--  scripts/lib/wic/config/wic.conf  6
-rw-r--r--  scripts/lib/wic/creator.py  125
-rw-r--r--  scripts/lib/wic/engine.py  511
-rw-r--r--  scripts/lib/wic/filemap.py  86
-rw-r--r--  scripts/lib/wic/help.py  413
-rw-r--r--  scripts/lib/wic/imager/baseimager.py  191
-rw-r--r--  scripts/lib/wic/imager/direct.py  407
-rw-r--r--  scripts/lib/wic/ksparser.py  113
-rw-r--r--  scripts/lib/wic/misc.py (renamed from scripts/lib/wic/utils/oe/misc.py)  193
-rw-r--r--  scripts/lib/wic/msger.py  235
-rw-r--r--  scripts/lib/wic/partition.py  384
-rw-r--r--  scripts/lib/wic/plugin.py  150
-rw-r--r--  scripts/lib/wic/pluginbase.py  95
-rw-r--r--  scripts/lib/wic/plugins/imager/direct.py  602
-rw-r--r--  scripts/lib/wic/plugins/imager/direct_plugin.py  103
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-biosplusefi.py  213
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-efi.py  210
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-partition.py  204
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-pcbios.py  135
-rw-r--r--  scripts/lib/wic/plugins/source/fsimage.py  73
-rw-r--r--  scripts/lib/wic/plugins/source/isoimage-isohybrid.py  349
-rw-r--r--  scripts/lib/wic/plugins/source/rawcopy.py  84
-rw-r--r--  scripts/lib/wic/plugins/source/rootfs.py  91
-rw-r--r--  scripts/lib/wic/plugins/source/rootfs_pcbios_ext.py  177
-rw-r--r--  scripts/lib/wic/test  1
-rw-r--r--  scripts/lib/wic/utils/errors.py  29
-rw-r--r--  scripts/lib/wic/utils/misc.py  95
-rw-r--r--  scripts/lib/wic/utils/oe/__init__.py  22
-rw-r--r--  scripts/lib/wic/utils/partitionedfs.py  370
-rw-r--r--  scripts/lib/wic/utils/runner.py  110
-rw-r--r--  scripts/lib/wic/utils/syslinux.py  58
-rwxr-xr-x  scripts/lnr  3
-rw-r--r--  scripts/multilib_header_wrapper.h  47
-rwxr-xr-x  scripts/native-intercept/chgrp  5
-rwxr-xr-x  scripts/native-intercept/chown  3
-rwxr-xr-x  scripts/oe-build-perf-report  607
-rwxr-xr-x  scripts/oe-build-perf-test  115
-rwxr-xr-x  scripts/oe-buildenv-internal  44
-rwxr-xr-x  scripts/oe-check-sstate  12
-rwxr-xr-x  scripts/oe-depends-dot  157
-rwxr-xr-x  scripts/oe-find-native-sysroot  80
-rwxr-xr-x  scripts/oe-git-archive  118
-rwxr-xr-x  scripts/oe-git-proxy  43
-rwxr-xr-x  scripts/oe-gnome-terminal-phonehome  2
-rwxr-xr-x  scripts/oe-pkgdata-util  234
-rwxr-xr-x  scripts/oe-publish-sdk  20
-rwxr-xr-x  scripts/oe-pylint  13
-rwxr-xr-x  scripts/oe-run-native  56
-rwxr-xr-x  scripts/oe-selftest  667
-rwxr-xr-x  scripts/oe-setup-builddir  30
-rwxr-xr-x  scripts/oe-setup-rpmrepo  97
-rwxr-xr-x  scripts/oe-test  83
-rwxr-xr-x  scripts/oe-trim-schemas  12
-rwxr-xr-x  scripts/oepydevshell-internal.py  22
-rwxr-xr-x  scripts/opkg-query-helper.py  15
-rw-r--r--  scripts/postinst-intercepts/delay_to_first_boot  6
-rwxr-xr-x  scripts/postinst-intercepts/postinst_intercept  2
-rw-r--r--  scripts/postinst-intercepts/update_font_cache  6
-rw-r--r--  scripts/postinst-intercepts/update_gio_module_cache  8
-rw-r--r--  scripts/postinst-intercepts/update_gtk_icon_cache (renamed from scripts/postinst-intercepts/update_icon_cache)  6
-rw-r--r--  scripts/postinst-intercepts/update_gtk_immodules_cache  19
-rw-r--r--  scripts/postinst-intercepts/update_pixbuf_cache  6
-rw-r--r--  scripts/postinst-intercepts/update_udev_hwdb  9
-rwxr-xr-x  scripts/pybootchartgui/pybootchartgui.py  2
-rw-r--r--  scripts/pybootchartgui/pybootchartgui/draw.py  1315
-rw-r--r--  scripts/pybootchartgui/pybootchartgui/gui.py  208
-rw-r--r--  scripts/pybootchartgui/pybootchartgui/parsing.py  134
-rw-r--r--  scripts/pybootchartgui/pybootchartgui/samples.py  27
-rwxr-xr-x  scripts/pythondeps  10
-rwxr-xr-x  scripts/recipetool  19
-rwxr-xr-x  scripts/relocate_sdk.py  15
-rwxr-xr-x  scripts/resulttool  78
-rwxr-xr-x  scripts/rpm2cpio.sh  107
-rwxr-xr-x  scripts/runqemu  1119
-rwxr-xr-x  scripts/runqemu-addptable2image  14
-rwxr-xr-x  scripts/runqemu-export-rootfs  37
-rwxr-xr-x  scripts/runqemu-extract-sdk  37
-rwxr-xr-x  scripts/runqemu-gen-tapdevs  45
-rwxr-xr-x  scripts/runqemu-ifdown  28
-rwxr-xr-x  scripts/runqemu-ifup  18
-rw-r--r--  scripts/runqemu.README  2
-rwxr-xr-x  scripts/send-error-report  20
-rwxr-xr-x  scripts/send-pull-request  24
-rwxr-xr-x  scripts/sstate-cache-management.sh  13
-rwxr-xr-x  scripts/sstate-diff-machines.sh  6
-rwxr-xr-x  scripts/sstate-sysroot-cruft.sh  18
-rwxr-xr-x  scripts/sysroot-relativelinks.py  6
-rwxr-xr-x  scripts/task-time  135
-rwxr-xr-x  scripts/test-dependencies.sh  286
-rwxr-xr-x  scripts/test-reexec  21
-rwxr-xr-x  scripts/test-remote-image  14
-rwxr-xr-x  scripts/tiny/dirsize.py  16
-rwxr-xr-x  scripts/tiny/ksize.py  33
-rwxr-xr-x  scripts/tiny/ksum.py  154
-rwxr-xr-x  scripts/verify-bashisms  118
-rwxr-xr-x  scripts/wic  541
-rwxr-xr-x  scripts/wipe-sysroot  54
-rwxr-xr-x  scripts/yocto-check-layer  217
-rwxr-xr-x  scripts/yocto-check-layer-wrapper  47
203 files changed, 15412 insertions, 10092 deletions
diff --git a/scripts/autobuilder-worker-prereq-tests b/scripts/autobuilder-worker-prereq-tests
new file mode 100755
index 0000000000..5d7e6e2601
--- /dev/null
+++ b/scripts/autobuilder-worker-prereq-tests
@@ -0,0 +1,71 @@
+#!/bin/bash
+#
+# Script which can be run on new autobuilder workers to check all needed configuration is present.
+# Designed to be run in a repo where bitbake/oe-core are already present.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Todo
+# Add testtools/subunit import test
+# Add python3-git test
+# Add pigz test
+# vnc tests/checkvnc?
+# test sendmail works (for QA email notification)
+# test error report submission works
+# test buildhistory git repo works?
+#
+
+if [ ! -x $HOME/yocto-autobuilder-helper/scripts/checkvnc ]; then
+ echo "$HOME/yocto-autobuilder-helper should be created."
+ exit 1
+fi
+$HOME/yocto-autobuilder-helper/scripts/checkvnc
+
+. ./oe-init-build-env > /dev/null
+if [ "$?" != "0" ]; then
+ exit 1
+fi
+git config --global user.name > /dev/null
+if [ "$?" != "0" ]; then
+ echo "Please set git config --global user.name"
+ exit 1
+fi
+git config --global user.email > /dev/null
+if [ "$?" != "0" ]; then
+ echo "Please set git config --global user.email"
+ exit 1
+fi
+bitbake -p
+if [ "$?" != "0" ]; then
+ echo "Bitbake parsing failed"
+ exit 1
+fi
+
+WATCHES=$(PATH="/sbin:/usr/sbin:$PATH" sysctl fs.inotify.max_user_watches -n)
+if (( $WATCHES < 65000 )); then
+ echo 'Need to increase watches (echo fs.inotify.max_user_watches=65536 | sudo tee -a /etc/sysctl.conf)'
+ exit 1
+fi
+mkdir -p tmp/deploy/images/qemux86-64
+pushd tmp/deploy/images/qemux86-64
+if [ ! -e core-image-minimal-qemux86-64.ext4 ]; then
+ wget http://downloads.yoctoproject.org/releases/yocto/yocto-2.5.1/machines/qemu/qemux86-64/core-image-minimal-qemux86-64.ext4
+fi
+if [ ! -e core-image-minimal-qemux86-64.qemuboot.conf ]; then
+ wget http://downloads.yoctoproject.org/releases/yocto/yocto-2.5.1/machines/qemu/qemux86-64/core-image-minimal-qemux86-64.qemuboot.conf
+fi
+if [ ! -e bzImage-qemux86-64.bin ]; then
+ wget http://downloads.yoctoproject.org/releases/yocto/yocto-2.5.1/machines/qemu/qemux86-64/bzImage-qemux86-64.bin
+fi
+popd
+bitbake qemu-helper-native
+DISPLAY=:1 runqemu serialstdio qemux86-64
+if [ "$?" != "0" ]; then
+ echo "Unable to use runqemu"
+ exit 1
+fi
+DISPLAY=:1 runqemu serialstdio qemux86-64 kvm
+if [ "$?" != "0" ]; then
+ echo "Unable to use runqemu with kvm"
+ exit 1
+fi
diff --git a/scripts/bitbake-prserv-tool b/scripts/bitbake-prserv-tool
index fa31b52584..e55d98c72e 100755
--- a/scripts/bitbake-prserv-tool
+++ b/scripts/bitbake-prserv-tool
@@ -1,4 +1,7 @@
#!/usr/bin/env bash
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
help ()
{
diff --git a/scripts/bitbake-whatchanged b/scripts/bitbake-whatchanged
index 0207777e63..3095dafa46 100755
--- a/scripts/bitbake-whatchanged
+++ b/scripts/bitbake-whatchanged
@@ -4,18 +4,8 @@
# Copyright (c) 2013 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import sys
diff --git a/scripts/buildhistory-collect-srcrevs b/scripts/buildhistory-collect-srcrevs
index 8a03580f8e..c4d203ff4d 100755
--- a/scripts/buildhistory-collect-srcrevs
+++ b/scripts/buildhistory-collect-srcrevs
@@ -5,18 +5,8 @@
# Copyright 2013 Intel Corporation
# Authored-by: Paul Eggleton <paul.eggleton@intel.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import collections
import os
@@ -101,7 +91,7 @@ def main():
for name, value in srcrevs.items():
orig = orig_srcrevs.get(name, orig_srcrev)
if options.reportall or value != orig:
- all_srcrevs[curdir].append((pn, name, srcrev))
+ all_srcrevs[curdir].append((pn, name, value))
for curdir, srcrevs in sorted(all_srcrevs.items()):
if srcrevs:
diff --git a/scripts/buildhistory-diff b/scripts/buildhistory-diff
index d8ca12d3e6..833f7c33a5 100755
--- a/scripts/buildhistory-diff
+++ b/scripts/buildhistory-diff
@@ -4,10 +4,13 @@
#
# Copyright (C) 2013 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys
import os
-import optparse
+import argparse
from distutils.version import LooseVersion
# Ensure PythonGit is installed (buildhistory_analysis needs it)
@@ -17,36 +20,74 @@ except ImportError:
print("Please install GitPython (python3-git) 0.3.4 or later in order to use this script")
sys.exit(1)
+def get_args_parser():
+ description = "Reports significant differences in the buildhistory repository."
+
+ parser = argparse.ArgumentParser(description=description,
+ usage="""
+ %(prog)s [options] [from-revision [to-revision]]
+ (if not specified, from-revision defaults to build-minus-1, and to-revision defaults to HEAD)""")
+
+ parser.add_argument('-p', '--buildhistory-dir',
+ action='store',
+ dest='buildhistory_dir',
+ default='buildhistory/',
+ help="Specify path to buildhistory directory (defaults to buildhistory/ under cwd)")
+ parser.add_argument('-v', '--report-version',
+ action='store_true',
+ dest='report_ver',
+ default=False,
+ help="Report changes in PKGE/PKGV/PKGR even when the values are still the default (PE/PV/PR)")
+ parser.add_argument('-a', '--report-all',
+ action='store_true',
+ dest='report_all',
+ default=False,
+ help="Report all changes, not just the default significant ones")
+ parser.add_argument('-s', '--signatures',
+ action='store_true',
+ dest='sigs',
+ default=False,
+ help="Report list of signatures differing instead of output")
+ parser.add_argument('-S', '--signatures-with-diff',
+ action='store_true',
+ dest='sigsdiff',
+ default=False,
+ help="Report on actual signature differences instead of output (requires signature data to have been generated, either by running the actual tasks or using bitbake -S)")
+ parser.add_argument('-e', '--exclude-path',
+ action='append',
+ help="Exclude path from the output")
+ parser.add_argument('-c', '--colour',
+ choices=('yes', 'no', 'auto'),
+ default="auto",
+ help="Whether to colourise (defaults to auto)")
+ parser.add_argument('revisions',
+ default=['build-minus-1', 'HEAD'],
+ nargs='*',
+ help=argparse.SUPPRESS)
+ return parser
+
def main():
- parser = optparse.OptionParser(
- description = "Reports significant differences in the buildhistory repository.",
- usage = """
- %prog [options] [from-revision [to-revision]]
-(if not specified, from-revision defaults to build-minus-1, and to-revision defaults to HEAD)""")
-
- parser.add_option("-p", "--buildhistory-dir",
- help = "Specify path to buildhistory directory (defaults to buildhistory/ under cwd)",
- action="store", dest="buildhistory_dir", default='buildhistory/')
- parser.add_option("-v", "--report-version",
- help = "Report changes in PKGE/PKGV/PKGR even when the values are still the default (PE/PV/PR)",
- action="store_true", dest="report_ver", default=False)
- parser.add_option("-a", "--report-all",
- help = "Report all changes, not just the default significant ones",
- action="store_true", dest="report_all", default=False)
-
- options, args = parser.parse_args(sys.argv)
-
- if len(args) > 3:
- sys.stderr.write('Invalid argument(s) specified: %s\n\n' % ' '.join(args[3:]))
- parser.print_help()
- sys.exit(1)
+
+ parser = get_args_parser()
+ args = parser.parse_args()
if LooseVersion(git.__version__) < '0.3.1':
sys.stderr.write("Version of GitPython is too old, please install GitPython (python-git) 0.3.1 or later in order to use this script\n")
sys.exit(1)
- if not os.path.exists(options.buildhistory_dir):
- sys.stderr.write('Buildhistory directory "%s" does not exist\n\n' % options.buildhistory_dir)
+ if len(args.revisions) > 2:
+ sys.stderr.write('Invalid argument(s) specified: %s\n\n' % ' '.join(args.revisions[2:]))
+ parser.print_help()
+
+ sys.exit(1)
+ if not os.path.exists(args.buildhistory_dir):
+ if args.buildhistory_dir == 'buildhistory/':
+ cwd = os.getcwd()
+ if os.path.basename(cwd) == 'buildhistory':
+ args.buildhistory_dir = cwd
+
+ if not os.path.exists(args.buildhistory_dir):
+ sys.stderr.write('Buildhistory directory "%s" does not exist\n\n' % args.buildhistory_dir)
parser.print_help()
sys.exit(1)
@@ -60,30 +101,30 @@ def main():
scriptpath.add_oe_lib_path()
# Set path to bitbake lib dir so the buildhistory_analysis module can load bb.utils
bitbakepath = scriptpath.add_bitbake_lib_path()
+
if not bitbakepath:
sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
sys.exit(1)
- import oe.buildhistory_analysis
-
- fromrev = 'build-minus-1'
- torev = 'HEAD'
- if len(args) > 1:
- if len(args) == 2 and '..' in args[1]:
- revs = args[1].split('..')
- fromrev = revs[0]
- if revs[1]:
- torev = revs[1]
+ if len(args.revisions) == 1:
+ if '..' in args.revisions[0]:
+ fromrev, torev = args.revisions[0].split('..')
else:
- fromrev = args[1]
- if len(args) > 2:
- torev = args[2]
+ fromrev, torev = args.revisions[0], 'HEAD'
+ elif len(args.revisions) == 2:
+ fromrev, torev = args.revisions
+ from oe.buildhistory_analysis import init_colours, process_changes
import gitdb
+
+ init_colours({"yes": True, "no": False, "auto": sys.stdout.isatty()}[args.colour])
+
try:
- changes = oe.buildhistory_analysis.process_changes(options.buildhistory_dir, fromrev, torev, options.report_all, options.report_ver)
+ changes = process_changes(args.buildhistory_dir, fromrev, torev,
+ args.report_all, args.report_ver, args.sigs,
+ args.sigsdiff, args.exclude_path)
except gitdb.exc.BadObject as e:
- if len(args) == 1:
+ if not args.revisions:
sys.stderr.write("Unable to find previous build revision in buildhistory repository\n\n")
parser.print_help()
else:
@@ -91,10 +132,11 @@ def main():
sys.exit(1)
for chg in changes:
- print('%s' % chg)
+ out = str(chg)
+ if out:
+ print(out)
sys.exit(0)
-
if __name__ == "__main__":
main()
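
The buildhistory-diff change above ports the CLI from optparse to argparse while keeping the loose positional revision syntax: zero revisions (defaulting to build-minus-1..HEAD), a single 'rev1..rev2' range, or two separate revisions. A minimal, self-contained sketch of that argument-handling pattern; the names and sample values here are illustrative only, not part of the patch:

import argparse

parser = argparse.ArgumentParser()
# Optional positional revisions, as in the patch: the default pair is used
# when none are given, and argparse.SUPPRESS hides them from --help.
parser.add_argument('revisions', nargs='*',
                    default=['build-minus-1', 'HEAD'],
                    help=argparse.SUPPRESS)

args = parser.parse_args(['HEAD~2..HEAD'])   # e.g. a single rev1..rev2 range

if len(args.revisions) == 1 and '..' in args.revisions[0]:
    fromrev, torev = args.revisions[0].split('..')
elif len(args.revisions) == 1:
    fromrev, torev = args.revisions[0], 'HEAD'
else:
    fromrev, torev = args.revisions[0], args.revisions[1]

print(fromrev, torev)   # -> HEAD~2 HEAD
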
diff --git a/scripts/buildstats-diff b/scripts/buildstats-diff
index 8ee2aaf626..2f6498ab67 100755
--- a/scripts/buildstats-diff
+++ b/scripts/buildstats-diff
@@ -4,304 +4,167 @@
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import glob
-import json
import logging
+import math
import os
-import re
import sys
-from collections import namedtuple
-from datetime import datetime, timedelta, tzinfo
from operator import attrgetter
+# Import oe libs
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(scripts_path, 'lib'))
+from buildstats import BuildStats, diff_buildstats, taskdiff_fields, BSVerDiff
+
+
# Setup logging
-logging.basicConfig(level=logging.INFO)
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger()
-class TimeZone(tzinfo):
- """Simple fixed-offset tzinfo"""
- def __init__(self, seconds, name):
- self._offset = timedelta(seconds=seconds)
- self._name = name
-
- def utcoffset(self, dt):
- return self._offset
-
- def tzname(self, dt):
- return self._name
-
- def dst(self, dt):
- return None
-
-TIMEZONES = {'UTC': TimeZone(0, 'UTC'),
- 'EET': TimeZone(7200, 'EET'),
- 'EEST': TimeZone(10800, 'EEST')}
-
-
-taskdiff_fields = ('pkg', 'pkg_op', 'task', 'task_op', 'cputime1', 'cputime2',
- 'absdiff', 'reldiff')
-TaskDiff = namedtuple('TaskDiff', ' '.join(taskdiff_fields))
-
-
-def to_datetime_obj(obj):
- """Helper for getting timestamps in datetime format"""
- if isinstance(obj, datetime):
- return obj
- else:
- return datetime.utcfromtimestamp(obj).replace(tzinfo=TIMEZONES['UTC'])
-
-
-def read_buildstats_file(buildstat_file):
- """Convert buildstat text file into dict/json"""
- bs_json = {'iostat': {},
- 'rusage': {},
- 'child_rusage': {}}
- log.debug("Reading task buildstats from %s", buildstat_file)
- with open(buildstat_file) as fobj:
- for line in fobj.readlines():
- key, val = line.split(':', 1)
- val = val.strip()
- if key == 'Started':
- start_time = to_datetime_obj(float(val))
- bs_json['start_time'] = start_time
- elif key == 'Ended':
- end_time = to_datetime_obj(float(val))
- elif key.startswith('IO '):
- split = key.split()
- bs_json['iostat'][split[1]] = int(val)
- elif key.find('rusage') >= 0:
- split = key.split()
- ru_key = split[-1]
- if ru_key in ('ru_stime', 'ru_utime'):
- val = float(val)
- else:
- val = int(val)
- ru_type = 'rusage' if split[0] == 'rusage' else \
- 'child_rusage'
- bs_json[ru_type][ru_key] = val
- elif key == 'Status':
- bs_json['status'] = val
- bs_json['elapsed_time'] = end_time - start_time
- return bs_json
-
-
-def read_buildstats_dir(bs_dir):
- """Read buildstats directory"""
- def split_nevr(nevr):
- """Split name and version information from recipe "nevr" string"""
- n_e_v, revision = nevr.rsplit('-', 1)
- match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
- n_e_v)
- if not match:
- # If we're not able to parse a version starting with a number, just
- # take the part after last dash
- match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
- n_e_v)
- name = match.group('name')
- version = match.group('version')
- epoch = match.group('epoch')
- return name, epoch, version, revision
-
- if os.path.isfile(os.path.join(bs_dir, 'build_stats')):
- top_dir = bs_dir
- else:
- subdirs = sorted(glob.glob(bs_dir + '/*'))
- if len(subdirs) > 1:
- log.warning("Multiple buildstats found, using the first one")
- top_dir = subdirs[0]
- log.debug("Reading buildstats directory %s", top_dir)
- subdirs = os.listdir(top_dir)
-
- # Handle "old" style directory structure
- if len(subdirs) == 1 and re.match('^20[0-9]{12}$', subdirs[0]):
- top_dir = os.path.join(top_dir, subdirs[0])
- subdirs = os.listdir(top_dir)
-
- buildstats = {}
- for dirname in subdirs:
- recipe_dir = os.path.join(top_dir, dirname)
- if not os.path.isdir(recipe_dir):
- continue
- name, epoch, version, revision = split_nevr(dirname)
- recipe_bs = {'nevr': dirname,
- 'name': name,
- 'epoch': epoch,
- 'version': version,
- 'revision': revision,
- 'tasks': {}}
- for task in os.listdir(recipe_dir):
- recipe_bs['tasks'][task] = read_buildstats_file(
- os.path.join(recipe_dir, task))
- if name in buildstats:
- log.error("Cannot handle multiple versions of the same package (%s)",
- name)
- sys.exit(1)
- buildstats[name] = recipe_bs
-
- return buildstats
+class ScriptError(Exception):
+ """Exception for internal error handling of this script"""
+ pass
-def read_buildstats_json(path):
- """Read buildstats from JSON file"""
- buildstats = {}
- with open(path) as fobj:
- bs_json = json.load(fobj)
- for recipe_bs in bs_json:
- if recipe_bs['name'] in buildstats:
- log.error("Cannot handle multiple versions of the same package (%s)",
- recipe_bs['name'])
- sys.exit(1)
+def read_buildstats(path, multi):
+ """Read buildstats"""
+ if not os.path.exists(path):
+ raise ScriptError("No such file or directory: {}".format(path))
- if recipe_bs['epoch'] is None:
- recipe_bs['nevr'] = "{}-{}-{}".format(recipe_bs['name'], recipe_bs['version'], recipe_bs['revision'])
- else:
- recipe_bs['nevr'] = "{}-{}_{}-{}".format(recipe_bs['name'], recipe_bs['epoch'], recipe_bs['version'], recipe_bs['revision'])
- buildstats[recipe_bs['name']] = recipe_bs
+ if os.path.isfile(path):
+ return BuildStats.from_file_json(path)
- return buildstats
+ if os.path.isfile(os.path.join(path, 'build_stats')):
+ return BuildStats.from_dir(path)
+ # Handle a non-buildstat directory
+ subpaths = sorted(glob.glob(path + '/*'))
+ if len(subpaths) > 1:
+ if multi:
+ log.info("Averaging over {} buildstats from {}".format(
+ len(subpaths), path))
+ else:
+ raise ScriptError("Multiple buildstats found in '{}'. Please give "
+ "a single buildstat directory of use the --multi "
+ "option".format(path))
+ bs = None
+ for subpath in subpaths:
+ if os.path.isfile(subpath):
+ _bs = BuildStats.from_file_json(subpath)
+ else:
+ _bs = BuildStats.from_dir(subpath)
+ if bs is None:
+ bs = _bs
+ else:
+ bs.aggregate(_bs)
+ if not bs:
+ raise ScriptError("No buildstats found under {}".format(path))
-def read_buildstats(path):
- """Read buildstats"""
- if os.path.isfile(path):
- return read_buildstats_json(path)
- else:
- return read_buildstats_dir(path)
+ return bs
def print_ver_diff(bs1, bs2):
"""Print package version differences"""
- pkgs1 = set(bs1.keys())
- pkgs2 = set(bs2.keys())
- new_pkgs = pkgs2 - pkgs1
- deleted_pkgs = pkgs1 - pkgs2
-
- echanged = []
- vchanged = []
- rchanged = []
- unchanged = []
- common_pkgs = pkgs2.intersection(pkgs1)
- if common_pkgs:
- for pkg in common_pkgs:
- if bs1[pkg]['epoch'] != bs2[pkg]['epoch']:
- echanged.append(pkg)
- elif bs1[pkg]['version'] != bs2[pkg]['version']:
- vchanged.append(pkg)
- elif bs1[pkg]['revision'] != bs2[pkg]['revision']:
- rchanged.append(pkg)
- else:
- unchanged.append(pkg)
- maxlen = max([len(pkg) for pkg in pkgs1.union(pkgs2)])
+ diff = BSVerDiff(bs1, bs2)
+
+ maxlen = max([len(r) for r in set(bs1.keys()).union(set(bs2.keys()))])
fmt_str = " {:{maxlen}} ({})"
-# if unchanged:
-# print("\nUNCHANGED PACKAGES:")
-# print("-------------------")
-# maxlen = max([len(pkg) for pkg in unchanged])
-# for pkg in sorted(unchanged):
-# print(fmt_str.format(pkg, bs2[pkg]['nevr'], maxlen=maxlen))
-
- if new_pkgs:
- print("\nNEW PACKAGES:")
- print("-------------")
- for pkg in sorted(new_pkgs):
- print(fmt_str.format(pkg, bs2[pkg]['nevr'], maxlen=maxlen))
-
- if deleted_pkgs:
- print("\nDELETED PACKAGES:")
- print("-----------------")
- for pkg in sorted(deleted_pkgs):
- print(fmt_str.format(pkg, bs1[pkg]['nevr'], maxlen=maxlen))
+
+ if diff.new:
+ print("\nNEW RECIPES:")
+ print("------------")
+ for name, val in sorted(diff.new.items()):
+ print(fmt_str.format(name, val.nevr, maxlen=maxlen))
+
+ if diff.dropped:
+ print("\nDROPPED RECIPES:")
+ print("----------------")
+ for name, val in sorted(diff.dropped.items()):
+ print(fmt_str.format(name, val.nevr, maxlen=maxlen))
fmt_str = " {0:{maxlen}} {1:<20} ({2})"
- if rchanged:
+ if diff.rchanged:
print("\nREVISION CHANGED:")
print("-----------------")
- for pkg in sorted(rchanged):
- field1 = "{} -> {}".format(pkg, bs1[pkg]['revision'], bs2[pkg]['revision'])
- field2 = "{} -> {}".format(bs1[pkg]['nevr'], bs2[pkg]['nevr'])
- print(fmt_str.format(pkg, field1, field2, maxlen=maxlen))
+ for name, val in sorted(diff.rchanged.items()):
+ field1 = "{} -> {}".format(val.left.revision, val.right.revision)
+ field2 = "{} -> {}".format(val.left.nevr, val.right.nevr)
+ print(fmt_str.format(name, field1, field2, maxlen=maxlen))
- if vchanged:
+ if diff.vchanged:
print("\nVERSION CHANGED:")
print("----------------")
- for pkg in sorted(vchanged):
- field1 = "{} -> {}".format(bs1[pkg]['version'], bs2[pkg]['version'])
- field2 = "{} -> {}".format(bs1[pkg]['nevr'], bs2[pkg]['nevr'])
- print(fmt_str.format(pkg, field1, field2, maxlen=maxlen))
+ for name, val in sorted(diff.vchanged.items()):
+ field1 = "{} -> {}".format(val.left.version, val.right.version)
+ field2 = "{} -> {}".format(val.left.nevr, val.right.nevr)
+ print(fmt_str.format(name, field1, field2, maxlen=maxlen))
- if echanged:
+ if diff.echanged:
print("\nEPOCH CHANGED:")
print("--------------")
- for pkg in sorted(echanged):
- field1 = "{} -> {}".format(pkg, bs1[pkg]['epoch'], bs2[pkg]['epoch'])
- field2 = "{} -> {}".format(bs1[pkg]['nevr'], bs2[pkg]['nevr'])
- print(fmt_str.format(pkg, field1, field2, maxlen=maxlen))
-
-
-def task_time(task):
- """Calculate sum of user and system time taken by a task"""
- cputime = task['rusage']['ru_stime'] + task['rusage']['ru_utime'] + \
- task['child_rusage']['ru_stime'] + task['child_rusage']['ru_utime']
- return cputime
+ for name, val in sorted(diff.echanged.items()):
+ field1 = "{} -> {}".format(val.left.epoch, val.right.epoch)
+ field2 = "{} -> {}".format(val.left.nevr, val.right.nevr)
+ print(fmt_str.format(name, field1, field2, maxlen=maxlen))
-def print_task_diff(bs1, bs2, min_cputime=0, min_timediff=0, sort_by=('absdiff',)):
+def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absdiff',), only_tasks=[]):
"""Diff task execution times"""
- tasks_diff = []
- pkg_maxlen = 0
- task_maxlen = 0
-
- pkgs = set(bs1.keys()).union(set(bs2.keys()))
- for pkg in pkgs:
- if len(pkg) > pkg_maxlen:
- pkg_maxlen = len(pkg)
- tasks1 = bs1[pkg]['tasks'] if pkg in bs1 else {}
- tasks2 = bs2[pkg]['tasks'] if pkg in bs2 else {}
- if not tasks1:
- pkg_op = '+ '
- elif not tasks2:
- pkg_op = '- '
- else:
- pkg_op = ' '
-
- for task in set(tasks1.keys()).union(set(tasks2.keys())):
- if len(task) > task_maxlen:
- task_maxlen = len(task)
-
- t1 = task_time(bs1[pkg]['tasks'][task]) if task in tasks1 else 0
- t2 = task_time(bs2[pkg]['tasks'][task]) if task in tasks2 else 0
- task_op = ' '
- if t1 == 0:
- reldiff = float('inf')
- task_op = '+ '
+ def val_to_str(val, human_readable=False):
+ """Convert raw value to printable string"""
+ def hms_time(secs):
+ """Get time in human-readable HH:MM:SS format"""
+ h = int(secs / 3600)
+ m = int((secs % 3600) / 60)
+ s = secs % 60
+ if h == 0:
+ return "{:02d}:{:04.1f}".format(m, s)
else:
- reldiff = 100 * (t2 - t1) / t1
- if t2 == 0:
- task_op = '- '
+ return "{:d}:{:02d}:{:04.1f}".format(h, m, s)
- tasks_diff.append(TaskDiff(pkg, pkg_op, task, task_op, t1, t2, t2-t1, reldiff))
+ if 'time' in val_type:
+ if human_readable:
+ return hms_time(val)
+ else:
+ return "{:.1f}s".format(val)
+ elif 'bytes' in val_type and human_readable:
+ prefix = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi']
+ dec = int(math.log(val, 2) / 10)
+ prec = 1 if dec > 0 else 0
+ return "{:.{prec}f}{}B".format(val / (2 ** (10 * dec)),
+ prefix[dec], prec=prec)
+ elif 'ops' in val_type and human_readable:
+ prefix = ['', 'k', 'M', 'G', 'T', 'P']
+ dec = int(math.log(val, 1000))
+ prec = 1 if dec > 0 else 0
+ return "{:.{prec}f}{}ops".format(val / (1000 ** dec),
+ prefix[dec], prec=prec)
+ return str(int(val))
+
+ def sum_vals(buildstats):
+ """Get cumulative sum of all tasks"""
+ total = 0.0
+ for recipe_data in buildstats.values():
+ for name, bs_task in recipe_data.tasks.items():
+ if not only_tasks or name in only_tasks:
+ total += getattr(bs_task, val_type)
+ return total
- if min_cputime:
- print("Ignoring tasks shorter than {}s".format(min_cputime))
- if min_timediff:
- print("Ignoring time differences shorter than {}s".format(min_timediff))
+ if min_val:
+ print("Ignoring tasks less than {} ({})".format(
+ val_to_str(min_val, True), val_to_str(min_val)))
+ if min_absdiff:
+ print("Ignoring differences less than {} ({})".format(
+ val_to_str(min_absdiff, True), val_to_str(min_absdiff)))
- print()
- print(" {:{pkg_maxlen}} {:{task_maxlen}} {:>8} {:>10} {:>10} {}".format(
- 'PKG', 'TASK', 'ABSDIFF', 'RELDIFF', 'CPUTIME1', 'CPUTIME2',
- pkg_maxlen=pkg_maxlen, task_maxlen=task_maxlen))
+ # Prepare the data
+ tasks_diff = diff_buildstats(bs1, bs2, val_type, min_val, min_absdiff, only_tasks)
# Sort our list
for field in reversed(sort_by):
@@ -312,46 +175,37 @@ def print_task_diff(bs1, bs2, min_cputime=0, min_timediff=0, sort_by=('absdiff',
reverse = False
tasks_diff = sorted(tasks_diff, key=attrgetter(field), reverse=reverse)
- for diff in tasks_diff:
- cputime = max(diff.cputime1, diff.cputime2)
- if cputime > min_cputime:
- if abs(diff.absdiff) > min_timediff:
- task_prefix = diff.task_op if diff.pkg_op == ' ' else ' '
- print("{}{:{pkg_maxlen}} {}{:{task_maxlen}} {:+7.1f}s {:+9.1f}% {:9.1f}s -> {:.1f}s".format(
- diff.pkg_op, diff.pkg, task_prefix, diff.task, diff.absdiff, diff.reldiff, diff.cputime1, diff.cputime2,
- pkg_maxlen=pkg_maxlen, task_maxlen=task_maxlen))
- else:
- log.debug("Filtering out %s (difference of %0.1fs)", task, diff.absdiff)
- else:
- log.debug("Filtering out %s (%0.1fs)", task, cputime)
-
-
-def print_timediff_summary(bs1, bs2):
- """Print summary of the timediffs"""
- def total_cputime(buildstats):
- sum = 0.0
- for recipe_data in buildstats.values():
- for task_data in recipe_data['tasks'].values():
- sum += task_time(task_data)
- return sum
-
- def hms_time(secs):
- """Get time in human-readable HH:MM:SS format"""
- h = int(secs / 3600)
- m = int((secs % 3600) / 60)
- s = secs % 60
- if h == 0:
- return "{:02d}:{:04.1f}".format(m, s)
- else:
- return "{:d}:{:02d}:{:04.1f}".format(h, m, s)
+ linedata = [(' ', 'PKG', ' ', 'TASK', 'ABSDIFF', 'RELDIFF',
+ val_type.upper() + '1', val_type.upper() + '2')]
+ field_lens = dict([('len_{}'.format(i), len(f)) for i, f in enumerate(linedata[0])])
- total1 = total_cputime(bs1)
- total2 = total_cputime(bs2)
- print("\nCumulative CPU Time:")
- print (" {:+.1f}s {:+.1f}% {} ({:.1f}s) -> {} ({:.1f}s)".format(
- total2 - total1, 100 * (total2-total1) / total1,
- hms_time(total1), total1, hms_time(total2), total2))
+ # Prepare fields in string format and measure field lengths
+ for diff in tasks_diff:
+ task_prefix = diff.task_op if diff.pkg_op == ' ' else ' '
+ linedata.append((diff.pkg_op, diff.pkg, task_prefix, diff.task,
+ val_to_str(diff.absdiff),
+ '{:+.1f}%'.format(diff.reldiff),
+ val_to_str(diff.value1),
+ val_to_str(diff.value2)))
+ for i, field in enumerate(linedata[-1]):
+ key = 'len_{}'.format(i)
+ if len(field) > field_lens[key]:
+ field_lens[key] = len(field)
+
+ # Print data
+ print()
+ for fields in linedata:
+ print("{:{len_0}}{:{len_1}} {:{len_2}}{:{len_3}} {:>{len_4}} {:>{len_5}} {:>{len_6}} -> {:{len_7}}".format(
+ *fields, **field_lens))
+ # Print summary of the diffs
+ total1 = sum_vals(bs1)
+ total2 = sum_vals(bs2)
+ print("\nCumulative {}:".format(val_type))
+ print (" {} {:+.1f}% {} ({}) -> {} ({})".format(
+ val_to_str(total2 - total1), 100 * (total2-total1) / total1,
+ val_to_str(total1, True), val_to_str(total1),
+ val_to_str(total2, True), val_to_str(total2)))
def parse_args(argv):
@@ -362,23 +216,58 @@ Script for comparing buildstats of two separate builds."""
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=description)
+ min_val_defaults = {'cputime': 3.0,
+ 'read_bytes': 524288,
+ 'write_bytes': 524288,
+ 'read_ops': 500,
+ 'write_ops': 500,
+ 'walltime': 5}
+ min_absdiff_defaults = {'cputime': 1.0,
+ 'read_bytes': 131072,
+ 'write_bytes': 131072,
+ 'read_ops': 50,
+ 'write_ops': 50,
+ 'walltime': 2}
+
parser.add_argument('--debug', '-d', action='store_true',
help="Verbose logging")
parser.add_argument('--ver-diff', action='store_true',
help="Show package version differences and exit")
- parser.add_argument('--min-time', default=3.0, type=float,
- help="Filter out tasks shorter than MIN_TIME seconds")
- parser.add_argument('--min-timediff', default=1.0, type=float,
- help="Filter out tasks whose difference in cputime is "
- "less that MIN_TIMEDIFF seconds")
+ parser.add_argument('--diff-attr', default='cputime',
+ choices=min_val_defaults.keys(),
+ help="Buildstat attribute which to compare")
+ parser.add_argument('--min-val', default=min_val_defaults, type=float,
+ help="Filter out tasks less than MIN_VAL. "
+ "Default depends on --diff-attr.")
+ parser.add_argument('--min-absdiff', default=min_absdiff_defaults, type=float,
+ help="Filter out tasks whose difference is less than "
+ "MIN_ABSDIFF, Default depends on --diff-attr.")
parser.add_argument('--sort-by', default='absdiff',
help="Comma-separated list of field sort order. "
"Prepend the field name with '-' for reversed sort. "
"Available fields are: {}".format(', '.join(taskdiff_fields)))
+ parser.add_argument('--multi', action='store_true',
+ help="Read all buildstats from the given paths and "
+ "average over them")
+ parser.add_argument('--only-task', dest='only_tasks', metavar='TASK', action='append', default=[],
+ help="Only include TASK in report. May be specified multiple times")
parser.add_argument('buildstats1', metavar='BUILDSTATS1', help="'Left' buildstat")
parser.add_argument('buildstats2', metavar='BUILDSTATS2', help="'Right' buildstat")
- return parser.parse_args(argv)
+ args = parser.parse_args(argv)
+
+ # We do not need/want to read all buildstats if we just want to look at the
+ # package versions
+ if args.ver_diff:
+ args.multi = False
+
+ # Handle defaults for the filter arguments
+ if args.min_val is min_val_defaults:
+ args.min_val = min_val_defaults[args.diff_attr]
+ if args.min_absdiff is min_absdiff_defaults:
+ args.min_absdiff = min_absdiff_defaults[args.diff_attr]
+
+ return args
def main(argv=None):
"""Script entry point"""
@@ -395,16 +284,18 @@ def main(argv=None):
sys.exit(1)
sort_by.append(field)
+ try:
+ bs1 = read_buildstats(args.buildstats1, args.multi)
+ bs2 = read_buildstats(args.buildstats2, args.multi)
- bs1 = read_buildstats(args.buildstats1)
- bs2 = read_buildstats(args.buildstats2)
-
- if args.ver_diff:
- print_ver_diff(bs1, bs2)
- else:
- print_task_diff(bs1, bs2, args.min_time, args.min_timediff, sort_by)
- print_timediff_summary(bs1, bs2)
-
+ if args.ver_diff:
+ print_ver_diff(bs1, bs2)
+ else:
+ print_task_diff(bs1, bs2, args.diff_attr, args.min_val,
+ args.min_absdiff, sort_by, args.only_tasks)
+ except ScriptError as err:
+ log.error(str(err))
+ return 1
return 0
if __name__ == "__main__":
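
The buildstats-diff rewrite above delegates all parsing to the new scripts/lib/buildstats.py module added in this series. A hedged sketch of the resulting flow, using only the names visible in this diff (BuildStats.from_dir, BuildStats.from_file_json, aggregate, diff_buildstats); the paths are examples, and anything beyond what the diff itself shows is an assumption rather than verified API:

import os
import sys

# Make scripts/lib importable, the same way the script itself does
scripts_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(scripts_path, 'lib'))
from buildstats import BuildStats, diff_buildstats

# One buildstats directory tree and one exported JSON file (example paths)
bs1 = BuildStats.from_dir('buildstats/20190101000000')
bs2 = BuildStats.from_file_json('buildstats-export.json')

# Per-task cputime comparison, filtered with the CLI's default thresholds,
# matching the call diff_buildstats(bs1, bs2, val_type, min_val, min_absdiff,
# only_tasks) seen in the patch
for tdiff in diff_buildstats(bs1, bs2, 'cputime', 3.0, 1.0, []):
    print(tdiff.pkg, tdiff.task, tdiff.value1, tdiff.value2, tdiff.absdiff)
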
diff --git a/scripts/cleanup-workdir b/scripts/cleanup-workdir
deleted file mode 100755
index 98769f6b32..0000000000
--- a/scripts/cleanup-workdir
+++ /dev/null
@@ -1,198 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) 2012 Wind River Systems, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-import os
-import sys
-import optparse
-import re
-import subprocess
-import shutil
-
-pkg_cur_dirs = {}
-obsolete_dirs = []
-parser = None
-
-def err_quit(msg):
- print(msg)
- parser.print_usage()
- sys.exit(1)
-
-def parse_version(verstr):
- elems = verstr.split(':')
- epoch = elems[0]
- if len(epoch) == 0:
- return elems[1]
- else:
- return epoch + '_' + elems[1]
-
-def run_command(cmd):
- pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
- output = pipe.communicate()[0]
- if pipe.returncode != 0:
- print("Execute command '%s' failed." % cmd)
- sys.exit(1)
- return output.decode('utf-8')
-
-def get_cur_arch_dirs(workdir, arch_dirs):
- pattern = workdir + '/(.*?)/'
-
- cmd = "bitbake -e | grep ^SDK_SYS="
- output = run_command(cmd)
- sdk_sys = output.split('"')[1]
-
- # select thest 5 packages to get the dirs of current arch
- pkgs = ['hicolor-icon-theme', 'base-files', 'acl-native', 'binutils-crosssdk-' + sdk_sys, 'nativesdk-autoconf']
-
- for pkg in pkgs:
- cmd = "bitbake -e " + pkg + " | grep ^IMAGE_ROOTFS="
- output = run_command(cmd)
- output = output.split('"')[1]
- m = re.match(pattern, output)
- arch_dirs.append(m.group(1))
-
-def main():
- global parser
- parser = optparse.OptionParser(
- usage = """%prog
-
-%prog removes the obsolete packages' build directories in WORKDIR.
-This script must be ran under BUILDDIR after source file \"oe-init-build-env\".
-
-Any file or directory under WORKDIR which is not created by Yocto
-will be deleted. Be CAUTIOUS.""")
-
- options, args = parser.parse_args(sys.argv)
-
- builddir = run_command('echo $BUILDDIR').strip()
- if len(builddir) == 0:
- err_quit("Please source file \"oe-init-build-env\" first.\n")
-
- if os.getcwd() != builddir:
- err_quit("Please run %s under: %s\n" % (os.path.basename(args[0]), builddir))
-
- print('Updating bitbake caches...')
- cmd = "bitbake -s"
- output = run_command(cmd)
-
- output = output.split('\n')
- index = 0
- while len(output[index]) > 0:
- index += 1
- alllines = output[index+1:]
-
- for line in alllines:
- # empty again means end of the versions output
- if len(line) == 0:
- break
- line = line.strip()
- line = re.sub('\s+', ' ', line)
- elems = line.split(' ')
- if len(elems) == 2:
- version = parse_version(elems[1])
- else:
- version = parse_version(elems[2])
- pkg_cur_dirs[elems[0]] = version
-
- cmd = "bitbake -e"
- output = run_command(cmd)
-
- tmpdir = None
- image_rootfs = None
- output = output.split('\n')
- for line in output:
- if tmpdir and image_rootfs:
- break
-
- if not tmpdir:
- m = re.match('TMPDIR="(.*)"', line)
- if m:
- tmpdir = m.group(1)
-
- if not image_rootfs:
- m = re.match('IMAGE_ROOTFS="(.*)"', line)
- if m:
- image_rootfs = m.group(1)
-
- # won't fail just in case
- if not tmpdir or not image_rootfs:
- print("Can't get TMPDIR or IMAGE_ROOTFS.")
- return 1
-
- pattern = tmpdir + '/(.*?)/(.*?)/'
- m = re.match(pattern, image_rootfs)
- if not m:
- print("Can't get WORKDIR.")
- return 1
-
- workdir = os.path.join(tmpdir, m.group(1))
-
- # we only deal the dirs of current arch, total numbers of dirs are 6
- cur_arch_dirs = [m.group(2)]
- get_cur_arch_dirs(workdir, cur_arch_dirs)
-
- for workroot, dirs, files in os.walk(workdir):
- # For the files, they should NOT exist in WORKDIR. Remove them.
- for f in files:
- obsolete_dirs.append(os.path.join(workroot, f))
-
- for d in dirs:
- if d not in cur_arch_dirs:
- continue
-
- for pkgroot, pkgdirs, filenames in os.walk(os.path.join(workroot, d)):
- for f in filenames:
- obsolete_dirs.append(os.path.join(pkgroot, f))
-
- for pkgdir in sorted(pkgdirs):
- if pkgdir not in pkg_cur_dirs:
- obsolete_dirs.append(os.path.join(pkgroot, pkgdir))
- else:
- for verroot, verdirs, verfiles in os.walk(os.path.join(pkgroot, pkgdir)):
- for f in verfiles:
- obsolete_dirs.append(os.path.join(pkgroot, f))
- for v in sorted(verdirs):
- if v not in pkg_cur_dirs[pkgdir]:
- obsolete_dirs.append(os.path.join(pkgroot, pkgdir, v))
- break
-
- # just process the top dir of every package under tmp/work/*/,
- # then jump out of the above os.walk()
- break
-
- # it is convenient to use os.walk() to get dirs and files at same time
- # both of them have been dealed in the loop, so jump out
- break
-
- for d in obsolete_dirs:
- print("Deleting %s" % d)
- shutil.rmtree(d, True)
-
- if len(obsolete_dirs):
- print('\nTotal %d items.' % len(obsolete_dirs))
- else:
- print('\nNo obsolete directory found under %s.' % workdir)
-
- return 0
-
-if __name__ == '__main__':
- try:
- ret = main()
- except Exception:
- ret = 2
- import traceback
- traceback.print_exc()
- sys.exit(ret)
diff --git a/scripts/combo-layer b/scripts/combo-layer
index b90bfc8800..9b50e9873d 100755
--- a/scripts/combo-layer
+++ b/scripts/combo-layer
@@ -7,18 +7,8 @@
# Paul Eggleton <paul.eggleton@intel.com>
# Richard Purdie <richard.purdie@intel.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import fnmatch
import os, sys
@@ -246,7 +236,7 @@ def action_init(conf, args):
# traditional behavior from "git archive" (preserved
# here) it to choose the first one. This might not be
# intended, so at least warn about it.
- logger.warn("%s: initial revision '%s' not unique, picking result of rev-parse = %s" %
+ logger.warning("%s: initial revision '%s' not unique, picking result of rev-parse = %s" %
(name, initialrev, refs[0]))
initialrev = rev
except:
@@ -294,6 +284,8 @@ def action_init(conf, args):
# again. Uses the list of files created by tar (easier
# than walking the tree).
for file in files.split('\n'):
+ if file.endswith(os.path.sep):
+ continue
for pattern in exclude_patterns:
if fnmatch.fnmatch(file, pattern):
os.unlink(os.path.join(*([extract_dir] + ['..'] * subdir_components + [file])))
@@ -329,7 +321,7 @@ def action_init(conf, args):
# one. The commit should be in both repos with
# the same tree, but better check here.
tree = runcmd('git show -s --pretty=format:%%T %s' % rev).strip()
- with tempfile.NamedTemporaryFile() as editor:
+ with tempfile.NamedTemporaryFile(mode='wt') as editor:
editor.write('''cat >$1 <<EOF
tree %s
author %s
@@ -353,7 +345,7 @@ EOF
# Optional: rewrite history to change commit messages or to move files.
if 'hook' in repo or dest_dir != ".":
filter_branch = ['git', 'filter-branch', '--force']
- with tempfile.NamedTemporaryFile() as hookwrapper:
+ with tempfile.NamedTemporaryFile(mode='wt') as hookwrapper:
if 'hook' in repo:
# Create a shell script wrapper around the original hook that
# can be used by git filter-branch. Hook may or may not have
@@ -426,7 +418,7 @@ file_exclude = %s''' % (name, file_filter or '<empty>', repo.get('file_exclude',
merge.append(name)
# Root all commits which have no parent in the common
# ancestor in the new repository.
- for start in runcmd('git log --pretty=format:%%H --max-parents=0 %s' % name).split('\n'):
+ for start in runcmd('git log --pretty=format:%%H --max-parents=0 %s --' % name).split('\n'):
runcmd('git replace --graft %s %s' % (start, startrev))
try:
runcmd(merge)
@@ -1137,7 +1129,7 @@ def update_with_history(conf, components, revisions, repos):
if hook:
# Need to turn the verbatim commit message into something resembling a patch header
# for the hook.
- with tempfile.NamedTemporaryFile(delete=False) as patch:
+ with tempfile.NamedTemporaryFile(mode='wt', delete=False) as patch:
patch.write('Subject: [PATCH] ')
patch.write(body)
patch.write('\n---\n')
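
A recurring fix in the combo-layer hunks above is tempfile.NamedTemporaryFile(mode='wt'): under Python 3 the default mode is 'w+b' (binary), so writing a str to the file raises TypeError. A minimal illustration of why the mode argument is needed:

import tempfile

# Default mode is 'w+b': writing str fails under Python 3
with tempfile.NamedTemporaryFile() as f:
    try:
        f.write('Subject: [PATCH] ')
    except TypeError as exc:
        print('binary mode:', exc)

# With mode='wt' (text), as added in the patch, str writes work
with tempfile.NamedTemporaryFile(mode='wt') as f:
    f.write('Subject: [PATCH] ')
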
diff --git a/scripts/combo-layer-hook-default.sh b/scripts/combo-layer-hook-default.sh
index 1e3a3b9bc8..11547a9826 100755
--- a/scripts/combo-layer-hook-default.sh
+++ b/scripts/combo-layer-hook-default.sh
@@ -1,4 +1,7 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Hook to add source component/revision info to commit message
# Parameter:
# $1 patch-file
diff --git a/scripts/contrib/bb-perf/bb-matrix-plot.sh b/scripts/contrib/bb-perf/bb-matrix-plot.sh
index 136a25570d..e7bd129e9e 100755
--- a/scripts/contrib/bb-perf/bb-matrix-plot.sh
+++ b/scripts/contrib/bb-perf/bb-matrix-plot.sh
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script operates on the .dat file generated by bb-matrix.sh. It tolerates
diff --git a/scripts/contrib/bb-perf/bb-matrix.sh b/scripts/contrib/bb-perf/bb-matrix.sh
index 106456584d..b1fff0f344 100755
--- a/scripts/contrib/bb-perf/bb-matrix.sh
+++ b/scripts/contrib/bb-perf/bb-matrix.sh
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script runs BB_CMD (typically building core-image-sato) for all
diff --git a/scripts/contrib/bb-perf/buildstats-plot.sh b/scripts/contrib/bb-perf/buildstats-plot.sh
new file mode 100755
index 0000000000..898834e5ac
--- /dev/null
+++ b/scripts/contrib/bb-perf/buildstats-plot.sh
@@ -0,0 +1,144 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2011, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# DESCRIPTION
+#
+# Produces script data to be consumed by gnuplot. There are two possible plots
+# depending if either the -S parameter is present or not:
+#
+# * without -S: Produces a histogram listing top N recipes/tasks versus
+# stats. The first stat defined in the -s parameter is the one taken
+# into account for ranking
+# * -S: Produces a histogram listing tasks versus stats. In this case,
+# the value of each stat is the sum for that particular stat in all recipes found.
+# Stat values are sorted in descending order by the first stat given to -s
+#
+# EXAMPLES
+#
+# 1. Top recipes' tasks taking into account utime
+#
+# $ buildstats-plot.sh -s utime | gnuplot -p
+#
+# 2. Tasks versus utime:stime
+#
+# $ buildstats-plot.sh -s utime:stime -S | gnuplot -p
+#
+# 3. Tasks versus IO write_bytes:IO read_bytes
+#
+# $ buildstats-plot.sh -s 'IO write_bytes:IO read_bytes' -S | gnuplot -p
+#
+# AUTHORS
+# Leonardo Sandoval <leonardo.sandoval.gonzalez@linux.intel.com>
+#
+
+set -o nounset
+set -o errexit
+
+BS_DIR="tmp/buildstats"
+N=10
+STATS="utime"
+SUM=""
+OUTDATA_FILE="$PWD/buildstats-plot.out"
+
+function usage {
+ CMD=$(basename $0)
+ cat <<EOM
+Usage: $CMD [-b buildstats_dir] [-n N] [-s stats] [-S] [-o outdata_file] [-h]
+ -b buildstats The path where the folder resides
+ (default: "$BS_DIR")
+ -n N Top N recipes to display. Ignored if -S is present
+ (default: "$N")
+ -s stats The stats to be matched. If more than one stat is given,
+ units should be the same because data is plotted as a histogram
+ (see buildstats.sh -h for all options). Any other defined
+ (build)stat may be used, separated by colons, e.g. stime:utime
+ (default: "$STATS")
+ -S Sum values for a particular stat for found recipes
+ -o Output data file.
+ (default: "$OUTDATA_FILE")
+ -h Display this help message
+EOM
+}
+
+# Parse and validate arguments
+while getopts "b:n:s:o:Sh" OPT; do
+ case $OPT in
+ b)
+ BS_DIR="$OPTARG"
+ ;;
+ n)
+ N="$OPTARG"
+ ;;
+ s)
+ STATS="$OPTARG"
+ ;;
+ S)
+ SUM="y"
+ ;;
+ o)
+ OUTDATA_FILE="$OPTARG"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+# Get number of stats
+IFS=':'; statsarray=(${STATS}); unset IFS
+nstats=${#statsarray[@]}
+
+# Get script folder, use to run buildstats.sh
+CD=$(dirname $0)
+
+# Parse buildstats recipes to produce a single table
+OUTBUILDSTATS="$PWD/buildstats.log"
+$CD/buildstats.sh -s "$STATS" -H > $OUTBUILDSTATS
+
+# Get headers
+HEADERS=$(cat $OUTBUILDSTATS | sed -n -e '1s/ /-/g' -e '1s/:/ /gp')
+
+echo -e "set boxwidth 0.9 relative"
+echo -e "set style data histograms"
+echo -e "set style fill solid 1.0 border lt -1"
+echo -e "set xtics rotate by 45 right"
+
+# Get output data
+if [ -z "$SUM" ]; then
+ cat $OUTBUILDSTATS | sed -e '1d' | sort -k3 -n -r | head -$N > $OUTDATA_FILE
+ # include task at recipe column
+ sed -i -e "1i\
+${HEADERS}" $OUTDATA_FILE
+ echo -e "set title \"Top task/recipes\""
+ echo -e "plot for [COL=3:`expr 3 + ${nstats} - 1`] '${OUTDATA_FILE}' using COL:xtic(stringcolumn(1).' '.stringcolumn(2)) title columnheader(COL)"
+else
+
+ # Construct datamash sum argument (sum 3 sum 4 ...)
+ declare -a sumargs
+ j=0
+ for i in `seq $nstats`; do
+ sumargs[j]=sum; j=$(( $j + 1 ))
+ sumargs[j]=`expr 3 + $i - 1`; j=$(( $j + 1 ))
+ done
+
+ # Do the processing with datamash
+ cat $OUTBUILDSTATS | sed -e '1d' | datamash -t ' ' -g1 ${sumargs[*]} | sort -k2 -n -r > $OUTDATA_FILE
+
+ # Include headers into resulted file, so we can include gnuplot xtics
+ HEADERS=$(echo $HEADERS | sed -e 's/recipe//1')
+ sed -i -e "1i\
+${HEADERS}" $OUTDATA_FILE
+
+ # Plot
+ echo -e "set title \"Sum stats values per task for all recipes\""
+ echo -e "plot for [COL=2:`expr 2 + ${nstats} - 1`] '${OUTDATA_FILE}' using COL:xtic(1) title columnheader(COL)"
+fi
+
diff --git a/scripts/contrib/bb-perf/buildstats.sh b/scripts/contrib/bb-perf/buildstats.sh
index 96158a9650..e9ec2d476a 100755
--- a/scripts/contrib/bb-perf/buildstats.sh
+++ b/scripts/contrib/bb-perf/buildstats.sh
@@ -1,41 +1,44 @@
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
-# Given a 'buildstats' path (created by bitbake when setting
-# USER_CLASSES ?= "buildstats" on local.conf) and task names, outputs
-# '<task> <recipe> <elapsed time>' for all recipes. Elapsed times are in
-# seconds, and task should be given without the 'do_' prefix.
+# Given 'buildstats' data (generated by bitbake when setting
+# USER_CLASSES ?= "buildstats" in local.conf), task names and stat values
+# (those present in the buildstats files), outputs
+# '<task> <recipe> <value_1> <value_2> ... <value_n>'. The units are the ones
+# defined by buildstats, which in turn takes its data from /proc/[pid] files
#
# Some useful pipelines
#
-# 1. Tasks with largest elapsed times
-# $ buildstats.sh -b <buildstats> | sort -k3 -n -r | head
+# 1. Tasks with the largest stime values (amount of time that the process has
+#    been scheduled in kernel mode)
+# $ buildstats.sh -b <buildstats> -s stime | sort -k3 -n -r | head
#
-# 2. Min, max, sum per task (in needs GNU datamash)
-# $ buildstats.sh -b <buildstats> | datamash -t' ' -g1 min 3 max 3 sum 3 | sort -k4 -n -r
+# 2. Min, max and sum of utime (amount of time that the process has been
+#    scheduled in user mode) per task (needs GNU datamash)
+# $ buildstats.sh -b <buildstats> -s utime | datamash -t' ' -g1 min 3 max 3 sum 3 | sort -k4 -n -r
#
# AUTHORS
# Leonardo Sandoval <leonardo.sandoval.gonzalez@linux.intel.com>
#
+
+# Stats, by type
+TIME="utime:stime:cutime:cstime"
+IO="IO wchar:IO write_bytes:IO syscr:IO read_bytes:IO rchar:IO syscw:IO cancelled_write_bytes"
+RUSAGE="rusage ru_utime:rusage ru_stime:rusage ru_maxrss:rusage ru_minflt:rusage ru_majflt:\
+rusage ru_inblock:rusage ru_oublock:rusage ru_nvcsw:rusage ru_nivcsw"
+
+CHILD_RUSAGE="Child rusage ru_utime:Child rusage ru_stime:Child rusage ru_maxrss:Child rusage ru_minflt:\
+Child rusage ru_majflt:Child rusage ru_inblock:Child rusage ru_oublock:Child rusage ru_nvcsw:\
+Child rusage ru_nivcsw"
+
BS_DIR="tmp/buildstats"
TASKS="compile:configure:fetch:install:patch:populate_lic:populate_sysroot:unpack"
+STATS="$TIME"
+HEADER="" # No header by default
function usage {
CMD=$(basename $0)
@@ -45,12 +48,20 @@ Usage: $CMD [-b buildstats_dir] [-t do_task]
(default: "$BS_DIR")
-t tasks The tasks to be computed
(default: "$TASKS")
+ -s stats   The stats to be matched. Options: TIME, IO, RUSAGE, CHILD_RUSAGE
+            or any other defined buildstat, separated by colons, e.g. stime:utime
+            (default: "$STATS")
+            Default stat sets:
+                TIME=$TIME
+                IO=$IO
+                RUSAGE=$RUSAGE
+                CHILD_RUSAGE=$CHILD_RUSAGE
+ -H         Print a column header line before the data
-h Display this help message
EOM
}
# Parse and validate arguments
-while getopts "b:t:h" OPT; do
+while getopts "b:t:s:Hh" OPT; do
case $OPT in
b)
BS_DIR="$OPTARG"
@@ -58,6 +69,12 @@ while getopts "b:t:h" OPT; do
t)
TASKS="$OPTARG"
;;
+ s)
+ STATS="$OPTARG"
+ ;;
+ H)
+ HEADER="y"
+ ;;
h)
usage
exit 0
@@ -76,15 +93,50 @@ if [ ! -d "$BS_DIR" ]; then
exit 1
fi
-RECIPE_FIELD=1
-TIME_FIELD=4
+stats=""
+IFS=":"
+for stat in ${STATS}; do
+ case $stat in
+ TIME)
+ stats="${stats}:${TIME}"
+ ;;
+ IO)
+ stats="${stats}:${IO}"
+ ;;
+ RUSAGE)
+ stats="${stats}:${RUSAGE}"
+ ;;
+ CHILD_RUSAGE)
+ stats="${stats}:${CHILD_RUSAGE}"
+ ;;
+ *)
+ stats="${STATS}"
+ esac
+done
+
+# remove possible colon at the beginning
+stats="$(echo "$stats" | sed -e 's/^://1')"
+
+# Provide a header if required by the user
+[ -n "$HEADER" ] && { echo "task:recipe:$stats"; }
-tasks=(${TASKS//:/ })
-for task in "${tasks[@]}"; do
+for task in ${TASKS}; do
task="do_${task}"
- for file in $(find ${BS_DIR} -type f -name ${task}); do
- recipe=$(sed -n -e "/$task/p" ${file} | cut -d ':' -f${RECIPE_FIELD})
- time=$(sed -n -e "/$task/p" ${file} | cut -d ':' -f${TIME_FIELD} | cut -d ' ' -f2)
- echo "${task} ${recipe} ${time}"
+ for file in $(find ${BS_DIR} -type f -name ${task} | awk 'BEGIN{ ORS=""; OFS=":" } { print $0,"" }'); do
+ recipe="$(basename $(dirname $file))"
+ times=""
+ for stat in ${stats}; do
+ [ -z "$stat" ] && { echo "empty stats"; }
+ time=$(sed -n -e "s/^\($stat\): \\(.*\\)/\\2/p" $file)
+ # in case the stat is not present, set the value as NA
+ [ -z "$time" ] && { time="NA"; }
+ # Append it to times
+ if [ -z "$times" ]; then
+ times="${time}"
+ else
+ times="${times} ${time}"
+ fi
+ done
+ echo "${task} ${recipe} ${times}"
done
done
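
With the new -s and -H options the output carries one column per requested
stat; a couple of usage sketches (the default tmp/buildstats is assumed):

    $ buildstats.sh -H -s TIME | column -t
    $ buildstats.sh -s "rusage ru_maxrss" -t compile | sort -k3 -n -r | head
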
diff --git a/scripts/contrib/bbvars.py b/scripts/contrib/bbvars.py
index d8d0594776..090133600b 100755
--- a/scripts/contrib/bbvars.py
+++ b/scripts/contrib/bbvars.py
@@ -1,18 +1,6 @@
#!/usr/bin/env python3
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) Darren Hart <dvhart@linux.intel.com>, 2010
@@ -23,62 +11,38 @@ import os
import os.path
import re
+# Set up sys.path to let us import tinfoil
+scripts_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+lib_path = scripts_path + '/lib'
+sys.path.insert(0, lib_path)
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+import bb.tinfoil
+
def usage():
- print('Usage: %s -d FILENAME [-d FILENAME]* -m METADIR [-m MATADIR]*' % os.path.basename(sys.argv[0]))
+ print('Usage: %s -d FILENAME [-d FILENAME]*' % os.path.basename(sys.argv[0]))
print(' -d FILENAME documentation file to search')
print(' -h, --help display this help and exit')
- print(' -m METADIR meta directory to search for recipes')
print(' -t FILENAME documentation config file (for doc tags)')
print(' -T Only display variables with doc tags (requires -t)')
-def recipe_bbvars(recipe):
- ''' Return a unique set of every bbvar encountered in the recipe '''
- prog = re.compile("[A-Z_]+")
- vset = set()
- try:
- r = open(recipe)
- except IOError as err:
- print('WARNING: Failed to open recipe ', recipe)
- print(err.args[1])
-
- for line in r:
- # Strip any comments from the line
- line = line.rsplit('#')[0]
- vset = vset.union(set(prog.findall(line)))
- r.close()
-
- bbvars = {}
- for v in vset:
- bbvars[v] = 1
-
- return bbvars
-
-def collect_bbvars(metadir):
- ''' Walk the metadir and collect the bbvars from each recipe found '''
- bbvars = {}
- for root,dirs,files in os.walk(metadir):
- for name in files:
- if name.find(".bb") >= 0:
- for key in recipe_bbvars(os.path.join(root,name)).keys():
- if key in bbvars:
- bbvars[key] = bbvars[key] + 1
- else:
- bbvars[key] = 1
- return bbvars
-
-def bbvar_is_documented(var, docfiles):
- prog = re.compile(".*($|[^A-Z_])%s([^A-Z_]|$)" % (var))
- for doc in docfiles:
- try:
- f = open(doc)
- except IOError as err:
- print('WARNING: Failed to open doc ', doc)
- print(err.args[1])
- for line in f:
- if prog.match(line):
- return True
- f.close()
- return False
+def bbvar_is_documented(var, documented_vars):
+    ''' Check if variable (var) is in the list of documented variables (documented_vars) '''
+    return var in documented_vars
+
+def collect_documented_vars(docfiles):
+ ''' Walk the docfiles and collect the documented variables '''
+ documented_vars = []
+ prog = re.compile(".*($|[^A-Z_])<glossentry id=\'var-")
+ var_prog = re.compile('<glossentry id=\'var-(.*)\'>')
+ for d in docfiles:
+ with open(d) as f:
+ documented_vars += var_prog.findall(f.read())
+
+ return documented_vars
def bbvar_doctag(var, docconf):
prog = re.compile('^%s\[doc\] *= *"(.*)"' % (var))
@@ -100,8 +64,7 @@ def bbvar_doctag(var, docconf):
def main():
docfiles = []
- metadirs = []
- bbvars = {}
+ bbvars = set()
undocumented = []
docconf = ""
onlydoctags = False
@@ -124,12 +87,6 @@ def main():
else:
print('ERROR: documentation file %s is not a regular file' % a)
sys.exit(3)
- elif o == '-m':
- if os.path.isdir(a):
- metadirs.append(a)
- else:
- print('ERROR: meta directory %s is not a directory' % a)
- sys.exit(4)
elif o == "-t":
if os.path.isfile(a):
docconf = a
@@ -143,43 +100,68 @@ def main():
usage()
sys.exit(5)
- if len(metadirs) == 0:
- print('ERROR: no metadir specified')
- usage()
- sys.exit(6)
-
if onlydoctags and docconf == "":
print('ERROR: no docconf specified')
usage()
sys.exit(7)
- # Collect all the variable names from the recipes in the metadirs
- for m in metadirs:
- for key,cnt in collect_bbvars(m).items():
- if key in bbvars:
- bbvars[key] = bbvars[key] + cnt
+ prog = re.compile("^[^a-z]*$")
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False)
+ parser = bb.codeparser.PythonParser('parser', None)
+ datastore = tinfoil.config_data
+
+ def bbvars_update(data):
+ if prog.match(data):
+ bbvars.add(data)
+ if tinfoil.config_data.getVarFlag(data, 'python'):
+ try:
+ parser.parse_python(tinfoil.config_data.getVar(data))
+ except bb.data_smart.ExpansionError:
+ pass
+ for var in parser.references:
+ if prog.match(var):
+ bbvars.add(var)
else:
- bbvars[key] = cnt
+ try:
+ expandedVar = datastore.expandWithRefs(datastore.getVar(data, False), data)
+ for var in expandedVar.references:
+ if prog.match(var):
+ bbvars.add(var)
+ except bb.data_smart.ExpansionError:
+ pass
+
+ # Use tinfoil to collect all the variable names globally
+ for data in datastore:
+ bbvars_update(data)
+
+ # Collect variables from all recipes
+ for recipe in tinfoil.all_recipe_files(variants=False):
+ print("Checking %s" % recipe)
+ for data in tinfoil.parse_recipe_file(recipe):
+ bbvars_update(data)
+
+ documented_vars = collect_documented_vars(docfiles)
# Check each var for documentation
varlen = 0
- for v in bbvars.keys():
+ for v in bbvars:
if len(v) > varlen:
varlen = len(v)
- if not bbvar_is_documented(v, docfiles):
+ if not bbvar_is_documented(v, documented_vars):
undocumented.append(v)
undocumented.sort()
varlen = varlen + 1
# Report all undocumented variables
print('Found %d undocumented bb variables (out of %d):' % (len(undocumented), len(bbvars)))
- header = '%s%s%s' % (str("VARIABLE").ljust(varlen), str("COUNT").ljust(6), str("DOCTAG").ljust(7))
+ header = '%s%s' % (str("VARIABLE").ljust(varlen), str("DOCTAG").ljust(7))
print(header)
print(str("").ljust(len(header), '='))
for v in undocumented:
doctag = bbvar_doctag(v, docconf)
if not onlydoctags or not doctag == "":
- print('%s%s%s' % (v.ljust(varlen), str(bbvars[v]).ljust(6), doctag))
+ print('%s%s' % (v.ljust(varlen), doctag))
if __name__ == "__main__":
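
Since bbvars.py now pulls variable names through tinfoil, it has to run inside
an initialized build environment; an illustrative invocation (the documentation
paths are assumptions) might be:

    $ source oe-init-build-env
    $ ../scripts/contrib/bbvars.py -d ../documentation/ref-manual/ref-variables.xml -t ../meta/conf/documentation.conf
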
diff --git a/scripts/contrib/build-perf-test-wrapper.sh b/scripts/contrib/build-perf-test-wrapper.sh
index d61e438933..fa71d4a2e9 100755
--- a/scripts/contrib/build-perf-test-wrapper.sh
+++ b/scripts/contrib/build-perf-test-wrapper.sh
@@ -4,21 +4,16 @@
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
+# SPDX-License-Identifier: GPL-2.0-only
#
# This script is a simple wrapper around the actual build performance tester
# script. This script initializes the build environment, runs
# oe-build-perf-test and archives the results.
script=`basename $0`
+script_dir=$(realpath $(dirname $0))
+archive_dir=~/perf-results/archives
+
usage () {
cat << EOF
Usage: $script [-h] [-c COMMITISH] [-C GIT_REPO]
@@ -26,30 +21,57 @@ Usage: $script [-h] [-c COMMITISH] [-C GIT_REPO]
Optional arguments:
-h show this help and exit.
-a ARCHIVE_DIR archive results tarball here, give an empty string to
- disable tarball archiving
- -c COMMITISH test (checkout) this commit
+ disable tarball archiving (default: $archive_dir)
+ -c COMMITISH test (checkout) this commit, <branch>:<commit> can be
+ specified to test a specific commit of a certain branch
-C GIT_REPO commit results into Git
+ -d DOWNLOAD_DIR directory to store downloaded sources in
+ -E EMAIL_ADDR send email report
+ -g GLOBALRES_DIR where to place the globalres file
+ -P GIT_REMOTE push results to a remote Git repository
+ -R DEST rsync reports to a remote destination
-w WORK_DIR work dir for this script
+ (default: GIT_TOP_DIR/build-perf-test)
+ -x create xml report (instead of json)
EOF
}
+get_os_release_var () {
+ ( source /etc/os-release; eval echo '$'$1 )
+}
+
# Parse command line arguments
-archive_dir=~/perf-results/archives
commitish=""
-while getopts "ha:c:C:w:" opt; do
+oe_build_perf_test_extra_opts=()
+oe_git_archive_extra_opts=()
+while getopts "ha:c:C:d:E:g:P:R:w:x" opt; do
case $opt in
h) usage
exit 0
;;
- a) archive_dir=`realpath "$OPTARG"`
+ a) mkdir -p "$OPTARG"
+ archive_dir=`realpath -s "$OPTARG"`
;;
c) commitish=$OPTARG
;;
- C) results_repo=`realpath "$OPTARG"`
- commit_results=("--commit-results" "$results_repo")
+ C) mkdir -p "$OPTARG"
+ results_repo=`realpath -s "$OPTARG"`
+ ;;
+ d) download_dir=`realpath -s "$OPTARG"`
+ ;;
+ E) email_to="$OPTARG"
+ ;;
+ g) mkdir -p "$OPTARG"
+ globalres_dir=`realpath -s "$OPTARG"`
+ ;;
+ P) oe_git_archive_extra_opts+=("--push" "$OPTARG")
+ ;;
+ R) rsync_dst="$OPTARG"
+ ;;
+ w) base_dir=`realpath -s "$OPTARG"`
;;
- w) base_dir=`realpath "$OPTARG"`
+ x) oe_build_perf_test_extra_opts+=("--xml")
;;
*) usage
exit 1
@@ -57,6 +79,36 @@ while getopts "ha:c:C:w:" opt; do
esac
done
+# Check positional args
+shift "$((OPTIND - 1))"
+if [ $# -ne 0 ]; then
+ echo "ERROR: No positional args are accepted."
+ usage
+ exit 1
+fi
+
+if [ -n "$email_to" ]; then
+ if ! [ -x "$(command -v phantomjs)" ]; then
+ echo "ERROR: Sending email needs phantomjs."
+ exit 1
+ fi
+ if ! [ -x "$(command -v optipng)" ]; then
+ echo "ERROR: Sending email needs optipng."
+ exit 1
+ fi
+fi
+
+# Open a file descriptor for flock and acquire lock
+LOCK_FILE="/tmp/oe-build-perf-test-wrapper.lock"
+if ! exec 3> "$LOCK_FILE"; then
+ echo "ERROR: Unable to open lock file"
+ exit 1
+fi
+if ! flock -n 3; then
+ echo "ERROR: Another instance of this script is running"
+ exit 1
+fi
+
echo "Running on `uname -n`"
if ! git_topdir=$(git rev-parse --show-toplevel); then
echo "The current working dir doesn't seem to be a git clone. Please cd there before running `basename $0`"
@@ -66,28 +118,58 @@ fi
cd "$git_topdir"
if [ -n "$commitish" ]; then
- # Checkout correct revision
- echo "Checking out $commitish"
+ echo "Running git fetch"
git fetch &> /dev/null
git checkout HEAD^0 &> /dev/null
- git branch -D $commitish &> /dev/null
- if ! git checkout -f $commitish &> /dev/null; then
- echo "Git checkout failed"
+
+ # Handle <branch>:<commit> format
+ if echo "$commitish" | grep -q ":"; then
+ commit=`echo "$commitish" | cut -d":" -f2`
+ branch=`echo "$commitish" | cut -d":" -f1`
+ else
+ commit="$commitish"
+ branch="$commitish"
+ fi
+
+ echo "Checking out $commitish"
+ git branch -D $branch &> /dev/null
+ if ! git checkout -f $branch &> /dev/null; then
+ echo "ERROR: Git checkout failed"
+ exit 1
+ fi
+
+ # Check that the specified branch really contains the commit
+ commit_hash=`git rev-parse --revs-only $commit --`
+ if [ -z "$commit_hash" -o "`git merge-base $branch $commit`" != "$commit_hash" ]; then
+ echo "ERROR: branch $branch does not contain commit $commit"
exit 1
fi
+ git reset --hard $commit > /dev/null
fi
+# Determine name of the current branch
+branch=`git symbolic-ref HEAD 2> /dev/null`
+# Strip refs/heads/
+branch=${branch:11}
+
# Setup build environment
if [ -z "$base_dir" ]; then
base_dir="$git_topdir/build-perf-test"
fi
echo "Using working dir $base_dir"
+if [ -z "$download_dir" ]; then
+ download_dir="$base_dir/downloads"
+fi
+if [ -z "$globalres_dir" ]; then
+ globalres_dir="$base_dir"
+fi
+
timestamp=`date "+%Y%m%d%H%M%S"`
git_rev=$(git rev-parse --short HEAD) || exit 1
build_dir="$base_dir/build-$git_rev-$timestamp"
results_dir="$base_dir/results-$git_rev-$timestamp"
-globalres_log="$base_dir/globalres.log"
+globalres_log="$globalres_dir/globalres.log"
machine="qemux86"
mkdir -p "$base_dir"
@@ -98,7 +180,7 @@ auto_conf="$build_dir/conf/auto.conf"
echo "MACHINE = \"$machine\"" > "$auto_conf"
echo 'BB_NUMBER_THREADS = "8"' >> "$auto_conf"
echo 'PARALLEL_MAKE = "-j 8"' >> "$auto_conf"
-echo "DL_DIR = \"$base_dir/downloads\"" >> "$auto_conf"
+echo "DL_DIR = \"$download_dir\"" >> "$auto_conf"
# Disabling network sanity check slightly reduces the variance of timing results
echo 'CONNECTIVITY_CHECK_URIS = ""' >> "$auto_conf"
# Possibility to define extra settings
@@ -107,16 +189,54 @@ if [ -f "$base_dir/auto.conf.extra" ]; then
fi
# Run actual test script
-if ! oe-build-perf-test --out-dir "$results_dir" \
- --globalres-file "$globalres_log" \
- --lock-file "$base_dir/oe-build-perf.lock" \
- "${commit_results[@]}" \
- --commit-results-branch "{tester_host}/{git_branch}/$machine" \
- --commit-results-tag "{tester_host}/{git_branch}/$machine/{git_commit_count}-g{git_commit}/{tag_num}"; then
- echo "oe-build-perf-test script failed!"
- exit 1
+oe-build-perf-test --out-dir "$results_dir" \
+ --globalres-file "$globalres_log" \
+ "${oe_build_perf_test_extra_opts[@]}" \
+ --lock-file "$base_dir/oe-build-perf.lock"
+
+case $? in
+ 1) echo "ERROR: oe-build-perf-test script failed!"
+ exit 1
+ ;;
+ 2) echo "NOTE: some tests failed!"
+ ;;
+esac
+
+# Commit results to git
+if [ -n "$results_repo" ]; then
+ echo -e "\nArchiving results in $results_repo"
+ oe-git-archive \
+ --git-dir "$results_repo" \
+ --branch-name "{hostname}/{branch}/{machine}" \
+ --tag-name "{hostname}/{branch}/{machine}/{commit_count}-g{commit}/{tag_number}" \
+ --exclude "buildstats.json" \
+ --notes "buildstats/{branch_name}" "$results_dir/buildstats.json" \
+ "${oe_git_archive_extra_opts[@]}" \
+ "$results_dir"
+
+ # Generate test reports
+ sanitized_branch=`echo $branch | tr / _`
+ report_txt=`hostname`_${sanitized_branch}_${machine}.txt
+ report_html=`hostname`_${sanitized_branch}_${machine}.html
+ echo -e "\nGenerating test report"
+ oe-build-perf-report -r "$results_repo" > $report_txt
+ oe-build-perf-report -r "$results_repo" --html > $report_html
+
+ # Send email report
+ if [ -n "$email_to" ]; then
+ echo "Emailing test report"
+ os_name=`get_os_release_var PRETTY_NAME`
+ "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text $report_txt --html $report_html "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}"
+ fi
+
+ # Upload report files, unless we're on detached head
+ if [ -n "$rsync_dst" -a -n "$branch" ]; then
+ echo "Uploading test report"
+ rsync $report_txt $report_html $rsync_dst
+ fi
fi
+
echo -ne "\n\n-----------------\n"
echo "Global results file:"
echo -ne "\n"
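
A sketch of a full wrapper run, testing a specific commit of a branch and
committing, emailing and rsyncing the reports (all arguments illustrative):

    $ cd ~/poky
    $ ./scripts/contrib/build-perf-test-wrapper.sh -c master:0123abc \
          -C ~/perf-results/git -E perf@example.com -R user@host:/srv/reports
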
diff --git a/scripts/contrib/build-perf-test.sh b/scripts/contrib/build-perf-test.sh
deleted file mode 100755
index 7d99228c73..0000000000
--- a/scripts/contrib/build-perf-test.sh
+++ /dev/null
@@ -1,400 +0,0 @@
-#!/bin/bash
-#
-# This script runs a series of tests (with and without sstate) and reports build time (and tmp/ size)
-#
-# Build performance test script
-#
-# Copyright 2013 Intel Corporation
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-#
-# AUTHORS:
-# Stefan Stanacar <stefanx.stanacar@intel.com>
-
-
-ME=$(basename $0)
-
-#
-# usage and setup
-#
-
-usage () {
-cat << EOT
-Usage: $ME [-h]
- $ME [-c <commit>] [-v] [-m <val>] [-j <val>] [-t <val>] [-i <image-name>] [-d <path>]
-Options:
- -h
- Display this help and exit.
- -c <commit>
- git checkout <commit> before anything else
- -v
- Show bitbake output, don't redirect it to a log.
- -m <machine>
- Value for MACHINE. Default is qemux86.
- -j <val>
- Value for PARALLEL_MAKE. Default is 8.
- -t <val>
- Value for BB_NUMBER_THREADS. Default is 8.
- -i <image-name>
- Instead of timing against core-image-sato, use <image-name>
- -d <path>
- Use <path> as DL_DIR
- -p <githash>
- Cherry pick githash onto the commit
-
-Note: current working directory must be inside a poky git clone.
-
-EOT
-}
-
-
-if clonedir=$(git rev-parse --show-toplevel); then
- cd $clonedir
-else
- echo "The current working dir doesn't seem to be a poky git clone. Please cd there before running $ME"
- exit 1
-fi
-
-IMAGE="core-image-sato"
-verbose=0
-dldir=
-commit=
-pmake=
-cherrypicks=
-while getopts "hvc:m:j:t:i:d:p:" opt; do
- case $opt in
- h) usage
- exit 0
- ;;
- v) verbose=1
- ;;
- c) commit=$OPTARG
- ;;
- m) export MACHINE=$OPTARG
- ;;
- j) pmake=$OPTARG
- ;;
- t) export BB_NUMBER_THREADS=$OPTARG
- ;;
- i) IMAGE=$OPTARG
- ;;
- d) dldir=$OPTARG
- ;;
- p) cherrypicks="$cherrypicks $OPTARG"
- ;;
- *) usage
- exit 1
- ;;
- esac
-done
-
-
-#drop cached credentials and test for sudo access without a password
-sudo -k -n ls > /dev/null 2>&1
-reqpass=$?
-if [ $reqpass -ne 0 ]; then
- echo "The script requires sudo access to drop caches between builds (echo 3 > /proc/sys/vm/drop_caches)"
- read -s -p "Please enter your sudo password: " pass
- echo
-fi
-
-if [ -n "$commit" ]; then
- echo "git checkout -f $commit"
- git pull > /dev/null 2>&1
- git checkout -f $commit || exit 1
- git pull > /dev/null 2>&1
-fi
-
-if [ -n "$cherrypicks" ]; then
- for c in $cherrypicks; do
- git cherry-pick $c
- done
-fi
-
-rev=$(git rev-parse --short HEAD) || exit 1
-OUTDIR="$clonedir/build-perf-test/results-$rev-`date "+%Y%m%d%H%M%S"`"
-BUILDDIR="$OUTDIR/build"
-resultsfile="$OUTDIR/results.log"
-cmdoutput="$OUTDIR/commands.log"
-myoutput="$OUTDIR/output.log"
-globalres="$clonedir/build-perf-test/globalres.log"
-
-mkdir -p $OUTDIR || exit 1
-
-log () {
- local msg="$1"
- echo "`date`: $msg" | tee -a $myoutput
-}
-
-
-#
-# Config stuff
-#
-
-branch=`git branch 2>&1 | grep "^* " | tr -d "* "`
-gitcommit=$(git rev-parse HEAD) || exit 1
-log "Running on $branch:$gitcommit"
-
-source ./oe-init-build-env $OUTDIR/build >/dev/null || exit 1
-cd $OUTDIR/build
-
-[ -n "$MACHINE" ] || export MACHINE="qemux86"
-[ -n "$BB_NUMBER_THREADS" ] || export BB_NUMBER_THREADS="8"
-
-if [ -n "$pmake" ]; then
- export PARALLEL_MAKE="-j $pmake"
-else
- export PARALLEL_MAKE="-j 8"
-fi
-
-if [ -n "$dldir" ]; then
- echo "DL_DIR = \"$dldir\"" >> conf/local.conf
-else
- echo "DL_DIR = \"$clonedir/build-perf-test/downloads\"" >> conf/local.conf
-fi
-
-# Sometimes I've noticed big differences in timings for the same commit, on the same machine
-# Disabling the network sanity check helps a bit (because of my crappy network connection and/or proxy)
-echo "CONNECTIVITY_CHECK_URIS =\"\"" >> conf/local.conf
-
-
-#
-# Functions
-#
-
-declare -a TIMES
-time_count=0
-declare -a SIZES
-size_count=0
-
-time_cmd () {
- log " Timing: $*"
-
- if [ $verbose -eq 0 ]; then
- /usr/bin/time -v -o $resultsfile "$@" >> $cmdoutput
- else
- /usr/bin/time -v -o $resultsfile "$@"
- fi
- ret=$?
- if [ $ret -eq 0 ]; then
- t=`grep wall $resultsfile | sed 's/.*m:ss): //'`
- log " TIME: $t"
- TIMES[(( time_count++ ))]="$t"
- else
- log "ERROR: exit status was non-zero, will report time as 0."
- TIMES[(( time_count++ ))]="0"
- fi
-
- #time by default overwrites the output file and we want to keep the results
- #it has an append option but I don't want to clobber the results in the same file
- i=`ls $OUTDIR/results.log* |wc -l`
- mv $resultsfile "${resultsfile}.${i}"
- log "More stats can be found in ${resultsfile}.${i}"
-}
-
-bbtime () {
- time_cmd bitbake "$@"
-}
-
-#we don't time bitbake here
-bbnotime () {
- local arg="$@"
- log " Running: bitbake ${arg}"
- if [ $verbose -eq 0 ]; then
- bitbake ${arg} >> $cmdoutput
- else
- bitbake ${arg}
- fi
- ret=$?
- if [ $ret -eq 0 ]; then
- log " Finished bitbake ${arg}"
- else
- log "ERROR: exit status was non-zero. Exit.."
- exit $ret
- fi
-
-}
-
-do_rmtmp() {
- log " Removing tmp"
- rm -rf bitbake.lock pseudodone conf/sanity_info cache tmp
-}
-do_rmsstate () {
- log " Removing sstate-cache"
- rm -rf sstate-cache
-}
-do_sync () {
- log " Syncing and dropping caches"
- sync; sync
- if [ $reqpass -eq 0 ]; then
- sudo sh -c "echo 3 > /proc/sys/vm/drop_caches"
- else
- echo "$pass" | sudo -S sh -c "echo 3 > /proc/sys/vm/drop_caches"
- echo
- fi
- sleep 3
-}
-
-write_results() {
- echo -n "`uname -n`,$branch:$gitcommit,`git describe`," >> $globalres
- for i in "${TIMES[@]}"; do
- echo -n "$i," >> $globalres
- done
- for i in "${SIZES[@]}"; do
- echo -n "$i," >> $globalres
- done
- echo >> $globalres
- sed -i '$ s/,$//' $globalres
-}
-
-####
-
-#
-# Test 1
-# Measure: Wall clock of "bitbake core-image-sato" and size of tmp/dir (w/o rm_work and w/ rm_work)
-# Pre: Downloaded sources, no sstate
-# Steps:
-# Part1:
-# - fetchall
-# - clean build dir
-# - time bitbake core-image-sato
-# - collect data
-# Part2:
-# - bitbake virtual/kernel -c cleansstate
-# - time bitbake virtual/kernel
-# Part3:
-# - add INHERIT to local.conf
-# - clean build dir
-# - build
-# - report size, remove INHERIT
-
-test1_p1 () {
- log "Running Test 1, part 1/3: Measure wall clock of bitbake $IMAGE and size of tmp/ dir"
- bbnotime $IMAGE -c fetchall
- do_rmtmp
- do_rmsstate
- do_sync
- bbtime $IMAGE
- s=`du -s tmp | sed 's/tmp//' | sed 's/[ \t]*$//'`
- SIZES[(( size_count++ ))]="$s"
- log "SIZE of tmp dir is: $s"
- log "Buildstats are saved in $OUTDIR/buildstats-test1"
- mv tmp/buildstats $OUTDIR/buildstats-test1
-}
-
-
-test1_p2 () {
- log "Running Test 1, part 2/3: bitbake virtual/kernel -c cleansstate and time bitbake virtual/kernel"
- bbnotime virtual/kernel -c cleansstate
- do_sync
- bbtime virtual/kernel
-}
-
-test1_p3 () {
- log "Running Test 1, part 3/3: Build $IMAGE w/o sstate and report size of tmp/dir with rm_work enabled"
- echo "INHERIT += \"rm_work\"" >> conf/local.conf
- do_rmtmp
- do_rmsstate
- do_sync
- bbtime $IMAGE
- sed -i 's/INHERIT += \"rm_work\"//' conf/local.conf
- s=`du -s tmp | sed 's/tmp//' | sed 's/[ \t]*$//'`
- SIZES[(( size_count++ ))]="$s"
- log "SIZE of tmp dir is: $s"
- log "Buildstats are saved in $OUTDIR/buildstats-test13"
- mv tmp/buildstats $OUTDIR/buildstats-test13
-}
-
-
-#
-# Test 2
-# Measure: Wall clock of "bitbake core-image-sato" and size of tmp/dir
-# Pre: populated sstate cache
-
-test2 () {
- # Assuming test 1 has run
- log "Running Test 2: Measure wall clock of bitbake $IMAGE -c rootfs with sstate"
- do_rmtmp
- do_sync
- bbtime $IMAGE -c rootfs
-}
-
-
-# Test 3
-# parsing time metrics
-#
-# Start with
-# i) "rm -rf tmp/cache; time bitbake -p"
-# ii) "rm -rf tmp/cache/default-glibc/; time bitbake -p"
-# iii) "time bitbake -p"
-
-
-test3 () {
- log "Running Test 3: Parsing time metrics (bitbake -p)"
- log " Removing tmp/cache && cache"
- rm -rf tmp/cache cache
- bbtime -p
- log " Removing tmp/cache/default-glibc/"
- rm -rf tmp/cache/default-glibc/
- bbtime -p
- bbtime -p
-}
-
-#
-# Test 4 - eSDK
-# Measure: eSDK size and installation time
-test4 () {
- log "Running Test 4: eSDK size and installation time"
- bbnotime $IMAGE -c do_populate_sdk_ext
-
- esdk_installer=(tmp/deploy/sdk/*-toolchain-ext-*.sh)
-
- if [ ${#esdk_installer[*]} -eq 1 ]; then
- s=$((`stat -c %s "$esdk_installer"` / 1024))
- SIZES[(( size_count++ ))]="$s"
- log "Download SIZE of eSDK is: $s kB"
-
- do_sync
- time_cmd "$esdk_installer" -y -d "tmp/esdk-deploy"
-
- s=$((`du -sb "tmp/esdk-deploy" | cut -f1` / 1024))
- SIZES[(( size_count++ ))]="$s"
- log "Install SIZE of eSDK is: $s kB"
- else
- log "ERROR: other than one sdk found (${esdk_installer[*]}), reporting size and time as 0."
- SIZES[(( size_count++ ))]="0"
- TIMES[(( time_count++ ))]="0"
- fi
-
-}
-
-
-# RUN!
-
-test1_p1
-test1_p2
-test1_p3
-test2
-test3
-test4
-
-# if we got til here write to global results
-write_results
-
-log "All done, cleaning up..."
-
-do_rmtmp
-do_rmsstate
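
The ad-hoc measurements of the deleted script are handled by the
oe-build-perf-test framework that the wrapper above invokes; a minimal
standalone run, assuming an initialized build environment, might be:

    $ oe-build-perf-test --out-dir results --globalres-file globalres.log
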
diff --git a/scripts/contrib/ddimage b/scripts/contrib/ddimage
index ab929957a5..7f2ad112a6 100755
--- a/scripts/contrib/ddimage
+++ b/scripts/contrib/ddimage
@@ -1,8 +1,7 @@
#!/bin/sh
-
-# Default to avoiding the first two disks on typical Linux and Mac OS installs
-# Better safe than sorry :-)
-BLACKLIST_DEVICES="/dev/sda /dev/sdb /dev/disk1 /dev/disk2"
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# 1MB blocksize
BLOCKSIZE=1048576
@@ -29,7 +28,6 @@ image_details() {
}
device_details() {
- DEV=$1
BLOCK_SIZE=512
echo "Device details"
@@ -42,11 +40,17 @@ device_details() {
fi
# Default / Linux information collection
- echo " device: $DEVICE"
+ ACTUAL_DEVICE=`readlink -f $DEVICE`
+ DEV=`basename $ACTUAL_DEVICE`
+ if [ "$ACTUAL_DEVICE" != "$DEVICE" ] ; then
+ echo " device: $DEVICE -> $ACTUAL_DEVICE"
+ else
+ echo " device: $DEVICE"
+ fi
if [ -f "/sys/class/block/$DEV/device/vendor" ]; then
echo " vendor: $(cat /sys/class/block/$DEV/device/vendor)"
else
- echo " vendor: UNKOWN"
+ echo " vendor: UNKNOWN"
fi
if [ -f "/sys/class/block/$DEV/device/model" ]; then
echo " model: $(cat /sys/class/block/$DEV/device/model)"
@@ -61,6 +65,49 @@ device_details() {
echo ""
}
+check_mount_device() {
+ if cat /proc/self/mounts | awk '{ print $1 }' | grep /dev/ | grep -q -E "^$1$" ; then
+ return 0
+ fi
+ return 1
+}
+
+is_mounted() {
+ if [ "$(uname)" = "Darwin" ]; then
+ if df | awk '{ print $1 }' | grep /dev/ | grep -q -E "^$1(s[0-9]+)?$" ; then
+ return 0
+ fi
+ else
+ if check_mount_device $1 ; then
+ return 0
+ fi
+ DEV=`basename $1`
+ if [ -d /sys/class/block/$DEV/ ] ; then
+ PARENT_BLKDEV=`basename $(readlink -f "/sys/class/block/$DEV/..")`
+ if [ "$PARENT_BLKDEV" != "block" ] ; then
+ if check_mount_device /dev/$PARENT_BLKDEV ; then
+ return 0
+ fi
+ fi
+ for CHILD_BLKDEV in `find /sys/class/block/$DEV/ -mindepth 1 -maxdepth 1 -name "$DEV*" -type d`
+ do
+ if check_mount_device /dev/`basename $CHILD_BLKDEV` ; then
+ return 0
+ fi
+ done
+ fi
+ fi
+ return 1
+}
+
+is_inuse() {
+ HOLDERS_DIR="/sys/class/block/`basename $1`/holders"
+ if [ -d $HOLDERS_DIR ] && [ -n "`ls -A $HOLDERS_DIR`" ] ; then
+ return 0
+ fi
+ return 1
+}
+
if [ $# -ne 2 ]; then
usage
exit 1
@@ -75,22 +122,37 @@ if [ ! -e "$IMAGE" ]; then
exit 1
fi
+if [ ! -e "$DEVICE" ]; then
+ echo "ERROR: Device $DEVICE does not exist"
+ usage
+ exit 1
+fi
-for i in ${BLACKLIST_DEVICES}; do
- if [ "$i" = "$DEVICE" ]; then
- echo "ERROR: Device $DEVICE is blacklisted"
- exit 1
- fi
-done
+if [ "$(uname)" = "Darwin" ]; then
+ # readlink doesn't support -f on MacOS, just assume it isn't a symlink
+ ACTUAL_DEVICE=$DEVICE
+else
+ ACTUAL_DEVICE=`readlink -f $DEVICE`
+fi
+if is_mounted $ACTUAL_DEVICE ; then
+ echo "ERROR: Device $DEVICE is currently mounted - check if this is the right device, and unmount it first if so"
+ device_details
+ exit 1
+fi
+if is_inuse $ACTUAL_DEVICE ; then
+ echo "ERROR: Device $DEVICE is currently in use (possibly part of LVM) - check if this is the right device!"
+ device_details
+ exit 1
+fi
if [ ! -w "$DEVICE" ]; then
- echo "ERROR: Device $DEVICE does not exist or is not writable"
+ echo "ERROR: Device $DEVICE is not writable - possibly use sudo?"
usage
exit 1
fi
image_details $IMAGE
-device_details $(basename $DEVICE)
+device_details
printf "Write $IMAGE to $DEVICE [y/N]? "
read RESPONSE
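
With the new checks in place, writing to a mounted device is refused up front;
for example (device and image names are illustrative):

    $ sudo ./scripts/contrib/ddimage core-image-minimal-qemux86.hddimg /dev/sdc
    ERROR: Device /dev/sdc is currently mounted - check if this is the right device, and unmount it first if so
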
diff --git a/scripts/contrib/devtool-stress.py b/scripts/contrib/devtool-stress.py
index d555c51a65..81046ecf49 100755
--- a/scripts/contrib/devtool-stress.py
+++ b/scripts/contrib/devtool-stress.py
@@ -6,18 +6,7 @@
#
# Copyright 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
diff --git a/scripts/contrib/dialog-power-control b/scripts/contrib/dialog-power-control
index 7550ea53be..ad6070c369 100755
--- a/scripts/contrib/dialog-power-control
+++ b/scripts/contrib/dialog-power-control
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Simple script to show a manual power prompt for when you want to use
# automated hardware testing with testimage.bbclass but you don't have a
# web-enabled power strip or similar to do the power on/off/cycle.
diff --git a/scripts/contrib/documentation-audit.sh b/scripts/contrib/documentation-audit.sh
index 2144aac936..1191f57a8e 100755
--- a/scripts/contrib/documentation-audit.sh
+++ b/scripts/contrib/documentation-audit.sh
@@ -1,5 +1,7 @@
#!/bin/bash
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Perform an audit of which packages provide documentation and which
# are missing -doc packages.
#
@@ -7,7 +9,6 @@
# this script after source'ing the build environment script, so you're
# running it from build/ directory.
#
-# Maintainer: Scott Garman <scott.a.garman@intel.com>
REPORT_DOC_SIMPLE="documentation_exists.txt"
REPORT_DOC_DETAIL="documentation_exists_detail.txt"
diff --git a/scripts/contrib/graph-tool b/scripts/contrib/graph-tool
index 1df5b8c345..6d2e68b82e 100755
--- a/scripts/contrib/graph-tool
+++ b/scripts/contrib/graph-tool
@@ -7,18 +7,7 @@
#
# Copyright 2013 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
diff --git a/scripts/contrib/list-packageconfig-flags.py b/scripts/contrib/list-packageconfig-flags.py
index 389fb97f67..d6de4dc84d 100755
--- a/scripts/contrib/list-packageconfig-flags.py
+++ b/scripts/contrib/list-packageconfig-flags.py
@@ -1,21 +1,10 @@
#!/usr/bin/env python3
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation.
-#
# Copyright (C) 2013 Wind River Systems, Inc.
# Copyright (C) 2014 Intel Corporation
#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
# - list available recipes which have PACKAGECONFIG flags
# - list available PACKAGECONFIG flags and all affected recipes
# - list all recipes and PACKAGECONFIG information
@@ -76,7 +65,7 @@ def collect_pkgs(data_dict):
for fn in data_dict:
pkgconfigflags = data_dict[fn].getVarFlags("PACKAGECONFIG")
pkgconfigflags.pop('doc', None)
- pkgname = data_dict[fn].getVar("P", True)
+ pkgname = data_dict[fn].getVar("PN")
pkg_dict[pkgname] = sorted(pkgconfigflags.keys())
return pkg_dict
@@ -124,9 +113,9 @@ def display_all(data_dict):
''' Display all pkgs and PACKAGECONFIG information '''
print(str("").ljust(50, '='))
for fn in data_dict:
- print('%s' % data_dict[fn].getVar("P", True))
+ print('%s' % data_dict[fn].getVar("P"))
print(fn)
- packageconfig = data_dict[fn].getVar("PACKAGECONFIG", True) or ''
+ packageconfig = data_dict[fn].getVar("PACKAGECONFIG") or ''
if packageconfig.strip() == '':
packageconfig = 'None'
print('PACKAGECONFIG %s' % packageconfig)
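
The script reads recipe data through bitbake, so it is run from an initialized
build directory; with no options it lists the recipes that define PACKAGECONFIG
flags:

    $ cd build
    $ ../scripts/contrib/list-packageconfig-flags.py
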
diff --git a/scripts/contrib/mkefidisk.sh b/scripts/contrib/mkefidisk.sh
deleted file mode 100755
index d8db3c0165..0000000000
--- a/scripts/contrib/mkefidisk.sh
+++ /dev/null
@@ -1,459 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2012, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-LANG=C
-
-# Set to 1 to enable additional output
-DEBUG=0
-OUT="/dev/null"
-
-#
-# Defaults
-#
-# 20 Mb for the boot partition
-BOOT_SIZE=20
-# 5% for swap
-SWAP_RATIO=5
-
-# Cleanup after die()
-cleanup() {
- debug "Syncing and unmounting devices"
- # Unmount anything we mounted
- unmount $ROOTFS_MNT || error "Failed to unmount $ROOTFS_MNT"
- unmount $BOOTFS_MNT || error "Failed to unmount $BOOTFS_MNT"
- unmount $HDDIMG_ROOTFS_MNT || error "Failed to unmount $HDDIMG_ROOTFS_MNT"
- unmount $HDDIMG_MNT || error "Failed to unmount $HDDIMG_MNT"
-
- # Remove the TMPDIR
- debug "Removing temporary files"
- if [ -d "$TMPDIR" ]; then
- rm -rf $TMPDIR || error "Failed to remove $TMPDIR"
- fi
-}
-
-trap 'die "Signal Received, Aborting..."' HUP INT TERM
-
-# Logging routines
-WARNINGS=0
-ERRORS=0
-CLEAR="$(tput sgr0)"
-INFO="$(tput bold)"
-RED="$(tput setaf 1)$(tput bold)"
-GREEN="$(tput setaf 2)$(tput bold)"
-YELLOW="$(tput setaf 3)$(tput bold)"
-info() {
- echo "${INFO}$1${CLEAR}"
-}
-error() {
- ERRORS=$((ERRORS+1))
- echo "${RED}$1${CLEAR}"
-}
-warn() {
- WARNINGS=$((WARNINGS+1))
- echo "${YELLOW}$1${CLEAR}"
-}
-success() {
- echo "${GREEN}$1${CLEAR}"
-}
-die() {
- error "$1"
- cleanup
- exit 1
-}
-debug() {
- if [ $DEBUG -eq 1 ]; then
- echo "$1"
- fi
-}
-
-usage() {
- echo "Usage: $(basename $0) [-v] DEVICE HDDIMG TARGET_DEVICE"
- echo " -v: Verbose debug"
- echo " DEVICE: The device to write the image to, e.g. /dev/sdh"
- echo " HDDIMG: The hddimg file to generate the efi disk from"
- echo " TARGET_DEVICE: The device the target will boot from, e.g. /dev/mmcblk0"
-}
-
-image_details() {
- IMG=$1
- info "Image details"
- echo " image: $(stat --printf '%N\n' $IMG)"
- echo " size: $(stat -L --printf '%s bytes\n' $IMG)"
- echo " modified: $(stat -L --printf '%y\n' $IMG)"
- echo " type: $(file -L -b $IMG)"
- echo ""
-}
-
-device_details() {
- DEV=$1
- BLOCK_SIZE=512
-
- info "Device details"
- echo " device: $DEVICE"
- if [ -f "/sys/class/block/$DEV/device/vendor" ]; then
- echo " vendor: $(cat /sys/class/block/$DEV/device/vendor)"
- else
- echo " vendor: UNKOWN"
- fi
- if [ -f "/sys/class/block/$DEV/device/model" ]; then
- echo " model: $(cat /sys/class/block/$DEV/device/model)"
- else
- echo " model: UNKNOWN"
- fi
- if [ -f "/sys/class/block/$DEV/size" ]; then
- echo " size: $(($(cat /sys/class/block/$DEV/size) * $BLOCK_SIZE)) bytes"
- else
- echo " size: UNKNOWN"
- fi
- echo ""
-}
-
-unmount_device() {
- grep -q $DEVICE /proc/mounts
- if [ $? -eq 0 ]; then
- warn "$DEVICE listed in /proc/mounts, attempting to unmount"
- umount $DEVICE* 2>/dev/null
- return $?
- fi
- return 0
-}
-
-unmount() {
- if [ "$1" = "" ] ; then
- return 0
- fi
- grep -q $1 /proc/mounts
- if [ $? -eq 0 ]; then
- debug "Unmounting $1"
- umount $1
- return $?
- fi
- return 0
-}
-
-#
-# Parse and validate arguments
-#
-if [ $# -lt 3 ] || [ $# -gt 4 ]; then
- if [ $# -eq 1 ]; then
- AVAILABLE_DISK=`lsblk | grep "disk" | cut -f 1 -d " "`
- X=0
- for disk in `echo $AVAILABLE_DISK`; do
- mounted=`lsblk /dev/$disk | awk {'print $7'} | sed "s/MOUNTPOINT//"`
- if [ -z "$mounted" ]; then
- UNMOUNTED_AVAILABLES="$UNMOUNTED_AVAILABLES /dev/$disk"
- info "$X - /dev/$disk"
- X=`expr $X + 1`
- fi
- done
- if [ $X -eq 0 ]; then
- die "No unmounted device found."
- fi
- read -p "Choose unmounted device number: " DISK_NUMBER
- X=0
- for line in `echo $UNMOUNTED_AVAILABLES`; do
- if [ $DISK_NUMBER -eq $X ]; then
- DISK_TO_BE_FLASHED=$line
- break
- else
- X=`expr $X + 1`
- fi
- done
- if [ -z "$DISK_TO_BE_FLASHED" ]; then
- die "Option \"$DISK_NUMBER\" is invalid. Choose a valid option"
- else
- if [ -z `echo $DISK_TO_BE_FLASHED | grep "mmc"` ]; then
- TARGET_TO_BE_BOOT="/dev/sda"
- else
- TARGET_TO_BE_BOOT="/dev/mmcblk0"
- fi
- fi
- echo ""
- echo "Choose a name of the device that will be boot from"
- echo -n "Recommended name is: "
- info "$TARGET_TO_BE_BOOT"
- read -p "Is target device okay? [y/N]: " RESPONSE
- if [ "$RESPONSE" != "y" ]; then
- read -p "Choose target device name: " TARGET_TO_BE_BOOT
- fi
- echo ""
- if [ -z "$TARGET_TO_BE_BOOT" ]; then
- die "Error: choose a valid target name"
- fi
- else
- usage
- exit 1
- fi
-fi
-
-if [ "$1" = "-v" ]; then
- DEBUG=1
- OUT="1"
- shift
-fi
-
-if [ -z "$AVAILABLE_DISK" ]; then
- DEVICE=$1
- HDDIMG=$2
- TARGET_DEVICE=$3
-else
- DEVICE=$DISK_TO_BE_FLASHED
- HDDIMG=$1
- TARGET_DEVICE=$TARGET_TO_BE_BOOT
-fi
-
-LINK=$(readlink $DEVICE)
-if [ $? -eq 0 ]; then
- DEVICE="$LINK"
-fi
-
-if [ ! -w "$DEVICE" ]; then
- usage
- if [ ! -e "${DEVICE}" ] ; then
- die "Device $DEVICE cannot be found"
- else
- die "Device $DEVICE is not writable (need to run under sudo?)"
- fi
-fi
-
-if [ ! -e "$HDDIMG" ]; then
- usage
- die "HDDIMG $HDDIMG does not exist"
-fi
-
-#
-# Ensure the hddimg is not mounted
-#
-unmount "$HDDIMG" || die "Failed to unmount $HDDIMG"
-
-#
-# Check if any $DEVICE partitions are mounted
-#
-unmount_device || die "Failed to unmount $DEVICE"
-
-#
-# Confirm device with user
-#
-image_details $HDDIMG
-device_details $(basename $DEVICE)
-echo -n "${INFO}Prepare EFI image on $DEVICE [y/N]?${CLEAR} "
-read RESPONSE
-if [ "$RESPONSE" != "y" ]; then
- echo "Image creation aborted"
- exit 0
-fi
-
-
-#
-# Prepare the temporary working space
-#
-TMPDIR=$(mktemp -d mkefidisk-XXX) || die "Failed to create temporary mounting directory."
-HDDIMG_MNT=$TMPDIR/hddimg
-HDDIMG_ROOTFS_MNT=$TMPDIR/hddimg_rootfs
-ROOTFS_MNT=$TMPDIR/rootfs
-BOOTFS_MNT=$TMPDIR/bootfs
-mkdir $HDDIMG_MNT || die "Failed to create $HDDIMG_MNT"
-mkdir $HDDIMG_ROOTFS_MNT || die "Failed to create $HDDIMG_ROOTFS_MNT"
-mkdir $ROOTFS_MNT || die "Failed to create $ROOTFS_MNT"
-mkdir $BOOTFS_MNT || die "Failed to create $BOOTFS_MNT"
-
-
-#
-# Partition $DEVICE
-#
-DEVICE_SIZE=$(parted -s $DEVICE unit mb print | grep ^Disk | cut -d" " -f 3 | sed -e "s/MB//")
-# If the device size is not reported there may not be a valid label
-if [ "$DEVICE_SIZE" = "" ] ; then
- parted -s $DEVICE mklabel msdos || die "Failed to create MSDOS partition table"
- DEVICE_SIZE=$(parted -s $DEVICE unit mb print | grep ^Disk | cut -d" " -f 3 | sed -e "s/MB//")
-fi
-SWAP_SIZE=$((DEVICE_SIZE*SWAP_RATIO/100))
-ROOTFS_SIZE=$((DEVICE_SIZE-BOOT_SIZE-SWAP_SIZE))
-ROOTFS_START=$((BOOT_SIZE))
-ROOTFS_END=$((ROOTFS_START+ROOTFS_SIZE))
-SWAP_START=$((ROOTFS_END))
-
-# MMC devices use a partition prefix character 'p'
-PART_PREFIX=""
-if [ ! "${DEVICE#/dev/mmcblk}" = "${DEVICE}" ] || [ ! "${DEVICE#/dev/loop}" = "${DEVICE}" ]; then
- PART_PREFIX="p"
-fi
-BOOTFS=$DEVICE${PART_PREFIX}1
-ROOTFS=$DEVICE${PART_PREFIX}2
-SWAP=$DEVICE${PART_PREFIX}3
-
-TARGET_PART_PREFIX=""
-if [ ! "${TARGET_DEVICE#/dev/mmcblk}" = "${TARGET_DEVICE}" ]; then
- TARGET_PART_PREFIX="p"
-fi
-TARGET_ROOTFS=$TARGET_DEVICE${TARGET_PART_PREFIX}2
-TARGET_SWAP=$TARGET_DEVICE${TARGET_PART_PREFIX}3
-
-echo ""
-info "Boot partition size: $BOOT_SIZE MB ($BOOTFS)"
-info "ROOTFS partition size: $ROOTFS_SIZE MB ($ROOTFS)"
-info "Swap partition size: $SWAP_SIZE MB ($SWAP)"
-echo ""
-
-# Use MSDOS by default as GPT cannot be reliably distributed in disk image form
-# as it requires the backup table to be on the last block of the device, which
-# of course varies from device to device.
-
-info "Partitioning installation media ($DEVICE)"
-
-debug "Deleting partition table on $DEVICE"
-dd if=/dev/zero of=$DEVICE bs=512 count=2 >$OUT 2>&1 || die "Failed to zero beginning of $DEVICE"
-
-debug "Creating new partition table (MSDOS) on $DEVICE"
-parted -s $DEVICE mklabel msdos >$OUT 2>&1 || die "Failed to create MSDOS partition table"
-
-debug "Creating boot partition on $BOOTFS"
-parted -s $DEVICE mkpart primary 0% $BOOT_SIZE >$OUT 2>&1 || die "Failed to create BOOT partition"
-
-debug "Enabling boot flag on $BOOTFS"
-parted -s $DEVICE set 1 boot on >$OUT 2>&1 || die "Failed to enable boot flag"
-
-debug "Creating ROOTFS partition on $ROOTFS"
-parted -s $DEVICE mkpart primary $ROOTFS_START $ROOTFS_END >$OUT 2>&1 || die "Failed to create ROOTFS partition"
-
-debug "Creating swap partition on $SWAP"
-parted -s $DEVICE mkpart primary $SWAP_START 100% >$OUT 2>&1 || die "Failed to create SWAP partition"
-
-if [ $DEBUG -eq 1 ]; then
- parted -s $DEVICE print
-fi
-
-
-#
-# Check if any $DEVICE partitions are mounted after partitioning
-#
-unmount_device || die "Failed to unmount $DEVICE partitions"
-
-
-#
-# Format $DEVICE partitions
-#
-info "Formatting partitions"
-debug "Formatting $BOOTFS as vfat"
-if [ ! "${DEVICE#/dev/loop}" = "${DEVICE}" ]; then
- mkfs.vfat -I $BOOTFS -n "EFI" >$OUT 2>&1 || die "Failed to format $BOOTFS"
-else
- mkfs.vfat $BOOTFS -n "EFI" >$OUT 2>&1 || die "Failed to format $BOOTFS"
-fi
-
-debug "Formatting $ROOTFS as ext3"
-mkfs.ext3 -F $ROOTFS -L "ROOT" >$OUT 2>&1 || die "Failed to format $ROOTFS"
-
-debug "Formatting swap partition ($SWAP)"
-mkswap $SWAP >$OUT 2>&1 || die "Failed to prepare swap"
-
-
-#
-# Installing to $DEVICE
-#
-debug "Mounting images and device in preparation for installation"
-mount -o ro,loop $HDDIMG $HDDIMG_MNT >$OUT 2>&1 || error "Failed to mount $HDDIMG"
-mount -o ro,loop $HDDIMG_MNT/rootfs.img $HDDIMG_ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount rootfs.img"
-mount $ROOTFS $ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount $ROOTFS on $ROOTFS_MNT"
-mount $BOOTFS $BOOTFS_MNT >$OUT 2>&1 || error "Failed to mount $BOOTFS on $BOOTFS_MNT"
-
-info "Preparing boot partition"
-EFIDIR="$BOOTFS_MNT/EFI/BOOT"
-cp $HDDIMG_MNT/vmlinuz $BOOTFS_MNT >$OUT 2>&1 || error "Failed to copy vmlinuz"
-# Copy the efi loader and configs (booti*.efi and grub.cfg if it exists)
-cp -r $HDDIMG_MNT/EFI $BOOTFS_MNT >$OUT 2>&1 || error "Failed to copy EFI dir"
-# Silently ignore a missing gummiboot loader dir (we might just be a GRUB image)
-cp -r $HDDIMG_MNT/loader $BOOTFS_MNT >$OUT 2>&1
-
-# Update the boot loaders configurations for an installed image
-# Remove any existing root= kernel parameters and:
-# o Add a root= parameter with the target rootfs
-# o Specify ro so fsck can be run during boot
-# o Specify rootwait in case the target media is an asyncronous block device
-# such as MMC or USB disks
-# o Specify "quiet" to minimize boot time when using slow serial consoles
-
-# Look for a GRUB installation
-GRUB_CFG="$EFIDIR/grub.cfg"
-if [ -e "$GRUB_CFG" ]; then
- info "Configuring GRUB"
- # Delete the install entry
- sed -i "/menuentry 'install'/,/^}/d" $GRUB_CFG
- # Delete the initrd lines
- sed -i "/initrd /d" $GRUB_CFG
- # Delete any LABEL= strings
- sed -i "s/ LABEL=[^ ]*/ /" $GRUB_CFG
-
- sed -i "s@ root=[^ ]*@ @" $GRUB_CFG
- sed -i "s@vmlinuz @vmlinuz root=$TARGET_ROOTFS ro rootwait console=ttyS0 console=tty0 @" $GRUB_CFG
-fi
-
-# Look for a gummiboot installation
-GUMMI_ENTRIES="$BOOTFS_MNT/loader/entries"
-GUMMI_CFG="$GUMMI_ENTRIES/boot.conf"
-if [ -d "$GUMMI_ENTRIES" ]; then
- info "Configuring Gummiboot"
- # remove the install target if it exists
- rm $GUMMI_ENTRIES/install.conf >$OUT 2>&1
-
- if [ ! -e "$GUMMI_CFG" ]; then
- echo "ERROR: $GUMMI_CFG not found"
- fi
-
- sed -i "/initrd /d" $GUMMI_CFG
- sed -i "s@ root=[^ ]*@ @" $GUMMI_CFG
- sed -i "s@options *LABEL=boot @options LABEL=Boot root=$TARGET_ROOTFS ro rootwait console=ttyS0 console=tty0 @" $GUMMI_CFG
-fi
-
-# Ensure we have at least one EFI bootloader configured
-if [ ! -e $GRUB_CFG ] && [ ! -e $GUMMI_CFG ]; then
- die "No EFI bootloader configuration found"
-fi
-
-
-info "Copying ROOTFS files (this may take a while)"
-cp -a $HDDIMG_ROOTFS_MNT/* $ROOTFS_MNT >$OUT 2>&1 || die "Root FS copy failed"
-
-echo "$TARGET_SWAP swap swap defaults 0 0" >> $ROOTFS_MNT/etc/fstab
-
-# We dont want udev to mount our root device while we're booting...
-if [ -d $ROOTFS_MNT/etc/udev/ ] ; then
- echo "$TARGET_DEVICE" >> $ROOTFS_MNT/etc/udev/mount.blacklist
-fi
-
-# Add startup.nsh script for automated boot
-echo "fs0:\EFI\BOOT\bootx64.efi" > $BOOTFS_MNT/startup.nsh
-
-
-# Call cleanup to unmount devices and images and remove the TMPDIR
-cleanup
-
-echo ""
-if [ $WARNINGS -ne 0 ] && [ $ERRORS -eq 0 ]; then
- echo "${YELLOW}Installation completed with warnings${CLEAR}"
- echo "${YELLOW}Warnings: $WARNINGS${CLEAR}"
-elif [ $ERRORS -ne 0 ]; then
- echo "${RED}Installation encountered errors${CLEAR}"
- echo "${RED}Errors: $ERRORS${CLEAR}"
- echo "${YELLOW}Warnings: $WARNINGS${CLEAR}"
-else
- success "Installation completed successfully"
-fi
-echo ""
diff --git a/scripts/contrib/oe-build-perf-report-email.py b/scripts/contrib/oe-build-perf-report-email.py
new file mode 100755
index 0000000000..de3862c897
--- /dev/null
+++ b/scripts/contrib/oe-build-perf-report-email.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python3
+#
+# Send build performance test report emails
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import base64
+import logging
+import os
+import pwd
+import re
+import shutil
+import smtplib
+import socket
+import subprocess
+import sys
+import tempfile
+from email.mime.image import MIMEImage
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+
+
+# Setup logging
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+log = logging.getLogger('oe-build-perf-report')
+
+
+# Find the js scraper script
+SCRAPE_JS = os.path.join(os.path.dirname(__file__), '..', 'lib', 'build_perf',
+ 'scrape-html-report.js')
+if not os.path.isfile(SCRAPE_JS):
+ log.error("Unableto find oe-build-perf-report-scrape.js")
+ sys.exit(1)
+
+
+class ReportError(Exception):
+ """Local errors"""
+ pass
+
+
+def check_utils():
+ """Check that all needed utils are installed in the system"""
+ missing = []
+ for cmd in ('phantomjs', 'optipng'):
+ if not shutil.which(cmd):
+ missing.append(cmd)
+ if missing:
+ log.error("The following tools are missing: %s", ' '.join(missing))
+ sys.exit(1)
+
+
+def parse_args(argv):
+ """Parse command line arguments"""
+ description = """Email build perf test report"""
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description=description)
+
+ parser.add_argument('--debug', '-d', action='store_true',
+ help="Verbose logging")
+ parser.add_argument('--quiet', '-q', action='store_true',
+ help="Only print errors")
+ parser.add_argument('--to', action='append',
+ help="Recipients of the email")
+ parser.add_argument('--cc', action='append',
+ help="Carbon copy recipients of the email")
+ parser.add_argument('--bcc', action='append',
+ help="Blind carbon copy recipients of the email")
+ parser.add_argument('--subject', default="Yocto build perf test report",
+ help="Email subject")
+ parser.add_argument('--outdir', '-o',
+ help="Store files in OUTDIR. Can be used to preserve "
+ "the email parts")
+ parser.add_argument('--text',
+ help="Plain text message")
+ parser.add_argument('--html',
+ help="HTML peport generated by oe-build-perf-report")
+ parser.add_argument('--phantomjs-args', action='append',
+ help="Extra command line arguments passed to PhantomJS")
+
+ args = parser.parse_args(argv)
+
+ if not args.html and not args.text:
+ parser.error("Please specify --html and/or --text")
+
+ return args
+
+
+def decode_png(infile, outfile):
+ """Parse/decode/optimize png data from a html element"""
+ with open(infile) as f:
+ raw_data = f.read()
+
+ # Grab raw base64 data
+ b64_data = re.sub('^.*href="data:image/png;base64,', '', raw_data, 1)
+ b64_data = re.sub('">.+$', '', b64_data, 1)
+
+ # Replace file with proper decoded png
+ with open(outfile, 'wb') as f:
+ f.write(base64.b64decode(b64_data))
+
+ subprocess.check_output(['optipng', outfile], stderr=subprocess.STDOUT)
+
+
+def mangle_html_report(infile, outfile, pngs):
+ """Mangle html file into a email compatible format"""
+ paste = True
+ png_dir = os.path.dirname(outfile)
+ with open(infile) as f_in:
+ with open(outfile, 'w') as f_out:
+ for line in f_in.readlines():
+ stripped = line.strip()
+ # Strip out scripts
+ if stripped == '<!--START-OF-SCRIPTS-->':
+ paste = False
+ elif stripped == '<!--END-OF-SCRIPTS-->':
+ paste = True
+ elif paste:
+ if re.match('^.+href="data:image/png;base64', stripped):
+ # Strip out encoded pngs (as they're huge in size)
+ continue
+ elif 'www.gstatic.com' in stripped:
+ # HACK: drop references to external static pages
+ continue
+
+ # Replace charts with <img> elements
+ match = re.match('<div id="(?P<id>\w+)"', stripped)
+ if match and match.group('id') in pngs:
+ f_out.write('<img src="cid:{}"\n'.format(match.group('id')))
+ else:
+ f_out.write(line)
+
+
+def scrape_html_report(report, outdir, phantomjs_extra_args=None):
+ """Scrape html report into a format sendable by email"""
+ tmpdir = tempfile.mkdtemp(dir='.')
+ log.debug("Using tmpdir %s for phantomjs output", tmpdir)
+
+ if not os.path.isdir(outdir):
+ os.mkdir(outdir)
+ if os.path.splitext(report)[1] not in ('.html', '.htm'):
+ raise ReportError("Invalid file extension for report, needs to be "
+ "'.html' or '.htm'")
+
+ try:
+ log.info("Scraping HTML report with PhangomJS")
+ extra_args = phantomjs_extra_args if phantomjs_extra_args else []
+ subprocess.check_output(['phantomjs', '--debug=true'] + extra_args +
+ [SCRAPE_JS, report, tmpdir],
+ stderr=subprocess.STDOUT)
+
+ pngs = []
+ images = []
+ for fname in os.listdir(tmpdir):
+ base, ext = os.path.splitext(fname)
+ if ext == '.png':
+ log.debug("Decoding %s", fname)
+ decode_png(os.path.join(tmpdir, fname),
+ os.path.join(outdir, fname))
+ pngs.append(base)
+ images.append(fname)
+ elif ext in ('.html', '.htm'):
+ report_file = fname
+ else:
+ log.warning("Unknown file extension: '%s'", ext)
+ #shutil.move(os.path.join(tmpdir, fname), outdir)
+
+ log.debug("Mangling html report file %s", report_file)
+ mangle_html_report(os.path.join(tmpdir, report_file),
+ os.path.join(outdir, report_file), pngs)
+ return (os.path.join(outdir, report_file),
+ [os.path.join(outdir, i) for i in images])
+ finally:
+ shutil.rmtree(tmpdir)
+
+def send_email(text_fn, html_fn, image_fns, subject, recipients, copy=[],
+ blind_copy=[]):
+ """Send email"""
+ # Generate email message
+ text_msg = html_msg = None
+ if text_fn:
+ with open(text_fn) as f:
+ text_msg = MIMEText("Yocto build performance test report.\n" +
+ f.read(), 'plain')
+ if html_fn:
+ html_msg = msg = MIMEMultipart('related')
+ with open(html_fn) as f:
+ html_msg.attach(MIMEText(f.read(), 'html'))
+ for img_fn in image_fns:
+ # Expect that content id is same as the filename
+ cid = os.path.splitext(os.path.basename(img_fn))[0]
+ with open(img_fn, 'rb') as f:
+ image_msg = MIMEImage(f.read())
+ image_msg['Content-ID'] = '<{}>'.format(cid)
+ html_msg.attach(image_msg)
+
+ if text_msg and html_msg:
+ msg = MIMEMultipart('alternative')
+ msg.attach(text_msg)
+ msg.attach(html_msg)
+ elif text_msg:
+ msg = text_msg
+ elif html_msg:
+ msg = html_msg
+ else:
+ raise ReportError("Neither plain text nor html body specified")
+
+ pw_data = pwd.getpwuid(os.getuid())
+ full_name = pw_data.pw_gecos.split(',')[0]
+ email = os.environ.get('EMAIL',
+ '{}@{}'.format(pw_data.pw_name, socket.getfqdn()))
+ msg['From'] = "{} <{}>".format(full_name, email)
+ msg['To'] = ', '.join(recipients)
+ if copy:
+ msg['Cc'] = ', '.join(copy)
+ if blind_copy:
+ msg['Bcc'] = ', '.join(blind_copy)
+ msg['Subject'] = subject
+
+ # Send email
+ with smtplib.SMTP('localhost') as smtp:
+ smtp.send_message(msg)
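+
+# For reference, the MIME tree built above (assuming both text and html
+# parts are given) looks like:
+#   multipart/alternative
+#     |- text/plain           (the plain text report)
+#     `- multipart/related
+#          |- text/html       (the mangled report)
+#          `- image/png ...   (one part per chart, Content-ID = filename stem)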
+
+
+def main(argv=None):
+ """Script entry point"""
+ args = parse_args(argv)
+ if args.quiet:
+ log.setLevel(logging.ERROR)
+ if args.debug:
+ log.setLevel(logging.DEBUG)
+
+ check_utils()
+
+ if args.outdir:
+ outdir = args.outdir
+ if not os.path.exists(outdir):
+ os.mkdir(outdir)
+ else:
+ outdir = tempfile.mkdtemp(dir='.')
+
+ try:
+ log.debug("Storing email parts in %s", outdir)
+ html_report = images = None
+ if args.html:
+ html_report, images = scrape_html_report(args.html, outdir,
+ args.phantomjs_args)
+
+ if args.to:
+ log.info("Sending email to %s", ', '.join(args.to))
+ if args.cc:
+ log.info("Copying to %s", ', '.join(args.cc))
+ if args.bcc:
+ log.info("Blind copying to %s", ', '.join(args.bcc))
+ send_email(args.text, html_report, images, args.subject,
+ args.to, args.cc, args.bcc)
+ except subprocess.CalledProcessError as err:
+ log.error("%s, with output:\n%s", str(err), err.output.decode())
+ return 1
+ except ReportError as err:
+ log.error(err)
+ return 1
+ finally:
+ if not args.outdir:
+ log.debug("Wiping %s", outdir)
+ shutil.rmtree(outdir)
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/scripts/contrib/patchreview.py b/scripts/contrib/patchreview.py
new file mode 100755
index 0000000000..62c509f51c
--- /dev/null
+++ b/scripts/contrib/patchreview.py
@@ -0,0 +1,238 @@
+#! /usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+# TODO
+# - option to just list all broken files
+# - test suite
+# - validate signed-off-by
+
+status_values = ("accepted", "pending", "inappropriate", "backport", "submitted", "denied")
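+# A canonical tag uses one of the values above (illustrative example):
+#   Upstream-Status: Backport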
+
+class Result:
+ # Whether the patch has an Upstream-Status or not
+ missing_upstream_status = False
+ # If the Upstream-Status tag is malformed in some way (string for bad bit)
+ malformed_upstream_status = None
+ # If the Upstream-Status value is unknown (boolean)
+ unknown_upstream_status = False
+ # The upstream status value (Pending, etc)
+ upstream_status = None
+ # Whether the patch has a Signed-off-by or not
+ missing_sob = False
+ # Whether the Signed-off-by tag is malformed in some way
+ malformed_sob = False
+ # The Signed-off-by tag value
+ sob = None
+ # Whether a patch looks like a CVE but doesn't have a CVE tag
+ missing_cve = False
+
+def blame_patch(patch):
+ """
+ From a patch filename, return a list of "commit summary (author name <author
+ email>)" strings representing the history.
+ """
+ import subprocess
+ return subprocess.check_output(("git", "log",
+ "--follow", "--find-renames", "--diff-filter=A",
+ "--format=%s (%aN <%aE>)",
+ "--", patch)).decode("utf-8").splitlines()
+
+def patchreview(path, patches):
+ import re, os.path
+
+ # General pattern: start of line, optional whitespace, tag with optional
+ # hyphen or spaces, maybe a colon, some whitespace, then the value, all case
+ # insensitive.
+ sob_re = re.compile(r"^[\t ]*(Signed[-_ ]off[-_ ]by:?)[\t ]*(.+)", re.IGNORECASE | re.MULTILINE)
+ status_re = re.compile(r"^[\t ]*(Upstream[-_ ]Status:?)[\t ]*(\w*)", re.IGNORECASE | re.MULTILINE)
+ cve_tag_re = re.compile(r"^[\t ]*(CVE:)[\t ]*(.*)", re.IGNORECASE | re.MULTILINE)
+ cve_re = re.compile(r"cve-[0-9]{4}-[0-9]{4,6}", re.IGNORECASE)
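+
+    # Lines these patterns are meant to match, for illustration:
+    #   Signed-off-by: Jane Doe <jane@example.com>
+    #   Upstream-Status: Pending
+    #   CVE: CVE-2018-0001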
+
+ results = {}
+
+ for patch in patches:
+
+ fullpath = os.path.join(path, patch)
+ result = Result()
+ results[fullpath] = result
+
+        with open(fullpath, encoding='ascii', errors='ignore') as f:
+            content = f.read()
+
+ # Find the Signed-off-by tag
+ match = sob_re.search(content)
+ if match:
+ value = match.group(1)
+ if value != "Signed-off-by:":
+ result.malformed_sob = value
+ result.sob = match.group(2)
+ else:
+ result.missing_sob = True
+
+
+ # Find the Upstream-Status tag
+ match = status_re.search(content)
+ if match:
+ value = match.group(1)
+ if value != "Upstream-Status:":
+ result.malformed_upstream_status = value
+
+ value = match.group(2).lower()
+ # TODO: check case
+ if value not in status_values:
+ result.unknown_upstream_status = True
+ result.upstream_status = value
+ else:
+ result.missing_upstream_status = True
+
+        # Check that patches which look like CVEs have CVE tags
+ if cve_re.search(patch) or cve_re.search(content):
+ if not cve_tag_re.search(content):
+ result.missing_cve = True
+ # TODO: extract CVE list
+
+ return results
+
+
+def analyse(results, want_blame=False, verbose=True):
+ """
+ want_blame: display blame data for each malformed patch
+ verbose: display per-file results instead of just summary
+ """
+
+ # want_blame requires verbose, so disable blame if we're not verbose
+ if want_blame and not verbose:
+ want_blame = False
+
+ total_patches = 0
+ missing_sob = 0
+ malformed_sob = 0
+ missing_status = 0
+ malformed_status = 0
+ missing_cve = 0
+ pending_patches = 0
+
+ for patch in sorted(results):
+ r = results[patch]
+ total_patches += 1
+ need_blame = False
+
+ # Build statistics
+ if r.missing_sob:
+ missing_sob += 1
+ if r.malformed_sob:
+ malformed_sob += 1
+ if r.missing_upstream_status:
+ missing_status += 1
+        if r.malformed_upstream_status or r.unknown_upstream_status:
+            malformed_status += 1
+            # Count patches with a malformed or unknown status as pending
+            pending_patches += 1
+ if r.missing_cve:
+ missing_cve += 1
+ if r.upstream_status == "pending":
+ pending_patches += 1
+
+ # Output warnings
+ if r.missing_sob:
+ need_blame = True
+ if verbose:
+ print("Missing Signed-off-by tag (%s)" % patch)
+ if r.malformed_sob:
+ need_blame = True
+ if verbose:
+ print("Malformed Signed-off-by '%s' (%s)" % (r.malformed_sob, patch))
+ if r.missing_cve:
+ need_blame = True
+ if verbose:
+ print("Missing CVE tag (%s)" % patch)
+ if r.missing_upstream_status:
+ need_blame = True
+ if verbose:
+ print("Missing Upstream-Status tag (%s)" % patch)
+ if r.malformed_upstream_status:
+ need_blame = True
+ if verbose:
+ print("Malformed Upstream-Status '%s' (%s)" % (r.malformed_upstream_status, patch))
+ if r.unknown_upstream_status:
+ need_blame = True
+ if verbose:
+ print("Unknown Upstream-Status value '%s' (%s)" % (r.upstream_status, patch))
+
+ if want_blame and need_blame:
+ print("\n".join(blame_patch(patch)) + "\n")
+
+ def percent(num):
+ try:
+ return "%d (%d%%)" % (num, round(num * 100.0 / total_patches))
+ except ZeroDivisionError:
+ return "N/A"
+
+ if verbose:
+ print()
+
+ print("""Total patches found: %d
+Patches missing Signed-off-by: %s
+Patches with malformed Signed-off-by: %s
+Patches missing CVE: %s
+Patches missing Upstream-Status: %s
+Patches with malformed Upstream-Status: %s
+Patches in Pending state: %s""" % (total_patches,
+ percent(missing_sob),
+ percent(malformed_sob),
+ percent(missing_cve),
+ percent(missing_status),
+ percent(malformed_status),
+ percent(pending_patches)))
+
+
+
+def histogram(results):
+ from toolz import recipes, dicttoolz
+ import math
+ counts = recipes.countby(lambda r: r.upstream_status, results.values())
+ bars = dicttoolz.valmap(lambda v: "#" * int(math.ceil(float(v) / len(results) * 100)), counts)
+ for k in bars:
+ print("%-20s %s (%d)" % (k.capitalize() if k else "No status", bars[k], counts[k]))
+
+
+if __name__ == "__main__":
+ import argparse, subprocess, os
+
+    parser = argparse.ArgumentParser(description="Patch Review Tool")
+    parser.add_argument("-b", "--blame", action="store_true", help="show blame for malformed patches")
+    parser.add_argument("-v", "--verbose", action="store_true", help="show per-patch results")
+    parser.add_argument("-g", "--histogram", action="store_true", help="show patch histogram")
+    parser.add_argument("-j", "--json", help="update JSON")
+    parser.add_argument("directory", help="directory to scan")
+    args = parser.parse_args()
+
+ patches = subprocess.check_output(("git", "-C", args.directory, "ls-files", "recipes-*/**/*.patch", "recipes-*/**/*.diff")).decode("utf-8").split()
+ results = patchreview(args.directory, patches)
+ analyse(results, want_blame=args.blame, verbose=args.verbose)
+
+ if args.json:
+ import json, os.path, collections
+ if os.path.isfile(args.json):
+            with open(args.json) as f:
+                data = json.load(f)
+ else:
+ data = []
+
+ row = collections.Counter()
+ row["total"] = len(results)
+ row["date"] = subprocess.check_output(["git", "-C", args.directory, "show", "-s", "--pretty=format:%cd", "--date=format:%s"]).decode("utf-8").strip()
+ for r in results.values():
+ if r.upstream_status in status_values:
+ row[r.upstream_status] += 1
+ if r.malformed_upstream_status or r.missing_upstream_status:
+ row['malformed-upstream-status'] += 1
+ if r.malformed_sob or r.missing_sob:
+ row['malformed-sob'] += 1
+
+ data.append(row)
+        with open(args.json, "w") as f:
+            json.dump(data, f)
+
+ if args.histogram:
+ print()
+ histogram(results)
diff --git a/scripts/contrib/patchtest.sh b/scripts/contrib/patchtest.sh
new file mode 100755
index 0000000000..b1e1ea334b
--- /dev/null
+++ b/scripts/contrib/patchtest.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+#
+# patchtest: Run patchtest on the commits between master and HEAD
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+set -o errexit
+
+# Default values
+pokydir=''
+
+usage() {
+CMD=$(basename $0)
+cat <<EOM >&2
+Usage: $CMD [-h] [-p pokydir]
+  -p pokydir  Defaults to current directory
+EOM
+ exit 1
+}
+
+function clone() {
+ local REPOREMOTE=$1
+ local REPODIR=$2
+ if [ ! -d $REPODIR ]; then
+ git clone $REPOREMOTE $REPODIR --quiet
+ else
+ ( cd $REPODIR; git pull --quiet )
+ fi
+}
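+
+# Example (illustrative): 'clone git://git.yoctoproject.org/patchtest ./patchtest'
+# clones the repository on the first run and quietly pulls on later runs.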
+
+while getopts ":p:h" opt; do
+ case $opt in
+ p)
+ pokydir=$OPTARG
+ ;;
+ h)
+ usage
+ ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ usage
+ ;;
+ :)
+ echo "Option -$OPTARG requires an argument." >&2
+ usage
+ ;;
+ esac
+done
+shift $((OPTIND-1))
+
+CDIR="$PWD"
+
+# default pokydir to current directory if user did not specify one
+if [ -z "$pokydir" ]; then
+ pokydir="$CDIR"
+fi
+
+PTENV="$PWD/patchtest"
+PT="$PTENV/patchtest"
+PTOE="$PTENV/patchtest-oe"
+
+if ! which virtualenv > /dev/null; then
+ echo "Install virtualenv before proceeding"
+ exit 1;
+fi
+
+# create and activate the virtual env
+virtualenv $PTENV --quiet
+source $PTENV/bin/activate
+
+cd $PTENV
+
+# clone or pull
+clone git://git.yoctoproject.org/patchtest $PT
+clone git://git.yoctoproject.org/patchtest-oe $PTOE
+
+# install requirements
+pip install -r $PT/requirements.txt --quiet
+pip install -r $PTOE/requirements.txt --quiet
+
+PATH="$PT:$PT/scripts:$PATH"
+
+# loop through the commits from master to HEAD and execute patchtest on each one
+for commit in $(git rev-list master..HEAD --reverse)
+do
+ shortlog="$(git log "$commit^1..$commit" --pretty='%h: %aN: %cd: %s')"
+ log="$(git format-patch "$commit^1..$commit" --stdout | patchtest - -r $pokydir -s $PTOE/tests --base-commit $commit^1 --json 2>/dev/null | create-summary --fail --only-results)"
+ if [ -z "$log" ]; then
+ shortlog="$shortlog: OK"
+ else
+ shortlog="$shortlog: FAIL"
+ fi
+ echo "$shortlog"
+ echo "$log" | sed -n -e '/Issue/p' -e '/Suggested fix/p'
+ echo ""
+done
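+
+# Example output of the loop above (hashes, dates and messages are made up):
+#   1234abc: Jane Doe: Thu May 3 10:00:00 2018 +0000: foo: fix build: OK
+#   5678def: John Roe: Thu May 3 11:00:00 2018 +0000: bar: 1.0 -> 1.1: FAIL
+#     Issue: ...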
+
+deactivate
+
+cd $CDIR
diff --git a/scripts/contrib/python/generate-manifest-2.7.py b/scripts/contrib/python/generate-manifest-2.7.py
deleted file mode 100755
index f2ecf8d3f5..0000000000
--- a/scripts/contrib/python/generate-manifest-2.7.py
+++ /dev/null
@@ -1,397 +0,0 @@
-#!/usr/bin/env python
-
-# generate Python Manifest for the OpenEmbedded build system
-# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
-# (C) 2007 Jeremy Laine
-# licensed under MIT, see COPYING.MIT
-#
-# June 22, 2011 -- Mark Hatle <mark.hatle@windriver.com>
-# * Updated to no longer generate special -dbg package, instead use the
-# single system -dbg
-# * Update version with ".1" to indicate this change
-
-import os
-import sys
-import time
-
-VERSION = "2.7.2"
-
-__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
-__version__ = "20110222.2"
-
-class MakefileMaker:
-
- def __init__( self, outfile ):
- """initialize"""
- self.packages = {}
- self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
- self.output = outfile
- self.out( """
-# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
-# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
-# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
-""" % ( sys.argv[0], __version__ ) )
-
- #
- # helper functions
- #
-
- def out( self, data ):
- """print a line to the output file"""
- self.output.write( "%s\n" % data )
-
- def setPrefix( self, targetPrefix ):
- """set a file prefix for addPackage files"""
- self.targetPrefix = targetPrefix
-
- def doProlog( self ):
- self.out( """ """ )
- self.out( "" )
-
- def addPackage( self, name, description, dependencies, filenames ):
- """add a package to the Makefile"""
- if type( filenames ) == type( "" ):
- filenames = filenames.split()
- fullFilenames = []
- for filename in filenames:
- if filename[0] != "$":
- fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
- else:
- fullFilenames.append( filename )
- self.packages[name] = description, dependencies, fullFilenames
-
- def doBody( self ):
- """generate body of Makefile"""
-
- global VERSION
-
- #
- # generate provides line
- #
-
- provideLine = 'PROVIDES+="'
- for name in sorted(self.packages):
- provideLine += "%s " % name
- provideLine += '"'
-
- self.out( provideLine )
- self.out( "" )
-
- #
- # generate package line
- #
-
- packageLine = 'PACKAGES="${PN}-dbg '
- for name in sorted(self.packages):
- if name.startswith("${PN}-distutils"):
- if name == "${PN}-distutils":
- packageLine += "%s-staticdev %s " % (name, name)
- elif name != '${PN}-dbg':
- packageLine += "%s " % name
- packageLine += '${PN}-modules"'
-
- self.out( packageLine )
- self.out( "" )
-
- #
- # generate package variables
- #
-
- for name, data in sorted(self.packages.items()):
- desc, deps, files = data
-
- #
- # write out the description, revision and dependencies
- #
- self.out( 'SUMMARY_%s="%s"' % ( name, desc ) )
- self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )
-
- line = 'FILES_%s="' % name
-
- #
- # check which directories to make in the temporary directory
- #
-
- dirset = {} # if python had a set-datatype this would be sufficient. for now, we're using a dict instead.
- for target in files:
- dirset[os.path.dirname( target )] = True
-
- #
- # generate which files to copy for the target (-dfR because whole directories are also allowed)
- #
-
- for target in files:
- line += "%s " % target
-
- line += '"'
- self.out( line )
- self.out( "" )
-
- self.out( 'SUMMARY_${PN}-modules="All Python modules"' )
- line = 'RDEPENDS_${PN}-modules="'
-
- for name, data in sorted(self.packages.items()):
- if name not in ['${PN}-dev', '${PN}-distutils-staticdev']:
- line += "%s " % name
-
- self.out( "%s \"" % line )
- self.out( 'ALLOW_EMPTY_${PN}-modules = "1"' )
-
- def doEpilog( self ):
- self.out( """""" )
- self.out( "" )
-
- def make( self ):
- self.doProlog()
- self.doBody()
- self.doEpilog()
-
-if __name__ == "__main__":
-
- if len( sys.argv ) > 1:
- try:
- os.unlink(sys.argv[1])
- except Exception:
- sys.exc_clear()
- outfile = open( sys.argv[1], "w" )
- else:
- outfile = sys.stdout
-
- m = MakefileMaker( outfile )
-
- # Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
- # Parameters: revision, name, description, dependencies, filenames
- #
-
- m.addPackage( "${PN}-core", "Python interpreter and core modules", "${PN}-lang ${PN}-re",
- "__future__.* _abcoll.* abc.* ast.* copy.* copy_reg.* ConfigParser.* " +
- "genericpath.* getopt.* linecache.* new.* " +
- "os.* posixpath.* struct.* " +
- "warnings.* site.* stat.* " +
- "UserDict.* UserList.* UserString.* " +
- "lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " +
- "lib-dynload/xreadlines.so types.* platform.* ${bindir}/python* " +
- "_weakrefset.* sysconfig.* _sysconfigdata.* config/Makefile " +
- "${includedir}/python${PYTHON_MAJMIN}/pyconfig*.h " +
- "${libdir}/python${PYTHON_MAJMIN}/sitecustomize.py ")
-
- m.addPackage( "${PN}-dev", "Python development package", "${PN}-core",
- "${includedir} " +
- "${libdir}/lib*${SOLIBSDEV} " +
- "${libdir}/*.la " +
- "${libdir}/*.a " +
- "${libdir}/*.o " +
- "${libdir}/pkgconfig " +
- "${base_libdir}/*.a " +
- "${base_libdir}/*.o " +
- "${datadir}/aclocal " +
- "${datadir}/pkgconfig " )
-
- m.addPackage( "${PN}-2to3", "Python automated Python 2 to 3 code translator", "${PN}-core",
- "${bindir}/2to3 lib2to3" ) # package
-
- m.addPackage( "${PN}-idle", "Python Integrated Development Environment", "${PN}-core ${PN}-tkinter",
- "${bindir}/idle idlelib" ) # package
-
- m.addPackage( "${PN}-pydoc", "Python interactive help support", "${PN}-core ${PN}-lang ${PN}-stringold ${PN}-re",
- "${bindir}/pydoc pydoc.* pydoc_data" )
-
- m.addPackage( "${PN}-smtpd", "Python Simple Mail Transport Daemon", "${PN}-core ${PN}-netserver ${PN}-email ${PN}-mime",
- "${bindir}/smtpd.* smtpd.*" )
-
- m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core",
- "wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so audiodev.* sunaudio.* sunau.* toaiff.*" )
-
- m.addPackage( "${PN}-bsddb", "Python bindings for the Berkeley Database", "${PN}-core",
- "bsddb lib-dynload/_bsddb.so" ) # package
-
- m.addPackage( "${PN}-codecs", "Python codecs, encodings & i18n support", "${PN}-core ${PN}-lang",
- "codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/_codecs* lib-dynload/_multibytecodec.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )
-
- m.addPackage( "${PN}-compile", "Python bytecode compilation support", "${PN}-core",
- "py_compile.* compileall.*" )
-
- m.addPackage( "${PN}-compiler", "Python compiler support", "${PN}-core",
- "compiler" ) # package
-
- m.addPackage( "${PN}-compression", "Python high-level compression support", "${PN}-core ${PN}-zlib",
- "gzip.* zipfile.* tarfile.* lib-dynload/bz2.so" )
-
- m.addPackage( "${PN}-crypt", "Python basic cryptographic and hashing support", "${PN}-core",
- "hashlib.* md5.* sha.* lib-dynload/crypt.so lib-dynload/_hashlib.so lib-dynload/_sha256.so lib-dynload/_sha512.so" )
-
- m.addPackage( "${PN}-textutils", "Python option parsing, text wrapping and CSV support", "${PN}-core ${PN}-io ${PN}-re ${PN}-stringold",
- "lib-dynload/_csv.so csv.* optparse.* textwrap.*" )
-
- m.addPackage( "${PN}-curses", "Python curses support", "${PN}-core",
- "curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # directory + low level module
-
- m.addPackage( "${PN}-ctypes", "Python C types support", "${PN}-core",
- "ctypes lib-dynload/_ctypes.so lib-dynload/_ctypes_test.so" ) # directory + low level module
-
- m.addPackage( "${PN}-datetime", "Python calendar and time support", "${PN}-core ${PN}-codecs",
- "_strptime.* calendar.* lib-dynload/datetime.so" )
-
- m.addPackage( "${PN}-db", "Python file-based database support", "${PN}-core",
- "anydbm.* dumbdbm.* whichdb.* " )
-
- m.addPackage( "${PN}-debugger", "Python debugger", "${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint",
- "bdb.* pdb.*" )
-
- m.addPackage( "${PN}-difflib", "Python helpers for computing deltas between objects", "${PN}-lang ${PN}-re",
- "difflib.*" )
-
- m.addPackage( "${PN}-distutils-staticdev", "Python distribution utilities (static libraries)", "${PN}-distutils",
- "config/lib*.a" ) # package
-
- m.addPackage( "${PN}-distutils", "Python Distribution Utilities", "${PN}-core ${PN}-email",
- "config distutils" ) # package
-
- m.addPackage( "${PN}-doctest", "Python framework for running examples in docstrings", "${PN}-core ${PN}-lang ${PN}-io ${PN}-re ${PN}-unittest ${PN}-debugger ${PN}-difflib",
- "doctest.*" )
-
- m.addPackage( "${PN}-email", "Python email support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient",
- "imaplib.* email" ) # package
-
- m.addPackage( "${PN}-fcntl", "Python's fcntl interface", "${PN}-core",
- "lib-dynload/fcntl.so" )
-
- m.addPackage( "${PN}-hotshot", "Python hotshot performance profiler", "${PN}-core",
- "hotshot lib-dynload/_hotshot.so" )
-
- m.addPackage( "${PN}-html", "Python HTML processing support", "${PN}-core",
- "formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " )
-
- m.addPackage( "${PN}-importlib", "Python import implementation library", "${PN}-core",
- "importlib" )
-
- m.addPackage( "${PN}-gdbm", "Python GNU database support", "${PN}-core",
- "lib-dynload/gdbm.so" )
-
- m.addPackage( "${PN}-image", "Python graphical image handling", "${PN}-core",
- "colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )
-
- m.addPackage( "${PN}-io", "Python low-level I/O", "${PN}-core ${PN}-math ${PN}-textutils ${PN}-netclient ${PN}-contextlib",
- "lib-dynload/_socket.so lib-dynload/_io.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so " +
- "pipes.* socket.* ssl.* tempfile.* StringIO.* io.* _pyio.*" )
-
- m.addPackage( "${PN}-json", "Python JSON support", "${PN}-core ${PN}-math ${PN}-re ${PN}-codecs",
- "json lib-dynload/_json.so" ) # package
-
- m.addPackage( "${PN}-lang", "Python low-level language support", "${PN}-core",
- "lib-dynload/_bisect.so lib-dynload/_collections.so lib-dynload/_heapq.so lib-dynload/_weakref.so lib-dynload/_functools.so " +
- "lib-dynload/array.so lib-dynload/itertools.so lib-dynload/operator.so lib-dynload/parser.so " +
- "atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
- "tokenize.* traceback.* weakref.*" )
-
- m.addPackage( "${PN}-logging", "Python logging support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold",
- "logging" ) # package
-
- m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime",
- "mailbox.*" )
-
- m.addPackage( "${PN}-math", "Python math support", "${PN}-core ${PN}-crypt",
- "lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )
-
- m.addPackage( "${PN}-mime", "Python MIME handling APIs", "${PN}-core ${PN}-io",
- "mimetools.* uu.* quopri.* rfc822.* MimeWriter.*" )
-
- m.addPackage( "${PN}-mmap", "Python memory-mapped file support", "${PN}-core ${PN}-io",
- "lib-dynload/mmap.so " )
-
- m.addPackage( "${PN}-multiprocessing", "Python multiprocessing support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-threading ${PN}-ctypes ${PN}-mmap",
- "lib-dynload/_multiprocessing.so multiprocessing" ) # package
-
- m.addPackage( "${PN}-netclient", "Python Internet Protocol clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime",
- "*Cookie*.* " +
- "base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" )
-
- m.addPackage( "${PN}-netserver", "Python Internet Protocol servers", "${PN}-core ${PN}-netclient ${PN}-shell ${PN}-threading",
- "cgi.* *HTTPServer.* SocketServer.*" )
-
- m.addPackage( "${PN}-numbers", "Python number APIs", "${PN}-core ${PN}-lang ${PN}-re",
- "decimal.* fractions.* numbers.*" )
-
- m.addPackage( "${PN}-pickle", "Python serialisation/persistence support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re",
- "pickle.* shelve.* lib-dynload/cPickle.so pickletools.*" )
-
- m.addPackage( "${PN}-pkgutil", "Python package extension utility support", "${PN}-core",
- "pkgutil.*")
-
- m.addPackage( "${PN}-plistlib", "Generate and parse Mac OS X .plist files", "${PN}-core ${PN}-datetime ${PN}-io",
- "plistlib.*")
-
- m.addPackage( "${PN}-pprint", "Python pretty-print support", "${PN}-core ${PN}-io",
- "pprint.*" )
-
- m.addPackage( "${PN}-profile", "Python basic performance profiling support", "${PN}-core ${PN}-textutils",
- "profile.* pstats.* cProfile.* lib-dynload/_lsprof.so" )
-
- m.addPackage( "${PN}-re", "Python Regular Expression APIs", "${PN}-core",
- "re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin
-
- m.addPackage( "${PN}-readline", "Python readline support", "${PN}-core",
- "lib-dynload/readline.so rlcompleter.*" )
-
- m.addPackage( "${PN}-resource", "Python resource control interface", "${PN}-core",
- "lib-dynload/resource.so" )
-
- m.addPackage( "${PN}-shell", "Python shell-like functionality", "${PN}-core ${PN}-re",
- "cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )
-
- m.addPackage( "${PN}-robotparser", "Python robots.txt parser", "${PN}-core ${PN}-netclient",
- "robotparser.*")
-
- m.addPackage( "${PN}-subprocess", "Python subprocess support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle",
- "subprocess.*" )
-
- m.addPackage( "${PN}-sqlite3", "Python Sqlite3 database support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading ${PN}-zlib",
- "lib-dynload/_sqlite3.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )
-
- m.addPackage( "${PN}-sqlite3-tests", "Python Sqlite3 database support tests", "${PN}-core ${PN}-sqlite3",
- "sqlite3/test" )
-
- m.addPackage( "${PN}-stringold", "Python string APIs [deprecated]", "${PN}-core ${PN}-re",
- "lib-dynload/strop.so string.* stringold.*" )
-
- m.addPackage( "${PN}-syslog", "Python syslog interface", "${PN}-core",
- "lib-dynload/syslog.so" )
-
- m.addPackage( "${PN}-terminal", "Python terminal controlling support", "${PN}-core ${PN}-io",
- "pty.* tty.*" )
-
- m.addPackage( "${PN}-tests", "Python tests", "${PN}-core",
- "test" ) # package
-
- m.addPackage( "${PN}-threading", "Python threading & synchronization support", "${PN}-core ${PN}-lang",
- "_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )
-
- m.addPackage( "${PN}-tkinter", "Python Tcl/Tk bindings", "${PN}-core",
- "lib-dynload/_tkinter.so lib-tk" ) # package
-
- m.addPackage( "${PN}-unittest", "Python unit testing framework", "${PN}-core ${PN}-stringold ${PN}-lang ${PN}-io ${PN}-difflib ${PN}-pprint ${PN}-shell",
- "unittest/" )
-
- m.addPackage( "${PN}-unixadmin", "Python Unix administration support", "${PN}-core",
- "lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )
-
- m.addPackage( "${PN}-xml", "Python basic XML support", "${PN}-core ${PN}-re",
- "lib-dynload/_elementtree.so lib-dynload/pyexpat.so xml xmllib.*" ) # package
-
- m.addPackage( "${PN}-xmlrpc", "Python XML-RPC support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang",
- "xmlrpclib.* SimpleXMLRPCServer.* DocXMLRPCServer.*" )
-
- m.addPackage( "${PN}-zlib", "Python zlib compression support", "${PN}-core",
- "lib-dynload/zlib.so" )
-
- m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime",
- "mailbox.*" )
-
- m.addPackage( "${PN}-argparse", "Python command line argument parser", "${PN}-core ${PN}-codecs ${PN}-textutils",
- "argparse.*" )
-
- m.addPackage( "${PN}-contextlib", "Python utilities for with-statement" +
- "contexts.", "${PN}-core",
- "${libdir}/python${PYTHON_MAJMIN}/contextlib.*" )
-
- m.make()
diff --git a/scripts/contrib/python/generate-manifest-3.5.py b/scripts/contrib/python/generate-manifest-3.5.py
deleted file mode 100755
index 2906cc66d0..0000000000
--- a/scripts/contrib/python/generate-manifest-3.5.py
+++ /dev/null
@@ -1,396 +0,0 @@
-#!/usr/bin/env python
-
-# generate Python Manifest for the OpenEmbedded build system
-# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
-# (C) 2007 Jeremy Laine
-# licensed under MIT, see COPYING.MIT
-#
-# June 22, 2011 -- Mark Hatle <mark.hatle@windriver.com>
-# * Updated to no longer generate special -dbg package, instead use the
-# single system -dbg
-# * Update version with ".1" to indicate this change
-#
-# 2014 Khem Raj <raj.khem@gmail.com>
-# Added python3 support
-#
-import os
-import sys
-import time
-
-VERSION = "3.5.0"
-
-__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
-__version__ = "20140131"
-
-class MakefileMaker:
-
- def __init__( self, outfile ):
- """initialize"""
- self.packages = {}
- self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
- self.output = outfile
- self.out( """
-# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
-# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
-# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
-""" % ( sys.argv[0], __version__ ) )
-
- #
- # helper functions
- #
-
- def out( self, data ):
- """print a line to the output file"""
- self.output.write( "%s\n" % data )
-
- def setPrefix( self, targetPrefix ):
- """set a file prefix for addPackage files"""
- self.targetPrefix = targetPrefix
-
- def doProlog( self ):
- self.out( """ """ )
- self.out( "" )
-
- def addPackage( self, name, description, dependencies, filenames ):
- """add a package to the Makefile"""
- if type( filenames ) == type( "" ):
- filenames = filenames.split()
- fullFilenames = []
- for filename in filenames:
- if filename[0] != "$":
- fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
- else:
- fullFilenames.append( filename )
- self.packages[name] = description, dependencies, fullFilenames
-
- def doBody( self ):
- """generate body of Makefile"""
-
- global VERSION
-
- #
- # generate provides line
- #
-
- provideLine = 'PROVIDES+="'
- for name in sorted(self.packages):
- provideLine += "%s " % name
- provideLine += '"'
-
- self.out( provideLine )
- self.out( "" )
-
- #
- # generate package line
- #
-
- packageLine = 'PACKAGES="${PN}-dbg '
- for name in sorted(self.packages):
- if name.startswith("${PN}-distutils"):
- if name == "${PN}-distutils":
- packageLine += "%s-staticdev %s " % (name, name)
- elif name != '${PN}-dbg':
- packageLine += "%s " % name
- packageLine += '${PN}-modules"'
-
- self.out( packageLine )
- self.out( "" )
-
- #
- # generate package variables
- #
-
- for name, data in sorted(self.packages.items()):
- desc, deps, files = data
-
- #
- # write out the description, revision and dependencies
- #
- self.out( 'SUMMARY_%s="%s"' % ( name, desc ) )
- self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )
-
- line = 'FILES_%s="' % name
-
- #
- # check which directories to make in the temporary directory
- #
-
- dirset = {} # if python had a set-datatype this would be sufficient. for now, we're using a dict instead.
- for target in files:
- dirset[os.path.dirname( target )] = True
-
- #
- # generate which files to copy for the target (-dfR because whole directories are also allowed)
- #
-
- for target in files:
- line += "%s " % target
-
- line += '"'
- self.out( line )
- self.out( "" )
-
- self.out( 'SUMMARY_${PN}-modules="All Python modules"' )
- line = 'RDEPENDS_${PN}-modules="'
-
- for name, data in sorted(self.packages.items()):
- if name not in ['${PN}-dev', '${PN}-distutils-staticdev']:
- line += "%s " % name
-
- self.out( "%s \"" % line )
- self.out( 'ALLOW_EMPTY_${PN}-modules = "1"' )
-
- def doEpilog( self ):
- self.out( """""" )
- self.out( "" )
-
- def make( self ):
- self.doProlog()
- self.doBody()
- self.doEpilog()
-
-if __name__ == "__main__":
-
- if len( sys.argv ) > 1:
- try:
- os.unlink(sys.argv[1])
- except Exception:
- sys.exc_clear()
- outfile = open( sys.argv[1], "w" )
- else:
- outfile = sys.stdout
-
- m = MakefileMaker( outfile )
-
- # Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
- # Parameters: revision, name, description, dependencies, filenames
- #
-
- m.addPackage( "${PN}-core", "Python interpreter and core modules", "${PN}-lang ${PN}-re ${PN}-reprlib ${PN}-codecs ${PN}-io ${PN}-math",
- "__future__.* _abcoll.* abc.* ast.* copy.* copyreg.* configparser.* " +
- "genericpath.* getopt.* linecache.* new.* " +
- "os.* posixpath.* struct.* " +
- "warnings.* site.* stat.* " +
- "UserDict.* UserList.* UserString.* " +
- "lib-dynload/binascii.*.so lib-dynload/_struct.*.so lib-dynload/time.*.so " +
- "lib-dynload/xreadlines.*.so types.* platform.* ${bindir}/python* " +
- "_weakrefset.* sysconfig.* _sysconfigdata.* config/Makefile " +
- "${includedir}/python${PYTHON_BINABI}/pyconfig*.h " +
- "${libdir}/python${PYTHON_MAJMIN}/collections " +
- "${libdir}/python${PYTHON_MAJMIN}/_collections_abc.* " +
- "${libdir}/python${PYTHON_MAJMIN}/_sitebuiltins.* " +
- "${libdir}/python${PYTHON_MAJMIN}/sitecustomize.py ")
-
- m.addPackage( "${PN}-dev", "Python development package", "${PN}-core",
- "${includedir} " +
- "${libdir}/lib*${SOLIBSDEV} " +
- "${libdir}/*.la " +
- "${libdir}/*.a " +
- "${libdir}/*.o " +
- "${libdir}/pkgconfig " +
- "${base_libdir}/*.a " +
- "${base_libdir}/*.o " +
- "${datadir}/aclocal " +
- "${datadir}/pkgconfig " )
-
- m.addPackage( "${PN}-2to3", "Python automated Python 2 to 3 code translator", "${PN}-core",
- "lib2to3" ) # package
-
- m.addPackage( "${PN}-idle", "Python Integrated Development Environment", "${PN}-core ${PN}-tkinter",
- "${bindir}/idle idlelib" ) # package
-
- m.addPackage( "${PN}-pydoc", "Python interactive help support", "${PN}-core ${PN}-lang ${PN}-stringold ${PN}-re",
- "${bindir}/pydoc pydoc.* pydoc_data" )
-
- m.addPackage( "${PN}-smtpd", "Python Simple Mail Transport Daemon", "${PN}-core ${PN}-netserver ${PN}-email ${PN}-mime",
- "${bindir}/smtpd.* smtpd.*" )
-
- m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core",
- "wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.*.so lib-dynload/audioop.*.so audiodev.* sunaudio.* sunau.* toaiff.*" )
-
- m.addPackage( "${PN}-argparse", "Python command line argument parser", "${PN}-core ${PN}-codecs ${PN}-textutils",
- "argparse.*" )
-
- m.addPackage( "${PN}-asyncio", "Python Asynchronous I/O, event loop, coroutines and tasks", "${PN}-core",
- "asyncio" )
-
- m.addPackage( "${PN}-codecs", "Python codecs, encodings & i18n support", "${PN}-core ${PN}-lang",
- "codecs.* encodings gettext.* locale.* lib-dynload/_locale.*.so lib-dynload/_codecs* lib-dynload/_multibytecodec.*.so lib-dynload/unicodedata.*.so stringprep.* xdrlib.*" )
-
- m.addPackage( "${PN}-compile", "Python bytecode compilation support", "${PN}-core",
- "py_compile.* compileall.*" )
-
- m.addPackage( "${PN}-compression", "Python high-level compression support", "${PN}-core ${PN}-codecs ${PN}-importlib ${PN}-threading ${PN}-shell",
- "gzip.* zipfile.* tarfile.* lib-dynload/bz2.*.so lib-dynload/zlib.*.so" )
-
- m.addPackage( "${PN}-crypt", "Python basic cryptographic and hashing support", "${PN}-core",
- "hashlib.* md5.* sha.* lib-dynload/crypt.*.so lib-dynload/_hashlib.*.so lib-dynload/_sha256.*.so lib-dynload/_sha512.*.so" )
-
- m.addPackage( "${PN}-textutils", "Python option parsing, text wrapping and CSV support", "${PN}-core ${PN}-io ${PN}-re ${PN}-stringold",
- "lib-dynload/_csv.*.so csv.* optparse.* textwrap.*" )
-
- m.addPackage( "${PN}-curses", "Python curses support", "${PN}-core",
- "curses lib-dynload/_curses.*.so lib-dynload/_curses_panel.*.so" ) # directory + low level module
-
- m.addPackage( "${PN}-ctypes", "Python C types support", "${PN}-core ${PN}-subprocess",
- "ctypes lib-dynload/_ctypes.*.so lib-dynload/_ctypes_test.*.so" ) # directory + low level module
-
- m.addPackage( "${PN}-datetime", "Python calendar and time support", "${PN}-core ${PN}-codecs",
- "_strptime.* calendar.* datetime.* lib-dynload/_datetime.*.so" )
-
- m.addPackage( "${PN}-db", "Python file-based database support", "${PN}-core",
- "anydbm.* dumbdbm.* whichdb.* dbm lib-dynload/_dbm.*.so" )
-
- m.addPackage( "${PN}-debugger", "Python debugger", "${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint ${PN}-importlib ${PN}-pkgutil",
- "bdb.* pdb.*" )
-
- m.addPackage( "${PN}-difflib", "Python helpers for computing deltas between objects", "${PN}-lang ${PN}-re",
- "difflib.*" )
-
- m.addPackage( "${PN}-distutils-staticdev", "Python distribution utilities (static libraries)", "${PN}-distutils",
- "config/lib*.a" ) # package
-
- m.addPackage( "${PN}-distutils", "Python Distribution Utilities", "${PN}-core ${PN}-email",
- "config distutils" ) # package
-
- m.addPackage( "${PN}-doctest", "Python framework for running examples in docstrings", "${PN}-core ${PN}-lang ${PN}-io ${PN}-re ${PN}-unittest ${PN}-debugger ${PN}-difflib",
- "doctest.*" )
-
- m.addPackage( "${PN}-email", "Python email support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient",
- "imaplib.* email" ) # package
-
- m.addPackage( "${PN}-enum", "Python support for enumerations", "${PN}-core",
- "enum.*" )
-
- m.addPackage( "${PN}-fcntl", "Python's fcntl interface", "${PN}-core",
- "lib-dynload/fcntl.*.so" )
-
- m.addPackage( "${PN}-html", "Python HTML processing support", "${PN}-core",
- "formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " )
-
- m.addPackage( "${PN}-importlib", "Python import implementation library", "${PN}-core ${PN}-lang",
- "importlib" )
-
- m.addPackage( "${PN}-gdbm", "Python GNU database support", "${PN}-core",
- "lib-dynload/_gdbm.*.so" )
-
- m.addPackage( "${PN}-image", "Python graphical image handling", "${PN}-core",
- "colorsys.* imghdr.* lib-dynload/imageop.*.so lib-dynload/rgbimg.*.so" )
-
- m.addPackage( "${PN}-io", "Python low-level I/O", "${PN}-core ${PN}-math",
- "lib-dynload/_socket.*.so lib-dynload/_io.*.so lib-dynload/_ssl.*.so lib-dynload/select.*.so lib-dynload/termios.*.so lib-dynload/cStringIO.*.so " +
- "pipes.* socket.* ssl.* tempfile.* StringIO.* io.* _pyio.*" )
-
- m.addPackage( "${PN}-json", "Python JSON support", "${PN}-core ${PN}-math ${PN}-re",
- "json lib-dynload/_json.*.so" ) # package
-
- m.addPackage( "${PN}-lang", "Python low-level language support", "${PN}-core ${PN}-importlib",
- "lib-dynload/_bisect.*.so lib-dynload/_collections.*.so lib-dynload/_heapq.*.so lib-dynload/_weakref.*.so lib-dynload/_functools.*.so " +
- "lib-dynload/array.*.so lib-dynload/itertools.*.so lib-dynload/operator.*.so lib-dynload/parser.*.so " +
- "atexit.* bisect.* code.* codeop.* collections.* _collections_abc.* contextlib.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* operator.* symbol.* repr.* token.* " +
- "tokenize.* traceback.* weakref.*" )
-
- m.addPackage( "${PN}-logging", "Python logging support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold",
- "logging" ) # package
-
- m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime",
- "mailbox.*" )
-
- m.addPackage( "${PN}-math", "Python math support", "${PN}-core ${PN}-crypt",
- "lib-dynload/cmath.*.so lib-dynload/math.*.so lib-dynload/_random.*.so random.* sets.*" )
-
- m.addPackage( "${PN}-mime", "Python MIME handling APIs", "${PN}-core ${PN}-io",
- "mimetools.* uu.* quopri.* rfc822.* MimeWriter.*" )
-
- m.addPackage( "${PN}-mmap", "Python memory-mapped file support", "${PN}-core ${PN}-io",
- "lib-dynload/mmap.*.so " )
-
- m.addPackage( "${PN}-multiprocessing", "Python multiprocessing support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-threading ${PN}-ctypes ${PN}-mmap",
- "lib-dynload/_multiprocessing.*.so multiprocessing" ) # package
-
- m.addPackage( "${PN}-netclient", "Python Internet Protocol clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime",
- "*Cookie*.* " +
- "base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib uuid.* rfc822.* mimetools.*" )
-
- m.addPackage( "${PN}-netserver", "Python Internet Protocol servers", "${PN}-core ${PN}-netclient ${PN}-shell ${PN}-threading",
- "cgi.* *HTTPServer.* SocketServer.*" )
-
- m.addPackage( "${PN}-numbers", "Python number APIs", "${PN}-core ${PN}-lang ${PN}-re",
- "decimal.* fractions.* numbers.*" )
-
- m.addPackage( "${PN}-pickle", "Python serialisation/persistence support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re",
- "pickle.* shelve.* lib-dynload/cPickle.*.so pickletools.*" )
-
- m.addPackage( "${PN}-pkgutil", "Python package extension utility support", "${PN}-core",
- "pkgutil.*")
-
- m.addPackage( "${PN}-pprint", "Python pretty-print support", "${PN}-core ${PN}-io",
- "pprint.*" )
-
- m.addPackage( "${PN}-profile", "Python basic performance profiling support", "${PN}-core ${PN}-textutils",
- "profile.* pstats.* cProfile.* lib-dynload/_lsprof.*.so" )
-
- m.addPackage( "${PN}-re", "Python Regular Expression APIs", "${PN}-core",
- "re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin
-
- m.addPackage( "${PN}-readline", "Python readline support", "${PN}-core",
- "lib-dynload/readline.*.so rlcompleter.*" )
-
- m.addPackage( "${PN}-reprlib", "Python alternate repr() implementation", "${PN}-core",
- "reprlib.py" )
-
- m.addPackage( "${PN}-resource", "Python resource control interface", "${PN}-core",
- "lib-dynload/resource.*.so" )
-
- m.addPackage( "${PN}-selectors", "Python High-level I/O multiplexing", "${PN}-core",
- "selectors.*" )
-
- m.addPackage( "${PN}-shell", "Python shell-like functionality", "${PN}-core ${PN}-re ${PN}-compression",
- "cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )
-
- m.addPackage( "${PN}-signal", "Python set handlers for asynchronous events support", "${PN}-core ${PN}-enum",
- "signal.*" )
-
- m.addPackage( "${PN}-subprocess", "Python subprocess support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle ${PN}-threading ${PN}-signal ${PN}-selectors",
- "subprocess.* lib-dynload/_posixsubprocess.*.so" )
-
- m.addPackage( "${PN}-sqlite3", "Python Sqlite3 database support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading",
- "lib-dynload/_sqlite3.*.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )
-
- m.addPackage( "${PN}-sqlite3-tests", "Python Sqlite3 database support tests", "${PN}-core ${PN}-sqlite3",
- "sqlite3/test" )
-
- m.addPackage( "${PN}-stringold", "Python string APIs [deprecated]", "${PN}-core ${PN}-re",
- "lib-dynload/strop.*.so string.* stringold.*" )
-
- m.addPackage( "${PN}-syslog", "Python syslog interface", "${PN}-core",
- "lib-dynload/syslog.*.so" )
-
- m.addPackage( "${PN}-terminal", "Python terminal controlling support", "${PN}-core ${PN}-io",
- "pty.* tty.*" )
-
- m.addPackage( "${PN}-tests", "Python tests", "${PN}-core",
- "test" ) # package
-
- m.addPackage( "${PN}-threading", "Python threading & synchronization support", "${PN}-core ${PN}-lang",
- "_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* queue.*" )
-
- m.addPackage( "${PN}-tkinter", "Python Tcl/Tk bindings", "${PN}-core",
- "lib-dynload/_tkinter.*.so lib-tk tkinter" ) # package
-
- m.addPackage( "${PN}-unittest", "Python unit testing framework", "${PN}-core ${PN}-stringold ${PN}-lang ${PN}-io ${PN}-difflib ${PN}-pprint ${PN}-shell",
- "unittest/" )
-
- m.addPackage( "${PN}-unixadmin", "Python Unix administration support", "${PN}-core",
- "lib-dynload/nis.*.so lib-dynload/grp.*.so lib-dynload/pwd.*.so getpass.*" )
-
- m.addPackage( "${PN}-xml", "Python basic XML support", "${PN}-core ${PN}-re",
- "lib-dynload/_elementtree.*.so lib-dynload/pyexpat.*.so xml xmllib.*" ) # package
-
- m.addPackage( "${PN}-xmlrpc", "Python XML-RPC support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang",
- "xmlrpclib.* SimpleXMLRPCServer.* DocXMLRPCServer.* xmlrpc" )
-
- m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime",
- "mailbox.*" )
-
- m.make()
diff --git a/scripts/contrib/serdevtry b/scripts/contrib/serdevtry
index 74bd7b7161..9144730e7e 100755
--- a/scripts/contrib/serdevtry
+++ b/scripts/contrib/serdevtry
@@ -2,7 +2,8 @@
# Copyright (C) 2014 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
if [ "$1" = "" -o "$1" = "--help" ] ; then
echo "Usage: $0 <serial terminal command>"
diff --git a/scripts/contrib/test_build_time.sh b/scripts/contrib/test_build_time.sh
index 9e5725ae54..23f238adf6 100755
--- a/scripts/contrib/test_build_time.sh
+++ b/scripts/contrib/test_build_time.sh
@@ -3,22 +3,8 @@
# Build performance regression test script
#
# Copyright 2011 Intel Corporation
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script is intended to be used in conjunction with "git bisect run"
diff --git a/scripts/contrib/test_build_time_worker.sh b/scripts/contrib/test_build_time_worker.sh
index 8e20a9ea7d..478e8b0d03 100755
--- a/scripts/contrib/test_build_time_worker.sh
+++ b/scripts/contrib/test_build_time_worker.sh
@@ -1,5 +1,7 @@
#!/bin/bash
-
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# This is an example script to be used in conjunction with test_build_time.sh
if [ "$TEST_BUILDDIR" = "" ] ; then
diff --git a/scripts/contrib/uncovered b/scripts/contrib/uncovered
index a8399ad170..f16128cb7a 100755
--- a/scripts/contrib/uncovered
+++ b/scripts/contrib/uncovered
@@ -1,23 +1,10 @@
#!/bin/bash -eur
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Find python modules uncovered by oe-seltest
#
# Copyright (c) 2016, Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# Author: Ed Bartosh <ed.bartosh@linux.intel.com>
#
diff --git a/scripts/contrib/verify-homepage.py b/scripts/contrib/verify-homepage.py
index d39dd1d973..7bffa78e23 100755
--- a/scripts/contrib/verify-homepage.py
+++ b/scripts/contrib/verify-homepage.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
-
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# This script can be used to verify HOMEPAGE values for all recipes in
# the current configuration.
# The result is influenced by network environment, since the timeout of connect url is 5 seconds as default.
@@ -27,7 +29,7 @@ logger = scriptutils.logger_create('verify_homepage')
def wgetHomepage(pn, homepage):
result = subprocess.call('wget ' + '-q -T 5 -t 1 --spider ' + homepage, shell = True)
if result:
- logger.warn("%s: failed to verify HOMEPAGE: %s " % (pn, homepage))
+ logger.warning("%s: failed to verify HOMEPAGE: %s " % (pn, homepage))
return 1
else:
return 0
@@ -44,7 +46,7 @@ def verifyHomepage(bbhandler):
if realfn in checked:
continue
data = bbhandler.parse_recipe_file(realfn)
- homepage = data.getVar("HOMEPAGE", True)
+ homepage = data.getVar("HOMEPAGE")
if homepage:
try:
urllib.request.urlopen(homepage, timeout=5)
diff --git a/scripts/cp-noerror b/scripts/cp-noerror
index 35eb211be3..ab617c5d35 100755
--- a/scripts/cp-noerror
+++ b/scripts/cp-noerror
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Allow copying of $1 to $2 but if files in $1 disappear during the copy operation,
# don't error.
# Also don't error if $1 disappears.
diff --git a/scripts/create-pull-request b/scripts/create-pull-request
index a88f35a4a3..8eefcf63a5 100755
--- a/scripts/create-pull-request
+++ b/scripts/create-pull-request
@@ -1,21 +1,8 @@
#!/bin/sh
#
# Copyright (c) 2010-2013, Intel Corporation.
-# All Rights Reserved
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-or-later
#
#
@@ -34,7 +21,7 @@ RFC=0
usage() {
CMD=$(basename $0)
cat <<EOM
-Usage: $CMD [-h] [-o output_dir] [-m msg_body_file] [-s subject] [-r relative_to] [-i commit_id] [-d relative_dir] -u remote [-b branch]
+Usage: $CMD [-h] [-o output_dir] [-m msg_body_file] [-s subject] [-r relative_to] [-i commit_id] [-d relative_dir] -u remote [-b branch] [-- <format-patch options>]
-b branch Branch name in the specified remote (default: current branch)
-l local branch Local branch name (default: HEAD)
-c Create an RFC (Request for Comment) patch series
@@ -57,6 +44,7 @@ Usage: $CMD [-h] [-o output_dir] [-m msg_body_file] [-s subject] [-r relative_to
$CMD -u contrib -r master -i misc -b nitin/misc -o pull-misc
$CMD -u contrib -p "RFC PATCH" -b nitin/experimental
$CMD -u contrib -i misc -b nitin/misc -d ./bitbake
+ $CMD -u contrib -r origin/master -o /tmp/out.v3 -- -v3 --in-reply-to=20170511120134.XX7799@site.com
EOM
}
@@ -108,9 +96,16 @@ while getopts "b:acd:hi:m:o:p:r:s:u:l:" OPT; do
a)
CPR_CONTRIB_AUTO_PUSH="1"
;;
+ --)
+ shift
+ break
+ ;;
esac
done
+shift "$((OPTIND - 1))"
+extraopts="$@"
+
if [ -z "$REMOTE" ]; then
echo "ERROR: Missing parameter -u or CPR_CONTRIB_REMOTE in env, no git remote!"
usage
@@ -128,20 +123,12 @@ fi
# Rewrite private URLs to public URLs
# Determine the repository name for use in the WEB_URL later
-case "$REMOTE_URL" in
-*@*)
- USER_RE="[A-Za-z0-9_.@][A-Za-z0-9_.@-]*\$\?"
- PROTO_RE="[a-z][a-z+]*://"
- GIT_RE="\(^\($PROTO_RE\)\?$USER_RE@\)\([^:/]*\)[:/]\(.*\)"
- REMOTE_URL=${REMOTE_URL%.git}
- REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\4#")
- REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\3/\4#")
- ;;
-*)
- echo "WARNING: Unrecognized remote URL: $REMOTE_URL"
- echo " The pull and browse URLs will likely be incorrect"
- ;;
-esac
+USER_RE="[A-Za-z0-9_.@][A-Za-z0-9_.@-]*\$\?"
+PROTO_RE="[a-z][a-z+]*://"
+GIT_RE="\(^\($PROTO_RE\)\?\)\($USER_RE@\)\?\([^:/]*\)[:/]\(.*\)"
+REMOTE_URL=${REMOTE_URL%.git}
+REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\5#")
+REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\4/\5#")
if [ -z "$BRANCH" ]; then
BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2)
@@ -168,7 +155,7 @@ case "$REMOTE_URL" in
WEB_URL="http://git.pokylinux.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
;;
*git.openembedded.org*)
- WEB_URL="http://cgit.openembedded.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
+ WEB_URL="http://cgit.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH"
;;
*github.com*)
WEB_URL="https://github.com/$REMOTE_REPO/tree/$BRANCH"
@@ -201,7 +188,7 @@ if [ -n "$RELDIR" ]; then
ODIR=$(realpath $ODIR)
pdir=$(pwd)
cd $RELDIR
- extraopts="--relative"
+ extraopts="$extraopts --relative"
fi
# Generate the patches and cover letter
@@ -218,7 +205,7 @@ fi
[ -n "$RELDIR" ] && cd $pdir
# Customize the cover letter
-CL="$ODIR/0000-cover-letter.patch"
+CL="$(echo $ODIR/*0000-cover-letter.patch)"
PM="$ODIR/pull-msg"
GIT_VERSION=$(`git --version` | tr -d '[:alpha:][:space:].' | sed 's/\(...\).*/\1/')
NEWER_GIT_VERSION=210
@@ -270,7 +257,7 @@ fi
# Replace the SUBJECT token with it.
if [ -n "$SUBJECT" ]; then
- sed -i -e "s/\*\*\* SUBJECT HERE \*\*\*/$SUBJECT/" "$CL"
+ sed -i -e "s\`\*\*\* SUBJECT HERE \*\*\*\`$SUBJECT\`" "$CL"
fi
diff --git a/scripts/crosstap b/scripts/crosstap
index 58317cf91c..40856bc208 100755
--- a/scripts/crosstap
+++ b/scripts/crosstap
@@ -1,148 +1,458 @@
-#!/bin/bash
+#!/usr/bin/env python3
#
-# Run a systemtap script on remote target
+# Build a systemtap script for a given image and kernel
#
-# Examples (run on build host, target is 192.168.1.xxx):
-# $ source oe-init-build-env"
-# $ cd ~/my/systemtap/scripts"
+# Effectively, the script extracts the needed information from a set of
+# 'bitbake -e' commands and constructs a proper invocation of stap on
+# the host to build a systemtap script for a given target.
#
-# $ crosstap root@192.168.1.xxx myscript.stp"
-# $ crosstap root@192.168.1.xxx myscript-with-args.stp 99 ninetynine"
+# By default the script compiles scriptname.ko, which can be copied to
+# the target and activated with the 'staprun scriptname.ko' command. Or,
+# if the --remote user@hostname option is specified, the script will
+# build, load and execute the script on the target.
#
-# Copyright (c) 2012, Intel Corporation.
-# All rights reserved.
+# This script is very similar to, and inspired by, the crosstap shell
+# script. The major difference is that this script supports user-land
+# systemtap scripts, whereas the old crosstap could deal only with
+# scripts related to the kernel.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Copyright (c) 2018, Cisco Systems.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-function usage() {
- echo "Usage: $0 <user@hostname> <sytemtap-script> [additional systemtap-script args]"
-}
-
-function setup_usage() {
- echo ""
- echo "'crosstap' requires a local sdk build of the target system"
- echo "(or a build that includes 'tools-profile') in order to build"
- echo "kernel modules that can probe the target system."
- echo ""
- echo "Practically speaking, that means you need to do the following:"
- echo " - If you're running a pre-built image, download the release"
- echo " and/or BSP tarballs used to build the image."
- echo " - If you're working from git sources, just clone the metadata"
- echo " and BSP layers needed to build the image you'll be booting."
- echo " - Make sure you're properly set up to build a new image (see"
- echo " the BSP README and/or the widely available basic documentation"
- echo " that discusses how to build images)."
- echo " - Build an -sdk version of the image e.g.:"
- echo " $ bitbake core-image-sato-sdk"
- echo " OR"
- echo " - Build a non-sdk image but include the profiling tools:"
- echo " [ edit local.conf and add 'tools-profile' to the end of"
- echo " the EXTRA_IMAGE_FEATURES variable ]"
- echo " $ bitbake core-image-sato"
- echo ""
- echo " [ NOTE that 'crosstap' needs to be able to ssh into the target"
- echo " system, which isn't enabled by default in -minimal images. ]"
- echo ""
- echo "Once you've build the image on the host system, you're ready to"
- echo "boot it (or the equivalent pre-built image) and use 'crosstap'"
- echo "to probe it (you need to source the environment as usual first):"
- echo ""
- echo " $ source oe-init-build-env"
- echo " $ cd ~/my/systemtap/scripts"
- echo " $ crosstap root@192.168.1.xxx myscript.stp"
- echo ""
-}
-
-function systemtap_target_arch() {
- SYSTEMTAP_TARGET_ARCH=$1
- case $SYSTEMTAP_TARGET_ARCH in
- i?86)
- SYSTEMTAP_TARGET_ARCH="i386"
- ;;
- x86?64*)
- SYSTEMTAP_TARGET_ARCH="x86_64"
- ;;
- arm*)
- SYSTEMTAP_TARGET_ARCH="arm"
- ;;
- powerpc*)
- SYSTEMTAP_TARGET_ARCH="powerpc"
- ;;
- *)
- ;;
- esac
-}
-
-if [ $# -lt 2 ]; then
- usage
- exit 1
-fi
-
-if [ -z "$BUILDDIR" ]; then
- echo "Error: Unable to find the BUILDDIR environment variable."
- echo "Did you forget to source your build system environment setup script?"
- exit 1
-fi
-
-pushd $PWD
-cd $BUILDDIR
-BITBAKE_VARS=`bitbake -e virtual/kernel`
-popd
-
-STAGING_BINDIR_TOOLCHAIN=$(echo "$BITBAKE_VARS" | grep ^STAGING_BINDIR_TOOLCHAIN \
- | cut -d '=' -f2 | cut -d '"' -f2)
-STAGING_BINDIR_TOOLPREFIX=$(echo "$BITBAKE_VARS" | grep ^TARGET_PREFIX \
- | cut -d '=' -f2 | cut -d '"' -f2)
-SYSTEMTAP_HOST_INSTALLDIR=$(echo "$BITBAKE_VARS" | grep ^STAGING_DIR_NATIVE \
- | cut -d '=' -f2 | cut -d '"' -f2)
-TARGET_ARCH=$(echo "$BITBAKE_VARS" | grep ^TRANSLATED_TARGET_ARCH \
- | cut -d '=' -f2 | cut -d '"' -f2)
-TARGET_KERNEL_BUILDDIR=$(echo "$BITBAKE_VARS" | grep ^B= \
- | cut -d '=' -f2 | cut -d '"' -f2)
-
-systemtap_target_arch "$TARGET_ARCH"
-
-if [ ! -d $TARGET_KERNEL_BUILDDIR ] ||
- [ ! -f $TARGET_KERNEL_BUILDDIR/vmlinux ]; then
- echo -e "\nError: No target kernel build found."
- echo -e "Did you forget to create a local build of your image?"
- setup_usage
- exit 1
-fi
-
-if [ ! -f $SYSTEMTAP_HOST_INSTALLDIR/usr/bin/stap ]; then
- echo -e "\nError: Native (host) systemtap not found."
- echo -e "Did you accidentally build a local non-sdk image? (or forget to"
- echo -e "add 'tools-profile' to EXTRA_IMAGE_FEATURES in your local.conf)?"
- setup_usage
- exit 1
-fi
-
-target_user_hostname="$1"
-full_script_name="$2"
-script_name=$(basename "$2")
-script_base=${script_name%.*}
-shift 2
-
-${SYSTEMTAP_HOST_INSTALLDIR}/usr/bin/stap \
- -a ${SYSTEMTAP_TARGET_ARCH} \
- -B CROSS_COMPILE="${STAGING_BINDIR_TOOLCHAIN}/${STAGING_BINDIR_TOOLPREFIX}" \
- -r ${TARGET_KERNEL_BUILDDIR} \
- -I ${SYSTEMTAP_HOST_INSTALLDIR}/usr/share/systemtap/tapset \
- -R ${SYSTEMTAP_HOST_INSTALLDIR}/usr/share/systemtap/runtime \
- --remote=$target_user_hostname \
- -m $script_base \
- $full_script_name "$@"
-
-exit 0
+
+import sys
+import re
+import subprocess
+import os
+import optparse
+
+class Stap(object):
+ def __init__(self, script, module, remote):
+ self.script = script
+ self.module = module
+ self.remote = remote
+ self.stap = None
+ self.sysroot = None
+ self.runtime = None
+ self.tapset = None
+ self.arch = None
+ self.cross_compile = None
+ self.kernel_release = None
+ self.target_path = None
+ self.target_ld_library_path = None
+
+ if not self.remote:
+ if not self.module:
+ # derive module name from script
+ self.module = os.path.basename(self.script)
+ if self.module[-4:] == ".stp":
+ self.module = self.module[:-4]
+ # replace any '-' with '_'
+ self.module = self.module.replace("-", "_")
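+ # e.g. a script named "my-probe.stp" (hypothetical) yields module "my_probe"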
+
+ def command(self, args):
+ ret = []
+ ret.append(self.stap)
+
+ if self.remote:
+ ret.append("--remote")
+ ret.append(self.remote)
+ else:
+ ret.append("-p4")
+ ret.append("-m")
+ ret.append(self.module)
+
+ ret.append("-a")
+ ret.append(self.arch)
+
+ ret.append("-B")
+ ret.append("CROSS_COMPILE=" + self.cross_compile)
+
+ ret.append("-r")
+ ret.append(self.kernel_release)
+
+ ret.append("-I")
+ ret.append(self.tapset)
+
+ ret.append("-R")
+ ret.append(self.runtime)
+
+ if self.sysroot:
+ ret.append("--sysroot")
+ ret.append(self.sysroot)
+
+ ret.append("--sysenv=PATH=" + self.target_path)
+ ret.append("--sysenv=LD_LIBRARY_PATH=" + self.target_ld_library_path)
+
+ ret = ret + args
+
+ ret.append(self.script)
+ return ret
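+ # command() yields an argv list along these (illustrative) lines:
+ #   ['.../usr/bin/stap', '--remote', 'root@192.168.7.2', '-a', 'arm',
+ #    '-B', 'CROSS_COMPILE=...', '-r', '<kernel-builddir>', ..., 'script.stp']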
+
+ def additional_environment(self):
+ ret = {}
+ ret["SYSTEMTAP_DEBUGINFO_PATH"] = "+:.debug:build"
+ return ret
+
+ def environment(self):
+ ret = os.environ.copy()
+ additional = self.additional_environment()
+ for e in additional:
+ ret[e] = additional[e]
+ return ret
+
+ def display_command(self, args):
+ additional_env = self.additional_environment()
+ command = self.command(args)
+
+ print("#!/bin/sh")
+ for e in additional_env:
+ print("export %s=\"%s\"" % (e, additional_env[e]))
+ print(" ".join(command))
+
+class BitbakeEnvInvocationException(Exception):
+ def __init__(self, message):
+ self.message = message
+
+class BitbakeEnv(object):
+ BITBAKE="bitbake"
+
+ def __init__(self, package):
+ self.package = package
+ self.cmd = BitbakeEnv.BITBAKE + " -e " + self.package
+ self.popen = subprocess.Popen(self.cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ self.__lines = self.popen.stdout.readlines()
+ self.popen.wait()
+
+ self.lines = []
+ for line in self.__lines:
+ self.lines.append(line.decode('utf-8'))
+
+ def get_vars(self, vars):
+ if self.popen.returncode:
+ raise BitbakeEnvInvocationException(
+ "\nFailed to execute '" + self.cmd +
+ "' with the following message:\n" +
+ ''.join(self.lines))
+
+ search_patterns = []
+ retdict = {}
+ for var in vars:
+ # regular (not exported) variable
+ rexpr = "^" + var + "=\"(.*)\""
+ re_compiled = re.compile(rexpr)
+ search_patterns.append((var, re_compiled))
+
+ # exported variable
+ rexpr = "^export " + var + "=\"(.*)\""
+ re_compiled = re.compile(rexpr)
+ search_patterns.append((var, re_compiled))
+
+ for line in self.lines:
+ for var, rexpr in search_patterns:
+ m = rexpr.match(line)
+ if m:
+ value = m.group(1)
+ retdict[var] = value
+
+ # fill in variable values in the order they were requested
+ ret = []
+ for var in vars:
+ ret.append(retdict.get(var))
+
+ # if it is a single-value list, return it as a scalar, not a list
+ if len(ret) == 1:
+ ret = ret[0]
+
+ return ret
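+ # e.g. get_vars(["MACHINE"]) returns a single string, while
+ # get_vars(("PN", "PV")) returns a list of two values (None if a variable is unset)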
+
+class ParamDiscovery(object):
+ SYMBOLS_CHECK_MESSAGE = """
+WARNING: image '%s' does not have the dbg-pkgs IMAGE_FEATURES enabled and does
+not inherit the "image-combined-dbg" class. As a result the image does not
+carry the symbols needed for DWARF-based probes of user-land processes.
+Consider adding 'dbg-pkgs' to EXTRA_IMAGE_FEATURES or adding
+"image-combined-dbg" to USER_CLASSES, i.e. add the line
+'USER_CLASSES += "image-combined-dbg"' to your local.conf file.
+
+Alternatively, you may use the IMAGE_GEN_DEBUGFS="1" option; after the build
+you then need to recombine/unpack the image and image-dbg tarballs and pass
+the resulting directory location with the --sysroot option.
+"""
+
+ def __init__(self, image):
+ self.image = image
+
+ self.image_rootfs = None
+ self.image_features = None
+ self.image_gen_debugfs = None
+ self.inherit = None
+ self.base_bindir = None
+ self.base_sbindir = None
+ self.base_libdir = None
+ self.bindir = None
+ self.sbindir = None
+ self.libdir = None
+
+ self.staging_bindir_toolchain = None
+ self.target_prefix = None
+ self.target_arch = None
+ self.target_kernel_builddir = None
+
+ self.staging_dir_native = None
+
+ self.image_combined_dbg = False
+
+ def discover(self):
+ if self.image:
+ benv_image = BitbakeEnv(self.image)
+ (self.image_rootfs,
+ self.image_features,
+ self.image_gen_debugfs,
+ self.inherit,
+ self.base_bindir,
+ self.base_sbindir,
+ self.base_libdir,
+ self.bindir,
+ self.sbindir,
+ self.libdir
+ ) = benv_image.get_vars(
+ ("IMAGE_ROOTFS",
+ "IMAGE_FEATURES",
+ "IMAGE_GEN_DEBUGFS",
+ "INHERIT",
+ "base_bindir",
+ "base_sbindir",
+ "base_libdir",
+ "bindir",
+ "sbindir",
+ "libdir"
+ ))
+
+ benv_kernel = BitbakeEnv("virtual/kernel")
+ (self.staging_bindir_toolchain,
+ self.target_prefix,
+ self.target_arch,
+ self.target_kernel_builddir
+ ) = benv_kernel.get_vars(
+ ("STAGING_BINDIR_TOOLCHAIN",
+ "TARGET_PREFIX",
+ "TRANSLATED_TARGET_ARCH",
+ "B"
+ ))
+
+ benv_systemtap = BitbakeEnv("systemtap-native")
+ (self.staging_dir_native
+ ) = benv_systemtap.get_vars(["STAGING_DIR_NATIVE"])
+
+ if self.inherit:
+ if "image-combined-dbg" in self.inherit.split():
+ self.image_combined_dbg = True
+
+ def check(self, sysroot_option):
+ ret = True
+ if self.image_rootfs:
+ sysroot = self.image_rootfs
+ if not os.path.isdir(self.image_rootfs):
+ print("ERROR: Cannot find '" + sysroot +
+ "' directory. Was '" + self.image + "' image built?")
+ ret = False
+
+ stap = self.staging_dir_native + "/usr/bin/stap"
+ if not os.path.isfile(stap):
+ print("ERROR: Cannot find '" + stap +
+ "'. Was 'systemtap-native' built?")
+ ret = False
+
+ if not os.path.isdir(self.target_kernel_builddir):
+ print("ERROR: Cannot find '" + self.target_kernel_builddir +
+ "' directory. Was 'kernel/virtual' built?")
+ ret = False
+
+ if not sysroot_option and self.image_rootfs:
+ dbg_pkgs_found = False
+
+ if self.image_features:
+ image_features = self.image_features.split()
+ if "dbg-pkgs" in image_features:
+ dbg_pkgs_found = True
+
+ if not dbg_pkgs_found \
+ and not self.image_combined_dbg:
+ print(ParamDiscovery.SYMBOLS_CHECK_MESSAGE % (self.image))
+
+ if not ret:
+ print("")
+
+ return ret
+
+ def __map_systemtap_arch(self):
+ a = self.target_arch
+ ret = a
+ if re.match('(athlon|x86.64)$', a):
+ ret = 'x86_64'
+ elif re.match('i.86$', a):
+ ret = 'i386'
+ elif re.match('arm$', a):
+ ret = 'arm'
+ elif re.match('aarch64$', a):
+ ret = 'arm64'
+ elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a):
+ ret = 'mips'
+ elif re.match('p(pc|owerpc)(|64)', a):
+ ret = 'powerpc'
+ return ret
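+ # e.g. 'aarch64' maps to 'arm64' and 'mips64el' maps to 'mips'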
+
+ def fill_stap(self, stap):
+ stap.stap = self.staging_dir_native + "/usr/bin/stap"
+ if not stap.sysroot:
+ if self.image_rootfs:
+ if self.image_combined_dbg:
+ stap.sysroot = self.image_rootfs + "-dbg"
+ else:
+ stap.sysroot = self.image_rootfs
+ stap.runtime = self.staging_dir_native + "/usr/share/systemtap/runtime"
+ stap.tapset = self.staging_dir_native + "/usr/share/systemtap/tapset"
+ stap.arch = self.__map_systemtap_arch()
+ stap.cross_compile = self.staging_bindir_toolchain + "/" + \
+ self.target_prefix
+ stap.kernel_release = self.target_kernel_builddir
+
+ # no standard seems to mandate the order in which these should appear
+ target_path = []
+ if self.sbindir:
+ target_path.append(self.sbindir)
+ if self.bindir:
+ target_path.append(self.bindir)
+ if self.base_sbindir:
+ target_path.append(self.base_sbindir)
+ if self.base_bindir:
+ target_path.append(self.base_bindir)
+ stap.target_path = ":".join(target_path)
+
+ target_ld_library_path = []
+ if self.libdir:
+ target_ld_library_path.append(self.libdir)
+ if self.base_libdir:
+ target_ld_library_path.append(self.base_libdir)
+ stap.target_ld_library_path = ":".join(target_ld_library_path)
+
+
+def main():
+ usage = """usage: %prog -s <systemtap-script> [options] [-- [systemtap options]]
+
+%prog cross-compiles the given SystemTap script against the given image and kernel.
+
+It needs to run in an environment set up for bitbake - it uses 'bitbake -e'
+invocations to retrieve the information needed to construct the proper stap
+cross-build invocation arguments. It assumes that systemtap-native is built
+in the given bitbake workspace.
+
+Anything after the -- option is passed directly to stap.
+
+The legacy script invocation style is supported but deprecated:
+  %prog <user@hostname> <systemtap-script> [systemtap options]
+
+To get the most out of systemtap the following site.conf or local.conf
+configuration is recommended:
+
+# enables symbol + target binaries rootfs-dbg in workspace
+IMAGE_GEN_DEBUGFS = "1"
+IMAGE_FSTYPES_DEBUGFS = "tar.bz2"
+USER_CLASSES += "image-combined-dbg"
+
+# enables kernel debug symbols
+KERNEL_EXTRA_FEATURES_append = " features/debug/debug-kernel.scc"
+
+# minimal, just run-time systemtap configuration in target image
+PACKAGECONFIG_pn-systemtap = "monitor"
+
+# add systemtap run-time into target image if it is not there yet
+IMAGE_INSTALL_append = " systemtap"
+"""
+ option_parser = optparse.OptionParser(usage=usage)
+
+ option_parser.add_option("-s", "--script", dest="script",
+ help="specify input script FILE name",
+ metavar="FILE")
+
+ option_parser.add_option("-i", "--image", dest="image",
+ help="specify image name for which script should be compiled")
+
+ option_parser.add_option("-r", "--remote", dest="remote",
+ help="specify username@hostname of remote target to run script "
+ "optional, it assumes that remote target can be accessed through ssh")
+
+ option_parser.add_option("-m", "--module", dest="module",
+ help="specify module name, optional, has effect only if --remote is not used, "
+ "if not specified module name will be derived from passed script name")
+
+ option_parser.add_option("-y", "--sysroot", dest="sysroot",
+ help="explicitely specify image sysroot location. May need to use it in case "
+ "when IMAGE_GEN_DEBUGFS=\"1\" option is used and recombined with symbols "
+ "in different location",
+ metavar="DIR")
+
+ option_parser.add_option("-o", "--out", dest="out",
+ action="store_true",
+ help="output shell script that equvivalent invocation of this script with "
+ "given set of arguments, in given bitbake environment. It could be stored in "
+ "separate shell script and could be repeated without incuring bitbake -e "
+ "invocation overhead",
+ default=False)
+
+ option_parser.add_option("-d", "--debug", dest="debug",
+ action="store_true",
+ help="enable debug output. Use this option to see resulting stap invocation",
+ default=False)
+
+ # does the invocation follow the syntax of the original crosstap shell script?
+ legacy_args = False
+
+ # check if we were called the legacy way
+ if len(sys.argv) >= 3:
+ if sys.argv[1].find("@") != -1 and os.path.exists(sys.argv[2]):
+ legacy_args = True
+
+ # fill option values for the legacy invocation case
+ options = optparse.Values()
+ options.script = sys.argv[2]
+ options.remote = sys.argv[1]
+ options.image = None
+ options.module = None
+ options.sysroot = None
+ options.out = None
+ options.debug = None
+ remaining_args = sys.argv[3:]
+
+ if not legacy_args:
+ (options, remaining_args) = option_parser.parse_args()
+
+ if not options.script or not os.path.exists(options.script):
+ print("'-s FILE' option is missing\n")
+ option_parser.print_help()
+ else:
+ stap = Stap(options.script, options.module, options.remote)
+ discovery = ParamDiscovery(options.image)
+ discovery.discover()
+ if not discovery.check(options.sysroot):
+ option_parser.print_help()
+ else:
+ stap.sysroot = options.sysroot
+ discovery.fill_stap(stap)
+
+ if options.out:
+ stap.display_command(remaining_args)
+ else:
+ cmd = stap.command(remaining_args)
+ env = stap.environment()
+
+ if options.debug:
+ print(" ".join(cmd))
+
+ os.execve(cmd[0], cmd, env)
+
+main()
diff --git a/scripts/devtool b/scripts/devtool
index 0c32c502a3..8a4f41bc37 100755
--- a/scripts/devtool
+++ b/scripts/devtool
@@ -4,18 +4,8 @@
#
# Copyright (C) 2014-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -51,7 +41,7 @@ class ConfigHandler(object):
def __init__(self, filename):
self.config_file = filename
- self.config_obj = configparser.SafeConfigParser()
+ self.config_obj = configparser.ConfigParser()
def get(self, section, option, default=None):
try:
@@ -113,41 +103,30 @@ def read_workspace():
externalsrc_re = re.compile(r'^EXTERNALSRC(_pn-([^ =]+))? *= *"([^"]*)"$')
for fn in glob.glob(os.path.join(config.workspace_path, 'appends', '*.bbappend')):
with open(fn, 'r') as f:
+ pnvalues = {}
for line in f:
res = externalsrc_re.match(line.rstrip())
if res:
- pn = res.group(2) or os.path.splitext(os.path.basename(fn))[0].split('_')[0]
+ recipepn = os.path.splitext(os.path.basename(fn))[0].split('_')[0]
+ pn = res.group(2) or recipepn
# Find the recipe file within the workspace, if any
bbfile = os.path.basename(fn).replace('.bbappend', '.bb').replace('%', '*')
recipefile = glob.glob(os.path.join(config.workspace_path,
'recipes',
- pn,
+ recipepn,
bbfile))
if recipefile:
recipefile = recipefile[0]
- workspace[pn] = {'srctree': res.group(3),
- 'bbappend': fn,
- 'recipefile': recipefile}
- logger.debug('Found recipe %s' % workspace[pn])
-
-def create_unlockedsigs():
- """ This function will make unlocked-sigs.inc match the recipes in the
- workspace. This runs on every run of devtool, but it lets us ensure
- the unlocked items are in sync with the workspace. """
-
- confdir = os.path.join(basepath, 'conf')
- unlockedsigs = os.path.join(confdir, 'unlocked-sigs.inc')
- bb.utils.mkdirhier(confdir)
- with open(os.path.join(confdir, 'unlocked-sigs.inc'), 'w') as f:
- f.write("# DO NOT MODIFY! YOUR CHANGES WILL BE LOST.\n" +
- "# This layer was created by the OpenEmbedded devtool" +
- " utility in order to\n" +
- "# contain recipes that are unlocked.\n")
-
- f.write('SIGGEN_UNLOCKED_RECIPES += "\\\n')
- for pn in workspace:
- f.write(' ' + pn)
- f.write('"')
+ pnvalues['srctree'] = res.group(3)
+ pnvalues['bbappend'] = fn
+ pnvalues['recipefile'] = recipefile
+ elif line.startswith('# srctreebase: '):
+ pnvalues['srctreebase'] = line.split(':', 1)[1].strip()
+ if pnvalues:
+ if not pnvalues.get('srctreebase', None):
+ pnvalues['srctreebase'] = pnvalues['srctree']
+ logger.debug('Found recipe %s' % pnvalues)
+ workspace[pn] = pnvalues
def create_workspace(args, config, basepath, workspace):
if args.layerpath:
@@ -176,6 +155,7 @@ def _create_workspace(workspacedir, config, basepath):
f.write('BBFILE_PATTERN_workspacelayer = "^$' + '{LAYERDIR}/"\n')
f.write('BBFILE_PATTERN_IGNORE_EMPTY_workspacelayer = "1"\n')
f.write('BBFILE_PRIORITY_workspacelayer = "99"\n')
+ f.write('LAYERSERIES_COMPAT_workspacelayer = "${LAYERSERIES_COMPAT_core}"\n')
# Add a README file
with open(os.path.join(workspacedir, 'README'), 'w') as f:
f.write('This layer was created by the OpenEmbedded devtool utility in order to\n')
@@ -201,7 +181,11 @@ def _enable_workspace_layer(workspacedir, config, basepath):
if not os.path.exists(bblayers_conf):
logger.error('Unable to find bblayers.conf')
return
- _, added = bb.utils.edit_bblayers_conf(bblayers_conf, workspacedir, config.workspace_path)
+ if os.path.abspath(workspacedir) != os.path.abspath(config.workspace_path):
+ removedir = config.workspace_path
+ else:
+ removedir = None
+ _, added = bb.utils.edit_bblayers_conf(bblayers_conf, workspacedir, removedir)
if added:
logger.info('Enabling workspace layer in bblayers.conf')
if config.workspace_path != workspacedir:
@@ -215,6 +199,9 @@ def main():
global config
global context
+ if sys.getfilesystemencoding() != "utf-8":
+ sys.exit("Please use a locale setting which supports utf-8.\nPython can't change the filesystem locale after loading so we need a utf-8 when python starts or things won't work.")
+
context = Context(fixed_setup=False)
# Default basepath
@@ -288,13 +275,17 @@ def main():
scriptutils.logger_setup_color(logger, global_args.color)
if global_args.bbpath is None:
- tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
try:
- global_args.bbpath = tinfoil.config_data.getVar('BBPATH', True)
- finally:
- tinfoil.shutdown()
-
- for path in [scripts_path] + global_args.bbpath.split(':'):
+ tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+ try:
+ global_args.bbpath = tinfoil.config_data.getVar('BBPATH')
+ finally:
+ tinfoil.shutdown()
+ except bb.BBHandledException:
+ return 2
+
+ # Search BBPATH first to allow layers to override plugins in scripts_path
+ for path in global_args.bbpath.split(':') + [scripts_path]:
pluginpath = os.path.join(path, 'lib', 'devtool')
scriptutils.load_plugins(logger, plugins, pluginpath)
@@ -325,7 +316,6 @@ def main():
if not getattr(args, 'no_workspace', False):
read_workspace()
- create_unlockedsigs()
try:
ret = args.func(args, config, basepath, workspace)
diff --git a/scripts/gen-lockedsig-cache b/scripts/gen-lockedsig-cache
index 49de74ed9b..9bfae9d832 100755
--- a/scripts/gen-lockedsig-cache
+++ b/scripts/gen-lockedsig-cache
@@ -1,10 +1,13 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import os
import sys
-import glob
import shutil
import errno
+import time
def mkdir(d):
try:
@@ -13,6 +16,38 @@ def mkdir(d):
if e.errno != errno.EEXIST:
raise e
+# extract the hash: the eighth colon-separated field, up to the first underscore
+def extract_sha(filename):
+ return filename.split(':')[7].split('_')[0]
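+# e.g. for a (hypothetical) sstate file name
+# 'sstate:zlib:core2-64-poky-linux:1.2.11:r0:core2-64:3:0123abcd_populate_sysroot.tgz'
+# the extracted hash is '0123abcd'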
+
+# get all files in a directory, extract the hash from each and build
+# a map from hash to the list of files with that hash
+def map_sha_to_files(dir_, prefix, sha_map):
+ sstate_prefix_path = dir_ + '/' + prefix + '/'
+ if not os.path.exists(sstate_prefix_path):
+ return
+ sstate_files = os.listdir(sstate_prefix_path)
+ for f in sstate_files:
+ try:
+ sha = extract_sha(f)
+ if sha not in sha_map:
+ sha_map[sha] = []
+ sha_map[sha].append(sstate_prefix_path + f)
+ except IndexError:
+ continue
+
+# given a two-character hash prefix, build a map from hash to list of files
+def build_sha_cache(prefix):
+ sha_map = {}
+
+ sstate_dir = sys.argv[2]
+ map_sha_to_files(sstate_dir, prefix, sha_map)
+
+ native_sstate_dir = sys.argv[2] + '/' + sys.argv[4]
+ map_sha_to_files(native_sstate_dir, prefix, sha_map)
+
+ return sha_map
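+# e.g. build_sha_cache('ab') maps each hash to the matching files under
+# <input-cachedir>/ab/ and <input-cachedir>/<nativelsbstring>/ab/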
+
if len(sys.argv) < 5:
print("Incorrect number of arguments specified")
print("syntax: gen-lockedsig-cache <locked-sigs.inc> <input-cachedir> <output-cachedir> <nativelsbstring> [filterfile]")
@@ -38,12 +73,19 @@ with open(sys.argv[1]) as f:
sigs.append(sig)
print('Gathering file list')
+start_time = time.perf_counter()
files = set()
+sstate_content_cache = {}
for s in sigs:
- p = sys.argv[2] + "/" + s[:2] + "/*" + s + "*"
- files |= set(glob.glob(p))
- p = sys.argv[2] + "/%s/" % sys.argv[4] + s[:2] + "/*" + s + "*"
- files |= set(glob.glob(p))
+ prefix = s[:2]
+ if prefix not in sstate_content_cache:
+ sstate_content_cache[prefix] = build_sha_cache(prefix)
+
+ for f in sstate_content_cache[prefix][s]:
+ files.add(f)
+
+elapsed = time.perf_counter() - start_time
+print("Gathering file list took %.1fs" % elapsed)
print('Processing files')
for f in files:
@@ -62,7 +104,11 @@ for f in files:
os.remove(dst)
if (os.stat(src).st_dev == os.stat(destdir).st_dev):
print('linking')
- os.link(src, dst)
+ try:
+ os.link(src, dst)
+ except OSError as e:
+ print('hard linking failed, copying')
+ shutil.copyfile(src, dst)
else:
print('copying')
shutil.copyfile(src, dst)
diff --git a/scripts/gen-site-config b/scripts/gen-site-config
index 7da7a0bd8a..727b809c0f 100755
--- a/scripts/gen-site-config
+++ b/scripts/gen-site-config
@@ -1,18 +1,8 @@
#! /bin/sh
# Copyright (c) 2005-2008 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
cat << EOF
AC_PREREQ(2.57)
diff --git a/scripts/lib/argparse_oe.py b/scripts/lib/argparse_oe.py
index bf6eb17197..94a4ac5011 100644
--- a/scripts/lib/argparse_oe.py
+++ b/scripts/lib/argparse_oe.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import sys
import argparse
from collections import defaultdict, OrderedDict
@@ -167,3 +171,10 @@ class OeHelpFormatter(argparse.HelpFormatter):
return '\n'.join(lines)
else:
return super(OeHelpFormatter, self)._format_action(action)
+
+def int_positive(value):
+ ivalue = int(value)
+ if ivalue <= 0:
+ raise argparse.ArgumentTypeError(
+ "%s is not a positive int value" % value)
+ return ivalue
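+
+# intended for use as an argparse type, e.g. (hypothetical option):
+#   parser.add_argument('--workers', type=int_positive)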
diff --git a/scripts/lib/build_perf/__init__.py b/scripts/lib/build_perf/__init__.py
new file mode 100644
index 0000000000..dcbb78042d
--- /dev/null
+++ b/scripts/lib/build_perf/__init__.py
@@ -0,0 +1,24 @@
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Build performance test library functions"""
+
+def print_table(rows, row_fmt=None):
+ """Print data table"""
+ if not rows:
+ return
+ if not row_fmt:
+ row_fmt = ['{:{wid}} '] * len(rows[0])
+
+ # Go through the data to get maximum cell widths
+ num_cols = len(row_fmt)
+ col_widths = [0] * num_cols
+ for row in rows:
+ for i, val in enumerate(row):
+ col_widths[i] = max(col_widths[i], len(str(val)))
+
+ for row in rows:
+ print(*[row_fmt[i].format(col, wid=col_widths[i]) for i, col in enumerate(row)])
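+
+# e.g. print_table([('COLUMN A', 'COLUMN B'), ('foo', 1), ('barbar', 22)])
+# prints each row with every column padded to its widest cell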
+
diff --git a/scripts/lib/build_perf/html.py b/scripts/lib/build_perf/html.py
new file mode 100644
index 0000000000..d1273c9c50
--- /dev/null
+++ b/scripts/lib/build_perf/html.py
@@ -0,0 +1,12 @@
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Helper module for HTML reporting"""
+from jinja2 import Environment, PackageLoader
+
+
+env = Environment(loader=PackageLoader('build_perf', 'html'))
+
+template = env.get_template('report.html')
diff --git a/scripts/lib/build_perf/html/measurement_chart.html b/scripts/lib/build_perf/html/measurement_chart.html
new file mode 100644
index 0000000000..65f1a227ad
--- /dev/null
+++ b/scripts/lib/build_perf/html/measurement_chart.html
@@ -0,0 +1,50 @@
+<script type="text/javascript">
+ chartsDrawing += 1;
+ google.charts.setOnLoadCallback(drawChart_{{ chart_elem_id }});
+ function drawChart_{{ chart_elem_id }}() {
+ var data = new google.visualization.DataTable();
+
+ // Chart options
+ var options = {
+ theme : 'material',
+ legend: 'none',
+ hAxis: { format: '', title: 'Commit number',
+ minValue: {{ chart_opts.haxis.min }},
+ maxValue: {{ chart_opts.haxis.max }} },
+ {% if measurement.type == 'time' %}
+ vAxis: { format: 'h:mm:ss' },
+ {% else %}
+ vAxis: { format: '' },
+ {% endif %}
+ pointSize: 5,
+ chartArea: { left: 80, right: 15 },
+ };
+
+ // Define data columns
+ data.addColumn('number', 'Commit');
+ data.addColumn('{{ measurement.value_type.gv_data_type }}',
+ '{{ measurement.value_type.quantity }}');
+ // Add data rows
+ data.addRows([
+ {% for sample in measurement.samples %}
+ [{{ sample.commit_num }}, {{ sample.mean.gv_value() }}],
+ {% endfor %}
+ ]);
+
+ // Finally, draw the chart
+ chart_div = document.getElementById('{{ chart_elem_id }}');
+ var chart = new google.visualization.LineChart(chart_div);
+ google.visualization.events.addListener(chart, 'ready', function () {
+ //chart_div = document.getElementById('{{ chart_elem_id }}');
+ //chart_div.innerHTML = '<img src="' + chart.getImageURI() + '">';
+ png_div = document.getElementById('{{ chart_elem_id }}_png');
+ png_div.outerHTML = '<a id="{{ chart_elem_id }}_png" href="' + chart.getImageURI() + '">PNG</a>';
+ console.log("CHART READY: {{ chart_elem_id }}");
+ chartsDrawing -= 1;
+ if (chartsDrawing == 0)
+ console.log("ALL CHARTS READY");
+ });
+ chart.draw(data, options);
+}
+</script>
+
diff --git a/scripts/lib/build_perf/html/report.html b/scripts/lib/build_perf/html/report.html
new file mode 100644
index 0000000000..d1ba6f2578
--- /dev/null
+++ b/scripts/lib/build_perf/html/report.html
@@ -0,0 +1,289 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+{# Scripts, for visualization#}
+<!--START-OF-SCRIPTS-->
+<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+<script type="text/javascript">
+google.charts.load('current', {'packages':['corechart']});
+var chartsDrawing = 0;
+</script>
+
+{# Render measurement result charts #}
+{% for test in test_data %}
+ {% if test.status == 'SUCCESS' %}
+ {% for measurement in test.measurements %}
+ {% set chart_elem_id = test.name + '_' + measurement.name + '_chart' %}
+ {% include 'measurement_chart.html' %}
+ {% endfor %}
+ {% endif %}
+{% endfor %}
+
+<!--END-OF-SCRIPTS-->
+
+{# Styles #}
+<style>
+.meta-table {
+ font-size: 14px;
+ text-align: left;
+ border-collapse: collapse;
+}
+.meta-table tr:nth-child(even){background-color: #f2f2f2}
+.meta-table th, .meta-table td {
+ padding: 4px;
+}
+.summary {
+ margin: 0;
+ font-size: 14px;
+ text-align: left;
+ border-collapse: collapse;
+}
+.summary th, .summary td {
+ padding: 4px;
+}
+.measurement {
+ padding: 8px 0px 8px 8px;
+ border: 2px solid #f0f0f0;
+ margin-bottom: 10px;
+}
+.details {
+ margin: 0;
+ font-size: 12px;
+ text-align: left;
+ border-collapse: collapse;
+}
+.details th {
+ padding-right: 8px;
+}
+.details.plain th {
+ font-weight: normal;
+}
+.preformatted {
+ font-family: monospace;
+ white-space: pre-wrap;
+ background-color: #f0f0f0;
+ margin-left: 10px;
+}
+hr {
+ color: #f0f0f0;
+}
+h2 {
+ font-size: 20px;
+ margin-bottom: 0px;
+ color: #707070;
+}
+h3 {
+ font-size: 16px;
+ margin: 0px;
+ color: #707070;
+}
+</style>
+
+<title>{{ title }}</title>
+</head>
+
+{% macro poky_link(commit) -%}
+ <a href="http://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?id={{ commit }}">{{ commit[0:11] }}</a>
+{%- endmacro %}
+
+<body><div style="width: 700px">
+ {# Test metadata #}
+ <h2>General</h2>
+ <hr>
+ <table class="meta-table" style="width: 100%">
+ <tr>
+ <th></th>
+ <th>Current commit</th>
+ <th>Comparing with</th>
+ </tr>
+ {% for key, item in metadata.items() %}
+ <tr>
+ <th>{{ item.title }}</th>
+ {%if key == 'commit' %}
+ <td>{{ poky_link(item.value) }}</td>
+ <td>{{ poky_link(item.value_old) }}</td>
+ {% else %}
+ <td>{{ item.value }}</td>
+ <td>{{ item.value_old }}</td>
+ {% endif %}
+ </tr>
+ {% endfor %}
+ </table>
+
+ {# Test result summary #}
+ <h2>Test result summary</h2>
+ <hr>
+ <table class="summary" style="width: 100%">
+ {% for test in test_data %}
+ {% if loop.index is even %}
+ {% set row_style = 'style="background-color: #f2f2f2"' %}
+ {% else %}
+ {% set row_style = 'style="background-color: #ffffff"' %}
+ {% endif %}
+ {% if test.status == 'SUCCESS' %}
+ {% for measurement in test.measurements %}
+ <tr {{ row_style }}>
+ {% if loop.index == 1 %}
+ <td>{{ test.name }}: {{ test.description }}</td>
+ {% else %}
+ {# add empty cell in place of the test name#}
+ <td></td>
+ {% endif %}
+ {% if measurement.absdiff > 0 %}
+ {% set result_style = "color: red" %}
+ {# a self-comparison is false only for NaN, i.e. this branch means a valid, non-positive diff #}
+ {% elif measurement.absdiff == measurement.absdiff %}
+ {% set result_style = "color: green" %}
+ {% else %}
+ {% set result_style = "color: orange" %}
+ {%endif %}
+ {% if measurement.reldiff|abs > 2 %}
+ {% set result_style = result_style + "; font-weight: bold" %}
+ {% endif %}
+ <td>{{ measurement.description }}</td>
+ <td style="font-weight: bold">{{ measurement.value.mean }}</td>
+ <td style="{{ result_style }}">{{ measurement.absdiff_str }}</td>
+ <td style="{{ result_style }}">{{ measurement.reldiff_str }}</td>
+ </tr>
+ {% endfor %}
+ {% else %}
+ <tr {{ row_style }}>
+ <td style="font-weight: bold; color: red;">{{ test.status }}</td>
+ <td></td> <td></td> <td></td> <td></td>
+ </tr>
+ {% endif %}
+ {% endfor %}
+ </table>
+
+ {# Detailed test results #}
+ {% for test in test_data %}
+ <h2>{{ test.name }}: {{ test.description }}</h2>
+ <hr>
+ {% if test.status == 'SUCCESS' %}
+ {% for measurement in test.measurements %}
+ <div class="measurement">
+ <h3>{{ measurement.description }}</h3>
+ <div style="font-weight:bold;">
+ <span style="font-size: 23px;">{{ measurement.value.mean }}</span>
+ <span style="font-size: 20px; margin-left: 12px">
+ {% if measurement.absdiff > 0 %}
+ <span style="color: red">
+ {% elif measurement.absdiff == measurement.absdiff %}
+ <span style="color: green">
+ {% else %}
+ <span style="color: orange">
+ {% endif %}
+ {{ measurement.absdiff_str }} ({{measurement.reldiff_str}})
+ </span></span>
+ </div>
+ {# Table for trendchart and the statistics #}
+ <table style="width: 100%">
+ <tr>
+ <td style="width: 75%">
+ {# Linechart #}
+ <div id="{{ test.name }}_{{ measurement.name }}_chart"></div>
+ </td>
+ <td>
+ {# Measurement statistics #}
+ <table class="details plain">
+ <tr>
+ <th>Test runs</th><td>{{ measurement.value.sample_cnt }}</td>
+ </tr><tr>
+ <th>-/+</th><td>-{{ measurement.value.minus }} / +{{ measurement.value.plus }}</td>
+ </tr><tr>
+ <th>Min</th><td>{{ measurement.value.min }}</td>
+ </tr><tr>
+ <th>Max</th><td>{{ measurement.value.max }}</td>
+ </tr><tr>
+ <th>Stdev</th><td>{{ measurement.value.stdev }}</td>
+ </tr><tr>
+ <th><div id="{{ test.name }}_{{ measurement.name }}_chart_png"></div></th>
+ <td></td>
+ </tr>
+ </table>
+ </td>
+ </tr>
+ </table>
+
+ {# Task and recipe summary from buildstats #}
+ {% if 'buildstats' in measurement %}
+ Task resource usage
+ <table class="details" style="width:100%">
+ <tr>
+ <th>Number of tasks</th>
+ <th>Top consumers of cputime</th>
+ </tr>
+ <tr>
+ <td style="vertical-align: top">{{ measurement.buildstats.tasks.count }} ({{ measurement.buildstats.tasks.change }})</td>
+ {# Table of most resource-hungry tasks #}
+ <td>
+ <table class="details plain">
+ {% for diff in measurement.buildstats.top_consumer|reverse %}
+ <tr>
+ <th>{{ diff.pkg }}.{{ diff.task }}</th>
+ <td>{{ '%0.0f' % diff.value2 }} s</td>
+ </tr>
+ {% endfor %}
+ </table>
+ </td>
+ </tr>
+ <tr>
+ <th>Biggest increase in cputime</th>
+ <th>Biggest decrease in cputime</th>
+ </tr>
+ <tr>
+ {# Table biggest increase in resource usage #}
+ <td>
+ <table class="details plain">
+ {% for diff in measurement.buildstats.top_increase|reverse %}
+ <tr>
+ <th>{{ diff.pkg }}.{{ diff.task }}</th>
+ <td>{{ '%+0.0f' % diff.absdiff }} s</td>
+ </tr>
+ {% endfor %}
+ </table>
+ </td>
+ {# Table biggest decrease in resource usage #}
+ <td>
+ <table class="details plain">
+ {% for diff in measurement.buildstats.top_decrease %}
+ <tr>
+ <th>{{ diff.pkg }}.{{ diff.task }}</th>
+ <td>{{ '%+0.0f' % diff.absdiff }} s</td>
+ </tr>
+ {% endfor %}
+ </table>
+ </td>
+ </tr>
+ </table>
+
+ {# Recipe version differences #}
+ {% if measurement.buildstats.ver_diff %}
+ <div style="margin-top: 16px">Recipe version changes</div>
+ <table class="details">
+ {% for head, recipes in measurement.buildstats.ver_diff.items() %}
+ <tr>
+ <th colspan="2">{{ head }}</th>
+ </tr>
+ {% for name, info in recipes|sort %}
+ <tr>
+ <td>{{ name }}</td>
+ <td>{{ info }}</td>
+ </tr>
+ {% endfor %}
+ {% endfor %}
+ </table>
+ {% else %}
+ <div style="margin-top: 16px">No recipe version changes detected</div>
+ {% endif %}
+ {% endif %}
+ </div>
+ {% endfor %}
+ {# Unsuccessful test #}
+ {% else %}
+ <span style="font-size: 150%; font-weight: bold; color: red;">{{ test.status }}
+ {% if test.err_type %}<span style="font-size: 75%; font-weight: normal">({{ test.err_type }})</span>{% endif %}
+ </span>
+ <div class="preformatted">{{ test.message }}</div>
+ {% endif %}
+ {% endfor %}
+</div></body>
+</html>
+
diff --git a/scripts/lib/build_perf/report.py b/scripts/lib/build_perf/report.py
new file mode 100644
index 0000000000..4e8e2a8a93
--- /dev/null
+++ b/scripts/lib/build_perf/report.py
@@ -0,0 +1,338 @@
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Handling of build perf test reports"""
+from collections import OrderedDict, Mapping, namedtuple
+from datetime import datetime, timezone
+from numbers import Number
+from statistics import mean, stdev, variance
+
+
+AggregateTestData = namedtuple('AggregateTestData', ['metadata', 'results'])
+
+
+def isofmt_to_timestamp(string):
+ """Convert timestamp string in ISO 8601 format into unix timestamp"""
+ if '.' in string:
+ dt = datetime.strptime(string, '%Y-%m-%dT%H:%M:%S.%f')
+ else:
+ dt = datetime.strptime(string, '%Y-%m-%dT%H:%M:%S')
+ return dt.replace(tzinfo=timezone.utc).timestamp()
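+# e.g. isofmt_to_timestamp('1970-01-01T00:00:01.500000') == 1.5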
+
+
+def metadata_xml_to_json(elem):
+ """Convert metadata xml into JSON format"""
+ assert elem.tag == 'metadata', "Invalid metadata file format"
+
+ def _xml_to_json(elem):
+ """Convert xml element to JSON object"""
+ out = OrderedDict()
+ for child in elem.getchildren():
+ key = child.attrib.get('name', child.tag)
+ if len(child):
+ out[key] = _xml_to_json(child)
+ else:
+ out[key] = child.text
+ return out
+ return _xml_to_json(elem)
+
+
+def results_xml_to_json(elem):
+ """Convert results xml into JSON format"""
+ rusage_fields = ('ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
+ 'ru_majflt', 'ru_inblock', 'ru_oublock', 'ru_nvcsw',
+ 'ru_nivcsw')
+ iostat_fields = ('rchar', 'wchar', 'syscr', 'syscw', 'read_bytes',
+ 'write_bytes', 'cancelled_write_bytes')
+
+ def _read_measurement(elem):
+ """Convert measurement to JSON"""
+ data = OrderedDict()
+ data['type'] = elem.tag
+ data['name'] = elem.attrib['name']
+ data['legend'] = elem.attrib['legend']
+ values = OrderedDict()
+
+ # SYSRES measurement
+ if elem.tag == 'sysres':
+ for subel in elem:
+ if subel.tag == 'time':
+ values['start_time'] = isofmt_to_timestamp(subel.attrib['timestamp'])
+ values['elapsed_time'] = float(subel.text)
+ elif subel.tag == 'rusage':
+ rusage = OrderedDict()
+ for field in rusage_fields:
+ if 'time' in field:
+ rusage[field] = float(subel.attrib[field])
+ else:
+ rusage[field] = int(subel.attrib[field])
+ values['rusage'] = rusage
+ elif subel.tag == 'iostat':
+ values['iostat'] = OrderedDict([(f, int(subel.attrib[f]))
+ for f in iostat_fields])
+ elif subel.tag == 'buildstats_file':
+ values['buildstats_file'] = subel.text
+ else:
+ raise TypeError("Unknown sysres value element '{}'".format(subel.tag))
+ # DISKUSAGE measurement
+ elif elem.tag == 'diskusage':
+ values['size'] = int(elem.find('size').text)
+ else:
+ raise Exception("Unknown measurement tag '{}'".format(elem.tag))
+ data['values'] = values
+ return data
+
+ def _read_testcase(elem):
+ """Convert testcase into JSON"""
+ assert elem.tag == 'testcase', "Expecting 'testcase' element instead of {}".format(elem.tag)
+
+ data = OrderedDict()
+ data['name'] = elem.attrib['name']
+ data['description'] = elem.attrib['description']
+ data['status'] = 'SUCCESS'
+ data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
+ data['elapsed_time'] = float(elem.attrib['time'])
+ measurements = OrderedDict()
+
+ for subel in elem.getchildren():
+ if subel.tag == 'error' or subel.tag == 'failure':
+ data['status'] = subel.tag.upper()
+ data['message'] = subel.attrib['message']
+ data['err_type'] = subel.attrib['type']
+ data['err_output'] = subel.text
+ elif subel.tag == 'skipped':
+ data['status'] = 'SKIPPED'
+ data['message'] = subel.text
+ else:
+ measurements[subel.attrib['name']] = _read_measurement(subel)
+ data['measurements'] = measurements
+ return data
+
+ def _read_testsuite(elem):
+ """Convert suite to JSON"""
+ assert elem.tag == 'testsuite', \
+ "Expecting 'testsuite' element instead of {}".format(elem.tag)
+
+ data = OrderedDict()
+ if 'hostname' in elem.attrib:
+ data['tester_host'] = elem.attrib['hostname']
+ data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
+ data['elapsed_time'] = float(elem.attrib['time'])
+ tests = OrderedDict()
+
+ for case in elem.getchildren():
+ tests[case.attrib['name']] = _read_testcase(case)
+ data['tests'] = tests
+ return data
+
+ # Main function
+ assert elem.tag == 'testsuites', "Invalid test report format"
+ assert len(elem) == 1, "Too many testsuites"
+
+ return _read_testsuite(elem.getchildren()[0])
+
+
+def aggregate_metadata(metadata):
+ """Aggregate metadata into one, basically a sanity check"""
+ mutable_keys = ('pretty_name', 'version_id')
+
+ def aggregate_obj(aggregate, obj, assert_str=True):
+ """Aggregate objects together"""
+ assert type(aggregate) is type(obj), \
+ "Type mismatch: {} != {}".format(type(aggregate), type(obj))
+ if isinstance(obj, Mapping):
+ assert set(aggregate.keys()) == set(obj.keys())
+ for key, val in obj.items():
+ aggregate_obj(aggregate[key], val, key not in mutable_keys)
+ elif isinstance(obj, list):
+ assert len(aggregate) == len(obj)
+ for i, val in enumerate(obj):
+ aggregate_obj(aggregate[i], val)
+ elif not isinstance(obj, str) or (isinstance(obj, str) and assert_str):
+ assert aggregate == obj, "Data mismatch {} != {}".format(aggregate, obj)
+
+ if not metadata:
+ return {}
+
+ # Do the aggregation
+ aggregate = metadata[0].copy()
+ for testrun in metadata[1:]:
+ aggregate_obj(aggregate, testrun)
+ aggregate['testrun_count'] = len(metadata)
+ return aggregate
+
+
+def aggregate_data(data):
+ """Aggregate multiple test results JSON structures into one"""
+
+ mutable_keys = ('status', 'message', 'err_type', 'err_output')
+
+ class SampleList(list):
+ """Container for numerical samples"""
+ pass
+
+ def new_aggregate_obj(obj):
+ """Create new object for aggregate"""
+ if isinstance(obj, Number):
+ new_obj = SampleList()
+ new_obj.append(obj)
+ elif isinstance(obj, str):
+ new_obj = obj
+ else:
+ # Lists and dicts are kept as-is
+ new_obj = obj.__class__()
+ aggregate_obj(new_obj, obj)
+ return new_obj
+
+ def aggregate_obj(aggregate, obj, assert_str=True):
+ """Recursive "aggregation" of JSON objects"""
+ if isinstance(obj, Number):
+ assert isinstance(aggregate, SampleList)
+ aggregate.append(obj)
+ return
+
+ assert type(aggregate) == type(obj), \
+ "Type mismatch: {} != {}".format(type(aggregate), type(obj))
+ if isinstance(obj, Mapping):
+ for key, val in obj.items():
+ if key not in aggregate:
+ aggregate[key] = new_aggregate_obj(val)
+ else:
+ aggregate_obj(aggregate[key], val, key not in mutable_keys)
+ elif isinstance(obj, list):
+ for i, val in enumerate(obj):
+ if i >= len(aggregate):
+ # 'key' is not in scope in the list branch; append the new sample object
+ aggregate.append(new_aggregate_obj(val))
+ else:
+ aggregate_obj(aggregate[i], val)
+ elif isinstance(obj, str):
+ # Sanity check for data
+ if assert_str:
+ assert aggregate == obj, "Data mismatch {} != {}".format(aggregate, obj)
+ else:
+ raise Exception("BUG: unable to aggregate '{}' ({})".format(type(obj), str(obj)))
+
+ if not data:
+ return {}
+
+ # Do the aggregation
+ aggregate = data[0].__class__()
+ for testrun in data:
+ aggregate_obj(aggregate, testrun)
+ return aggregate
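+# e.g. aggregating [{'time': 1.0}, {'time': 3.0}] yields
+# {'time': SampleList([1.0, 3.0])}, ready for mean()/stdev()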
+
+
+class MeasurementVal(float):
+ """Base class representing measurement values"""
+ gv_data_type = 'number'
+
+ def gv_value(self):
+ """Value formatting for visualization"""
+ # a NaN value is not equal to itself
+ if self != self:
+ return "null"
+ else:
+ return self
+
+
+class TimeVal(MeasurementVal):
+ """Class representing time values"""
+ quantity = 'time'
+ gv_title = 'elapsed time'
+ gv_data_type = 'timeofday'
+
+ def hms(self):
+ """Split time into hours, minutes and seconeds"""
+ hhh = int(abs(self) / 3600)
+ mmm = int((abs(self) % 3600) / 60)
+ sss = abs(self) % 60
+ return hhh, mmm, sss
+
+ def __str__(self):
+ if self != self:
+ return "nan"
+ hh, mm, ss = self.hms()
+ sign = '-' if self < 0 else ''
+ if hh > 0:
+ return '{}{:d}:{:02d}:{:02.0f}'.format(sign, hh, mm, ss)
+ elif mm > 0:
+ return '{}{:d}:{:04.1f}'.format(sign, mm, ss)
+ elif ss > 1:
+ return '{}{:.1f} s'.format(sign, ss)
+ else:
+ return '{}{:.2f} s'.format(sign, ss)
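+ # e.g. str(TimeVal(75)) == '1:15.0' and str(TimeVal(0.5)) == '0.50 s'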
+
+ def gv_value(self):
+ """Value formatting for visualization"""
+ if self != self:
+ return "null"
+ hh, mm, ss = self.hms()
+ return [hh, mm, int(ss), int(ss*1000) % 1000]
+
+
+class SizeVal(MeasurementVal):
+ """Class representing time values"""
+ quantity = 'size'
+ gv_title = 'size in MiB'
+ gv_data_type = 'number'
+
+ def __str__(self):
+ if self != self:
+ return "nan"
+ if abs(self) < 1024:
+ return '{:.1f} kiB'.format(self)
+ elif abs(self) < 1048576:
+ return '{:.2f} MiB'.format(self / 1024)
+ else:
+ return '{:.2f} GiB'.format(self / 1048576)
+
+ def gv_value(self):
+ """Value formatting for visualization"""
+ if self != self:
+ return "null"
+ return self / 1024
+
+def measurement_stats(meas, prefix=''):
+ """Get statistics of a measurement"""
+ if not meas:
+ return {prefix + 'sample_cnt': 0,
+ prefix + 'mean': MeasurementVal('nan'),
+ prefix + 'stdev': MeasurementVal('nan'),
+ prefix + 'variance': MeasurementVal('nan'),
+ prefix + 'min': MeasurementVal('nan'),
+ prefix + 'max': MeasurementVal('nan'),
+ prefix + 'minus': MeasurementVal('nan'),
+ prefix + 'plus': MeasurementVal('nan')}
+
+ stats = {'name': meas['name']}
+ if meas['type'] == 'sysres':
+ val_cls = TimeVal
+ values = meas['values']['elapsed_time']
+ elif meas['type'] == 'diskusage':
+ val_cls = SizeVal
+ values = meas['values']['size']
+ else:
+ raise Exception("Unknown measurement type '{}'".format(meas['type']))
+ stats['val_cls'] = val_cls
+ stats['quantity'] = val_cls.quantity
+ stats[prefix + 'sample_cnt'] = len(values)
+
+ mean_val = val_cls(mean(values))
+ min_val = val_cls(min(values))
+ max_val = val_cls(max(values))
+
+ stats[prefix + 'mean'] = mean_val
+ if len(values) > 1:
+ stats[prefix + 'stdev'] = val_cls(stdev(values))
+ stats[prefix + 'variance'] = val_cls(variance(values))
+ else:
+ stats[prefix + 'stdev'] = float('nan')
+ stats[prefix + 'variance'] = float('nan')
+ stats[prefix + 'min'] = min_val
+ stats[prefix + 'max'] = max_val
+ stats[prefix + 'minus'] = val_cls(mean_val - min_val)
+ stats[prefix + 'plus'] = val_cls(max_val - mean_val)
+
+ return stats
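+# e.g. measurement_stats(meas)['mean'] is a TimeVal for 'sysres' measurements
+# and a SizeVal for 'diskusage' measurements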
+
diff --git a/scripts/lib/build_perf/scrape-html-report.js b/scripts/lib/build_perf/scrape-html-report.js
new file mode 100644
index 0000000000..05a1f57001
--- /dev/null
+++ b/scripts/lib/build_perf/scrape-html-report.js
@@ -0,0 +1,56 @@
+var fs = require('fs');
+var system = require('system');
+var page = require('webpage').create();
+
+// Examine console log for message from chart drawing
+page.onConsoleMessage = function(msg) {
+ console.log(msg);
+ if (msg === "ALL CHARTS READY") {
+ window.charts_ready = true;
+ }
+ else if (msg.slice(0, 11) === "CHART READY") {
+ var chart_id = msg.split(" ")[2];
+ console.log('grabbing ' + chart_id);
+ var png_data = page.evaluate(function (chart_id) {
+ var chart_div = document.getElementById(chart_id + '_png');
+ return chart_div.outerHTML;
+ }, chart_id);
+ fs.write(args[2] + '/' + chart_id + '.png', png_data, 'w');
+ }
+};
+
+// Check command line arguments
+var args = system.args;
+if (args.length != 3) {
+ console.log("USAGE: " + args[0] + " REPORT_HTML OUT_DIR\n");
+ phantom.exit(1);
+}
+
+// Open the web page
+page.open(args[1], function(status) {
+ if (status == 'fail') {
+ console.log("Failed to open file '" + args[1] + "'");
+ phantom.exit(1);
+ }
+});
+
+// Check status every 100 ms
+interval = window.setInterval(function () {
+ //console.log('waiting');
+ if (window.charts_ready) {
+ clearTimeout(timer);
+ clearInterval(interval);
+
+ var fname = args[1].replace(/\/+$/, "").split("/").pop()
+ console.log("saving " + fname);
+ fs.write(args[2] + '/' + fname, page.content, 'w');
+ phantom.exit(0);
+ }
+}, 100);
+
+// Time-out after 10 seconds
+timer = window.setTimeout(function () {
+ clearInterval(interval);
+ console.log("ERROR: timeout");
+ phantom.exit(1);
+}, 10000);
diff --git a/scripts/lib/buildstats.py b/scripts/lib/buildstats.py
new file mode 100644
index 0000000000..c69b5bf4d7
--- /dev/null
+++ b/scripts/lib/buildstats.py
@@ -0,0 +1,346 @@
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Functionality for analyzing buildstats"""
+import json
+import logging
+import os
+import re
+from collections import namedtuple, OrderedDict
+from statistics import mean
+
+
+log = logging.getLogger()
+
+
+taskdiff_fields = ('pkg', 'pkg_op', 'task', 'task_op', 'value1', 'value2',
+ 'absdiff', 'reldiff')
+TaskDiff = namedtuple('TaskDiff', ' '.join(taskdiff_fields))
+
+
+class BSError(Exception):
+ """Error handling of buildstats"""
+ pass
+
+
+class BSTask(dict):
+ def __init__(self, *args, **kwargs):
+ self['start_time'] = None
+ self['elapsed_time'] = None
+ self['status'] = None
+ self['iostat'] = {}
+ self['rusage'] = {}
+ self['child_rusage'] = {}
+ super(BSTask, self).__init__(*args, **kwargs)
+
+ @property
+ def cputime(self):
+ """Sum of user and system time taken by the task"""
+ rusage = self['rusage']['ru_stime'] + self['rusage']['ru_utime']
+ if self['child_rusage']:
+ # Child rusage may have been optimized out
+ return rusage + self['child_rusage']['ru_stime'] + self['child_rusage']['ru_utime']
+ else:
+ return rusage
+
+ @property
+ def walltime(self):
+ """Elapsed wall clock time"""
+ return self['elapsed_time']
+
+ @property
+ def read_bytes(self):
+ """Bytes read from the block layer"""
+ return self['iostat']['read_bytes']
+
+ @property
+ def write_bytes(self):
+ """Bytes written to the block layer"""
+ return self['iostat']['write_bytes']
+
+ @property
+ def read_ops(self):
+ """Number of read operations on the block layer"""
+ if self['child_rusage']:
+ # Child rusage may have been optimized out
+ return self['rusage']['ru_inblock'] + self['child_rusage']['ru_inblock']
+ else:
+ return self['rusage']['ru_inblock']
+
+ @property
+ def write_ops(self):
+ """Number of write operations on the block layer"""
+ if self['child_rusage']:
+ # Child rusage may have been optimized out
+ return self['rusage']['ru_oublock'] + self['child_rusage']['ru_oublock']
+ else:
+ return self['rusage']['ru_oublock']
+
+ @classmethod
+ def from_file(cls, buildstat_file):
+ """Read buildstat text file"""
+ bs_task = cls()
+ log.debug("Reading task buildstats from %s", buildstat_file)
+ end_time = None
+ with open(buildstat_file) as fobj:
+ for line in fobj.readlines():
+ key, val = line.split(':', 1)
+ val = val.strip()
+ if key == 'Started':
+ start_time = float(val)
+ bs_task['start_time'] = start_time
+ elif key == 'Ended':
+ end_time = float(val)
+ elif key.startswith('IO '):
+ split = key.split()
+ bs_task['iostat'][split[1]] = int(val)
+ elif key.find('rusage') >= 0:
+ split = key.split()
+ ru_key = split[-1]
+ if ru_key in ('ru_stime', 'ru_utime'):
+ val = float(val)
+ else:
+ val = int(val)
+ ru_type = 'rusage' if split[0] == 'rusage' else \
+ 'child_rusage'
+ bs_task[ru_type][ru_key] = val
+ elif key == 'Status':
+ bs_task['status'] = val
+ if end_time is not None and start_time is not None:
+ bs_task['elapsed_time'] = end_time - start_time
+ else:
+ raise BSError("{} looks like a invalid buildstats file".format(buildstat_file))
+ return bs_task
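+ # a task buildstat file consists of 'key: value' lines, e.g. (illustrative):
+ #   Started: 1500000000.0
+ #   Ended: 1500000002.5
+ #   rusage ru_utime: 1.25
+ #   Status: PASSED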
+
+
+class BSTaskAggregate(object):
+ """Class representing multiple runs of the same task"""
+ properties = ('cputime', 'walltime', 'read_bytes', 'write_bytes',
+ 'read_ops', 'write_ops')
+
+ def __init__(self, tasks=None):
+ self._tasks = tasks or []
+ self._properties = {}
+
+ def __getattr__(self, name):
+ if name in self.properties:
+ if name not in self._properties:
+ # Calculate properties on demand only. We only provide mean
+ # value, so far
+ self._properties[name] = mean([getattr(t, name) for t in self._tasks])
+ return self._properties[name]
+ else:
+ raise AttributeError("'BSTaskAggregate' has no attribute '{}'".format(name))
+
+ def append(self, task):
+ """Append new task"""
+ # Reset pre-calculated properties
+ assert isinstance(task, BSTask), "Type is '{}' instead of 'BSTask'".format(type(task))
+ self._properties = {}
+ self._tasks.append(task)
+
+
+class BSRecipe(object):
+ """Class representing buildstats of one recipe"""
+ def __init__(self, name, epoch, version, revision):
+ self.name = name
+ self.epoch = epoch
+ self.version = version
+ self.revision = revision
+ if epoch is None:
+ self.evr = "{}-{}".format(version, revision)
+ else:
+ self.evr = "{}_{}-{}".format(epoch, version, revision)
+ self.tasks = {}
+
+ def aggregate(self, bsrecipe):
+ """Aggregate data of another recipe buildstats"""
+ if self.nevr != bsrecipe.nevr:
+ raise ValueError("Refusing to aggregate buildstats, recipe version "
+ "differs: {} vs. {}".format(self.nevr, bsrecipe.nevr))
+ if set(self.tasks.keys()) != set(bsrecipe.tasks.keys()):
+ raise ValueError("Refusing to aggregate buildstats, set of tasks "
+ "in {} differ".format(self.name))
+
+ for taskname, taskdata in bsrecipe.tasks.items():
+ if not isinstance(self.tasks[taskname], BSTaskAggregate):
+ self.tasks[taskname] = BSTaskAggregate([self.tasks[taskname]])
+ self.tasks[taskname].append(taskdata)
+
+ @property
+ def nevr(self):
+ return self.name + '-' + self.evr
+
+
+class BuildStats(dict):
+ """Class representing buildstats of one build"""
+
+ @property
+ def num_tasks(self):
+ """Get number of tasks"""
+ num = 0
+ for recipe in self.values():
+ num += len(recipe.tasks)
+ return num
+
+ @classmethod
+ def from_json(cls, bs_json):
+ """Create new BuildStats object from JSON object"""
+ buildstats = cls()
+ for recipe in bs_json:
+ if recipe['name'] in buildstats:
+ raise BSError("Cannot handle multiple versions of the same "
+ "package ({})".format(recipe['name']))
+ bsrecipe = BSRecipe(recipe['name'], recipe['epoch'],
+ recipe['version'], recipe['revision'])
+ for task, data in recipe['tasks'].items():
+ bsrecipe.tasks[task] = BSTask(data)
+
+ buildstats[recipe['name']] = bsrecipe
+
+ return buildstats
+
+ @staticmethod
+ def from_file_json(path):
+ """Load buildstats from a JSON file"""
+ with open(path) as fobj:
+ bs_json = json.load(fobj)
+ return BuildStats.from_json(bs_json)
+
+
+ @staticmethod
+ def split_nevr(nevr):
+ """Split name and version information from recipe "nevr" string"""
+ n_e_v, revision = nevr.rsplit('-', 1)
+ match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
+ n_e_v)
+ if not match:
+ # If we're not able to parse a version starting with a number, just
+ # take the part after last dash
+ match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
+ n_e_v)
+ name = match.group('name')
+ version = match.group('version')
+ epoch = match.group('epoch')
+ return name, epoch, version, revision
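+ # e.g. split_nevr('zlib-1.2.11-r0') returns ('zlib', None, '1.2.11', 'r0')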
+
+ @classmethod
+ def from_dir(cls, path):
+ """Load buildstats from a buildstats directory"""
+ if not os.path.isfile(os.path.join(path, 'build_stats')):
+ raise BSError("{} does not look like a buildstats directory".format(path))
+
+ log.debug("Reading buildstats directory %s", path)
+
+ buildstats = cls()
+ subdirs = os.listdir(path)
+ for dirname in subdirs:
+ recipe_dir = os.path.join(path, dirname)
+ if not os.path.isdir(recipe_dir):
+ continue
+ name, epoch, version, revision = cls.split_nevr(dirname)
+ bsrecipe = BSRecipe(name, epoch, version, revision)
+ for task in os.listdir(recipe_dir):
+ bsrecipe.tasks[task] = BSTask.from_file(
+ os.path.join(recipe_dir, task))
+ if name in buildstats:
+ raise BSError("Cannot handle multiple versions of the same "
+ "package ({})".format(name))
+ buildstats[name] = bsrecipe
+
+ return buildstats
+
+ def aggregate(self, buildstats):
+ """Aggregate other buildstats into this"""
+ if set(self.keys()) != set(buildstats.keys()):
+ raise ValueError("Refusing to aggregate buildstats, set of "
+ "recipes is different: %s" % (set(self.keys()) ^ set(buildstats.keys())))
+ for pkg, data in buildstats.items():
+ self[pkg].aggregate(data)
+
+
+def diff_buildstats(bs1, bs2, stat_attr, min_val=None, min_absdiff=None, only_tasks=[]):
+ """Compare the tasks of two buildstats"""
+ tasks_diff = []
+ pkgs = set(bs1.keys()).union(set(bs2.keys()))
+ for pkg in pkgs:
+ tasks1 = bs1[pkg].tasks if pkg in bs1 else {}
+ tasks2 = bs2[pkg].tasks if pkg in bs2 else {}
+ if only_tasks:
+ tasks1 = {k: v for k, v in tasks1.items() if k in only_tasks}
+ tasks2 = {k: v for k, v in tasks2.items() if k in only_tasks}
+
+ if not tasks1:
+ pkg_op = '+'
+ elif not tasks2:
+ pkg_op = '-'
+ else:
+ pkg_op = ' '
+
+ for task in set(tasks1.keys()).union(set(tasks2.keys())):
+ task_op = ' '
+ if task in tasks1:
+ val1 = getattr(bs1[pkg].tasks[task], stat_attr)
+ else:
+ task_op = '+'
+ val1 = 0
+ if task in tasks2:
+ val2 = getattr(bs2[pkg].tasks[task], stat_attr)
+ else:
+ val2 = 0
+ task_op = '-'
+
+ if val1 == 0:
+ reldiff = float('inf')
+ else:
+ reldiff = 100 * (val2 - val1) / val1
+
+ if min_val and max(val1, val2) < min_val:
+ log.debug("Filtering out %s:%s (%s)", pkg, task,
+ max(val1, val2))
+ continue
+ if min_absdiff and abs(val2 - val1) < min_absdiff:
+ log.debug("Filtering out %s:%s (difference of %s)", pkg, task,
+ val2-val1)
+ continue
+ tasks_diff.append(TaskDiff(pkg, pkg_op, task, task_op, val1, val2,
+ val2-val1, reldiff))
+ return tasks_diff
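+# e.g. diff_buildstats(bs1, bs2, 'cputime', min_absdiff=5.0) lists the tasks
+# whose combined user and system time changed by at least five seconds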
+
+
+class BSVerDiff(object):
+ """Class representing recipe version differences between two buildstats"""
+ def __init__(self, bs1, bs2):
+ RecipeVerDiff = namedtuple('RecipeVerDiff', 'left right')
+
+ recipes1 = set(bs1.keys())
+ recipes2 = set(bs2.keys())
+
+ self.new = dict([(r, bs2[r]) for r in sorted(recipes2 - recipes1)])
+ self.dropped = dict([(r, bs1[r]) for r in sorted(recipes1 - recipes2)])
+ self.echanged = {}
+ self.vchanged = {}
+ self.rchanged = {}
+ self.unchanged = {}
+ self.empty_diff = False
+
+ common = recipes2.intersection(recipes1)
+ if common:
+ for recipe in common:
+ rdiff = RecipeVerDiff(bs1[recipe], bs2[recipe])
+ if bs1[recipe].epoch != bs2[recipe].epoch:
+ self.echanged[recipe] = rdiff
+ elif bs1[recipe].version != bs2[recipe].version:
+ self.vchanged[recipe] = rdiff
+ elif bs1[recipe].revision != bs2[recipe].revision:
+ self.rchanged[recipe] = rdiff
+ else:
+ self.unchanged[recipe] = rdiff
+
+ if len(recipes1) == len(recipes2) == len(self.unchanged):
+ self.empty_diff = True
+
+ def __bool__(self):
+ return not self.empty_diff
diff --git a/scripts/lib/checklayer/__init__.py b/scripts/lib/checklayer/__init__.py
new file mode 100644
index 0000000000..5aeec2f00f
--- /dev/null
+++ b/scripts/lib/checklayer/__init__.py
@@ -0,0 +1,398 @@
+# Yocto Project layer check tool
+#
+# Copyright (C) 2017 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import re
+import subprocess
+from enum import Enum
+
+import bb.tinfoil
+
+class LayerType(Enum):
+ BSP = 0
+ DISTRO = 1
+ SOFTWARE = 2
+ ERROR_NO_LAYER_CONF = 98
+ ERROR_BSP_DISTRO = 99
+
+def _get_configurations(path):
+ configs = []
+
+ for f in os.listdir(path):
+ file_path = os.path.join(path, f)
+ if os.path.isfile(file_path) and f.endswith('.conf'):
+ configs.append(f[:-5]) # strip .conf
+ return configs
+
+def _get_layer_collections(layer_path, lconf=None, data=None):
+ import bb.parse
+ import bb.data
+
+ if lconf is None:
+ lconf = os.path.join(layer_path, 'conf', 'layer.conf')
+
+ if data is None:
+ ldata = bb.data.init()
+ bb.parse.init_parser(ldata)
+ else:
+ ldata = data.createCopy()
+
+ ldata.setVar('LAYERDIR', layer_path)
+ try:
+ ldata = bb.parse.handle(lconf, ldata, include=True)
+    except Exception as exc:
+        raise RuntimeError("Parsing of layer.conf from layer %s failed" % layer_path) from exc
+ ldata.expandVarref('LAYERDIR')
+
+ collections = (ldata.getVar('BBFILE_COLLECTIONS') or '').split()
+ if not collections:
+ name = os.path.basename(layer_path)
+ collections = [name]
+
+ collections = {c: {} for c in collections}
+ for name in collections:
+ priority = ldata.getVar('BBFILE_PRIORITY_%s' % name)
+ pattern = ldata.getVar('BBFILE_PATTERN_%s' % name)
+ depends = ldata.getVar('LAYERDEPENDS_%s' % name)
+ compat = ldata.getVar('LAYERSERIES_COMPAT_%s' % name)
+ collections[name]['priority'] = priority
+ collections[name]['pattern'] = pattern
+ collections[name]['depends'] = depends
+ collections[name]['compat'] = compat
+
+ return collections
+
+def _detect_layer(layer_path):
+ """
+    Scans a layer directory to detect which type of layer it is:
+    BSP, Distro or Software.
+
+    Returns a dictionary with the layer name, type and path.
+ """
+
+ layer = {}
+ layer_name = os.path.basename(layer_path)
+
+ layer['name'] = layer_name
+ layer['path'] = layer_path
+ layer['conf'] = {}
+
+ if not os.path.isfile(os.path.join(layer_path, 'conf', 'layer.conf')):
+ layer['type'] = LayerType.ERROR_NO_LAYER_CONF
+ return layer
+
+ machine_conf = os.path.join(layer_path, 'conf', 'machine')
+ distro_conf = os.path.join(layer_path, 'conf', 'distro')
+
+ is_bsp = False
+ is_distro = False
+
+ if os.path.isdir(machine_conf):
+ machines = _get_configurations(machine_conf)
+ if machines:
+ is_bsp = True
+
+ if os.path.isdir(distro_conf):
+ distros = _get_configurations(distro_conf)
+ if distros:
+ is_distro = True
+
+ if is_bsp and is_distro:
+ layer['type'] = LayerType.ERROR_BSP_DISTRO
+ elif is_bsp:
+ layer['type'] = LayerType.BSP
+ layer['conf']['machines'] = machines
+ elif is_distro:
+ layer['type'] = LayerType.DISTRO
+ layer['conf']['distros'] = distros
+ else:
+ layer['type'] = LayerType.SOFTWARE
+
+ layer['collections'] = _get_layer_collections(layer['path'])
+
+ return layer
+
+def detect_layers(layer_directories, no_auto):
+ layers = []
+
+ for directory in layer_directories:
+ directory = os.path.realpath(directory)
+ if directory[-1] == '/':
+ directory = directory[0:-1]
+
+ if no_auto:
+ conf_dir = os.path.join(directory, 'conf')
+ if os.path.isdir(conf_dir):
+ layer = _detect_layer(directory)
+ if layer:
+ layers.append(layer)
+ else:
+ for root, dirs, files in os.walk(directory):
+ conf_dir = os.path.join(root, 'conf')
+ if os.path.isdir(conf_dir):
+ layer = _detect_layer(root)
+ if layer:
+ layers.append(layer)
+
+ return layers
+
+def _find_layer_depends(depend, layers):
+ for layer in layers:
+ for collection in layer['collections']:
+ if depend == collection:
+ return layer
+ return None
+
+def add_layer_dependencies(bblayersconf, layer, layers, logger):
+ def recurse_dependencies(depends, layer, layers, logger, ret = []):
+ logger.debug('Processing dependencies %s for layer %s.' % \
+ (depends, layer['name']))
+
+ for depend in depends.split():
+            # core (oe-core) is supposed to be provided
+ if depend == 'core':
+ continue
+
+ layer_depend = _find_layer_depends(depend, layers)
+ if not layer_depend:
+                logger.error('Layer %s depends on %s which could not be found.' % \
+                        (layer['name'], depend))
+ ret = None
+ continue
+
+            # We keep processing even if ret is None; this allows us to
+            # report multiple errors at once
+ if ret is not None and layer_depend not in ret:
+ ret.append(layer_depend)
+ else:
+ # we might have processed this dependency already, in which case
+ # we should not do it again (avoid recursive loop)
+ continue
+
+ # Recursively process...
+ if 'collections' not in layer_depend:
+ continue
+
+ for collection in layer_depend['collections']:
+ collect_deps = layer_depend['collections'][collection]['depends']
+ if not collect_deps:
+ continue
+ ret = recurse_dependencies(collect_deps, layer_depend, layers, logger, ret)
+
+ return ret
+
+ layer_depends = []
+ for collection in layer['collections']:
+ depends = layer['collections'][collection]['depends']
+ if not depends:
+ continue
+
+ layer_depends = recurse_dependencies(depends, layer, layers, logger, layer_depends)
+
+ # Note: [] (empty) is allowed, None is not!
+ if layer_depends is None:
+ return False
+ else:
+ add_layers(bblayersconf, layer_depends, logger)
+
+ return True
+
+def add_layers(bblayersconf, layers, logger):
+ # Don't add a layer that is already present.
+ added = set()
+ output = check_command('Getting existing layers failed.', 'bitbake-layers show-layers').decode('utf-8')
+ for layer, path, pri in re.findall(r'^(\S+) +([^\n]*?) +(\d+)$', output, re.MULTILINE):
+ added.add(path)
+
+ with open(bblayersconf, 'a+') as f:
+ for layer in layers:
+ logger.info('Adding layer %s' % layer['name'])
+ name = layer['name']
+ path = layer['path']
+ if path in added:
+ logger.info('%s is already in %s' % (name, bblayersconf))
+ else:
+ added.add(path)
+ f.write("\nBBLAYERS += \"%s\"\n" % path)
+ return True
+
+def check_command(error_msg, cmd, cwd=None):
+ '''
+ Run a command under a shell, capture stdout and stderr in a single stream,
+    raise an error when the command returns a non-zero exit code. Returns the output.
+ '''
+
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd)
+ output, _ = p.communicate()
+ if p.returncode:
+ msg = "%s\nCommand: %s\nOutput:\n%s" % (error_msg, cmd, output.decode('utf-8'))
+ raise RuntimeError(msg)
+ return output
+
+def get_signatures(builddir, failsafe=False, machine=None):
+
+    # Some recipes need to be excluded, like meta-world-pkgdata,
+    # because a layer can add recipes to a world build, causing
+    # their signatures to change
+ exclude_recipes = ('meta-world-pkgdata',)
+
+ sigs = {}
+ tune2tasks = {}
+
+ cmd = 'BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE BB_SIGNATURE_HANDLER" BB_SIGNATURE_HANDLER="OEBasicHash" '
+ if machine:
+ cmd += 'MACHINE=%s ' % machine
+ cmd += 'bitbake '
+ if failsafe:
+ cmd += '-k '
+ cmd += '-S none world'
+ sigs_file = os.path.join(builddir, 'locked-sigs.inc')
+ if os.path.exists(sigs_file):
+ os.unlink(sigs_file)
+ try:
+ check_command('Generating signatures failed. This might be due to some parse error and/or general layer incompatibilities.',
+ cmd, builddir)
+ except RuntimeError as ex:
+ if failsafe and os.path.exists(sigs_file):
+ # Ignore the error here. Most likely some recipes active
+ # in a world build lack some dependencies. There is a
+ # separate test_machine_world_build which exposes the
+ # failure.
+ pass
+ else:
+ raise
+
+ sig_regex = re.compile("^(?P<task>.*:.*):(?P<hash>.*) .$")
+    tune_regex = re.compile(r"(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
+ current_tune = None
+ with open(sigs_file, 'r') as f:
+ for line in f.readlines():
+ line = line.strip()
+ t = tune_regex.search(line)
+ if t:
+ current_tune = t.group('tune')
+ s = sig_regex.match(line)
+ if s:
+ exclude = False
+ for er in exclude_recipes:
+ (recipe, task) = s.group('task').split(':')
+ if er == recipe:
+ exclude = True
+ break
+ if exclude:
+ continue
+
+ sigs[s.group('task')] = s.group('hash')
+ tune2tasks.setdefault(current_tune, []).append(s.group('task'))
+
+ if not sigs:
+ raise RuntimeError('Can\'t load signatures from %s' % sigs_file)
+
+ return (sigs, tune2tasks)
+
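+# Illustrative locked-sigs.inc fragment that get_signatures() above parses
+# with sig_regex and tune_regex (tune name and hash values invented):
+#
+#   SIGGEN_LOCKEDSIGS_t-core2-64 = "\
+#       gzip:do_fetch:0123abcd0123abcd \
+#       gzip:do_unpack:4567cdef4567cdef \
+#       "
+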
+def get_depgraph(targets=['world'], failsafe=False):
+ '''
+ Returns the dependency graph for the given target(s).
+ The dependency graph is taken directly from DepTreeEvent.
+ '''
+ depgraph = None
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False)
+ tinfoil.set_event_mask(['bb.event.NoProvider', 'bb.event.DepTreeGenerated', 'bb.command.CommandCompleted'])
+ if not tinfoil.run_command('generateDepTreeEvent', targets, 'do_build'):
+ raise RuntimeError('starting generateDepTreeEvent failed')
+ while True:
+ event = tinfoil.wait_event(timeout=1000)
+ if event:
+ if isinstance(event, bb.command.CommandFailed):
+ raise RuntimeError('Generating dependency information failed: %s' % event.error)
+ elif isinstance(event, bb.command.CommandCompleted):
+ break
+ elif isinstance(event, bb.event.NoProvider):
+ if failsafe:
+ # The event is informational, we will get information about the
+ # remaining dependencies eventually and thus can ignore this
+ # here like we do in get_signatures(), if desired.
+ continue
+ if event._reasons:
+ raise RuntimeError('Nothing provides %s: %s' % (event._item, event._reasons))
+ else:
+ raise RuntimeError('Nothing provides %s.' % (event._item))
+ elif isinstance(event, bb.event.DepTreeGenerated):
+ depgraph = event._depgraph
+
+ if depgraph is None:
+ raise RuntimeError('Could not retrieve the depgraph.')
+ return depgraph
+
+def compare_signatures(old_sigs, curr_sigs):
+ '''
+ Compares the result of two get_signatures() calls. Returns None if no
+ problems found, otherwise a string that can be used as additional
+ explanation in self.fail().
+ '''
+ # task -> (old signature, new signature)
+ sig_diff = {}
+ for task in old_sigs:
+ if task in curr_sigs and \
+ old_sigs[task] != curr_sigs[task]:
+ sig_diff[task] = (old_sigs[task], curr_sigs[task])
+
+ if not sig_diff:
+ return None
+
+ # Beware, depgraph uses task=<pn>.<taskname> whereas get_signatures()
+ # uses <pn>:<taskname>. Need to convert sometimes. The output follows
+ # the convention from get_signatures() because that seems closer to
+ # normal bitbake output.
+ def sig2graph(task):
+ pn, taskname = task.rsplit(':', 1)
+ return pn + '.' + taskname
+ def graph2sig(task):
+ pn, taskname = task.rsplit('.', 1)
+ return pn + ':' + taskname
+ depgraph = get_depgraph(failsafe=True)
+ depends = depgraph['tdepends']
+
+ # If a task A has a changed signature, but none of its
+ # dependencies, then we need to report it because it is
+ # the one which introduces a change. Any task depending on
+ # A (directly or indirectly) will also have a changed
+ # signature, but we don't need to report it. It might have
+ # its own changes, which will become apparent once the
+ # issues that we do report are fixed and the test gets run
+ # again.
+ sig_diff_filtered = []
+ for task, (old_sig, new_sig) in sig_diff.items():
+ deps_tainted = False
+ for dep in depends.get(sig2graph(task), ()):
+ if graph2sig(dep) in sig_diff:
+ deps_tainted = True
+ break
+ if not deps_tainted:
+ sig_diff_filtered.append((task, old_sig, new_sig))
+
+ msg = []
+ msg.append('%d signatures changed, initial differences (first hash before, second after):' %
+ len(sig_diff))
+ for diff in sorted(sig_diff_filtered):
+ recipe, taskname = diff[0].rsplit(':', 1)
+ cmd = 'bitbake-diffsigs --task %s %s --signature %s %s' % \
+ (recipe, taskname, diff[1], diff[2])
+ msg.append(' %s: %s -> %s' % diff)
+ msg.append(' %s' % cmd)
+ try:
+ output = check_command('Determining signature difference failed.',
+ cmd).decode('utf-8')
+ except RuntimeError as error:
+ output = str(error)
+ if output:
+ msg.extend([' ' + line for line in output.splitlines()])
+ msg.append('')
+ return '\n'.join(msg)
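
A minimal sketch of how these helpers combine, roughly what the yocto-check-layer driver does; the layer path and build directory are invented, and the calls assume an initialised bitbake build environment:

    from checklayer import detect_layers, get_signatures, compare_signatures

    layers = detect_layers(['/path/to/meta-example'], no_auto=False)
    for layer in layers:
        print(layer['name'], layer['type'])

    # Baseline signatures, then a second run after the layer has been added
    # to bblayers.conf, to see what the layer changed
    sigs_before, _ = get_signatures('/path/to/build', failsafe=True)
    # ... add the layer to conf/bblayers.conf here ...
    sigs_after, _ = get_signatures('/path/to/build', failsafe=True)
    msg = compare_signatures(sigs_before, sigs_after)
    if msg:
        print(msg)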
diff --git a/scripts/lib/checklayer/case.py b/scripts/lib/checklayer/case.py
new file mode 100644
index 0000000000..fa9dee384e
--- /dev/null
+++ b/scripts/lib/checklayer/case.py
@@ -0,0 +1,9 @@
+# Copyright (C) 2017 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.core.case import OETestCase
+
+class OECheckLayerTestCase(OETestCase):
+ pass
diff --git a/scripts/lib/wic/imager/__init__.py b/scripts/lib/checklayer/cases/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/scripts/lib/wic/imager/__init__.py
+++ b/scripts/lib/checklayer/cases/__init__.py
diff --git a/scripts/lib/checklayer/cases/bsp.py b/scripts/lib/checklayer/cases/bsp.py
new file mode 100644
index 0000000000..7fd56f5d36
--- /dev/null
+++ b/scripts/lib/checklayer/cases/bsp.py
@@ -0,0 +1,206 @@
+# Copyright (C) 2017 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import unittest
+
+from checklayer import LayerType, get_signatures, check_command, get_depgraph
+from checklayer.case import OECheckLayerTestCase
+
+class BSPCheckLayer(OECheckLayerTestCase):
+ @classmethod
+    def setUpClass(cls):
+        if cls.tc.layer['type'] != LayerType.BSP:
+            raise unittest.SkipTest("BSPCheckLayer: Layer %s isn't a BSP layer." %\
+                cls.tc.layer['name'])
+
+ def test_bsp_defines_machines(self):
+        self.assertTrue(self.tc.layer['conf']['machines'],
+            "Layer is a BSP layer but doesn't define machines.")
+
+ def test_bsp_no_set_machine(self):
+ from oeqa.utils.commands import get_bb_var
+
+ machine = get_bb_var('MACHINE')
+ self.assertEqual(self.td['bbvars']['MACHINE'], machine,
+ msg="Layer %s modified machine %s -> %s" % \
+ (self.tc.layer['name'], self.td['bbvars']['MACHINE'], machine))
+
+
+ def test_machine_world(self):
+ '''
+ "bitbake world" is expected to work regardless which machine is selected.
+ BSP layers sometimes break that by enabling a recipe for a certain machine
+ without checking whether that recipe actually can be built in the current
+        distro configuration (for example, OpenGL might not be enabled).
+
+ This test iterates over all machines. It would be nicer to instantiate
+ it once per machine. It merely checks for errors during parse
+ time. It does not actually attempt to build anything.
+ '''
+
+ if not self.td['machines']:
+ self.skipTest('No machines set with --machines.')
+ msg = []
+ for machine in self.td['machines']:
+ # In contrast to test_machine_signatures() below, errors are fatal here.
+ try:
+ get_signatures(self.td['builddir'], failsafe=False, machine=machine)
+ except RuntimeError as ex:
+ msg.append(str(ex))
+ if msg:
+ msg.insert(0, 'The following machines broke a world build:')
+ self.fail('\n'.join(msg))
+
+ def test_machine_signatures(self):
+ '''
+ Selecting a machine may only affect the signature of tasks that are specific
+ to that machine. In other words, when MACHINE=A and MACHINE=B share a recipe
+ foo and the output of foo, then both machine configurations must build foo
+ in exactly the same way. Otherwise it is not possible to use both machines
+ in the same distribution.
+
+        This criterion can only be tested by testing different machines in combination,
+ i.e. one main layer, potentially several additional BSP layers and an explicit
+ choice of machines:
+ yocto-check-layer --additional-layers .../meta-intel --machines intel-corei7-64 imx6slevk -- .../meta-freescale
+ '''
+
+ if not self.td['machines']:
+ self.skipTest('No machines set with --machines.')
+
+ # Collect signatures for all machines that we are testing
+ # and merge that into a hash:
+ # tune -> task -> signature -> list of machines with that combination
+ #
+ # It is an error if any tune/task pair has more than one signature,
+ # because that implies that the machines that caused those different
+ # signatures do not agree on how to execute the task.
+ tunes = {}
+ # Preserve ordering of machines as chosen by the user.
+ for machine in self.td['machines']:
+ curr_sigs, tune2tasks = get_signatures(self.td['builddir'], failsafe=True, machine=machine)
+ # Invert the tune -> [tasks] mapping.
+ tasks2tune = {}
+ for tune, tasks in tune2tasks.items():
+ for task in tasks:
+ tasks2tune[task] = tune
+ for task, sighash in curr_sigs.items():
+ tunes.setdefault(tasks2tune[task], {}).setdefault(task, {}).setdefault(sighash, []).append(machine)
+
+ msg = []
+ pruned = 0
+ last_line_key = None
+ # do_fetch, do_unpack, ..., do_build
+ taskname_list = []
+ if tunes:
+ # The output below is most useful when we start with tasks that are at
+ # the bottom of the dependency chain, i.e. those that run first. If
+ # those tasks differ, the rest also does.
+ #
+ # To get an ordering of tasks, we do a topological sort of the entire
+            # depgraph for the base configuration, then flatten that list on the fly by stripping
+ # out the recipe names and removing duplicates. The base configuration
+ # is not necessarily representative, but should be close enough. Tasks
+ # that were not encountered get a default priority.
+ depgraph = get_depgraph()
+ depends = depgraph['tdepends']
+ WHITE = 1
+ GRAY = 2
+ BLACK = 3
+ color = {}
+ found = set()
+ def visit(task):
+ color[task] = GRAY
+ for dep in depends.get(task, ()):
+ if color.setdefault(dep, WHITE) == WHITE:
+ visit(dep)
+ color[task] = BLACK
+ pn, taskname = task.rsplit('.', 1)
+ if taskname not in found:
+ taskname_list.append(taskname)
+ found.add(taskname)
+ for task in depends.keys():
+ if color.setdefault(task, WHITE) == WHITE:
+ visit(task)
+
+            taskname_order = {task: index for index, task in enumerate(taskname_list)}
+ def task_key(task):
+ pn, taskname = task.rsplit(':', 1)
+ return (pn, taskname_order.get(taskname, len(taskname_list)), taskname)
+
+ for tune in sorted(tunes.keys()):
+ tasks = tunes[tune]
+ # As for test_signatures it would be nicer to sort tasks
+ # by dependencies here, but that is harder because we have
+ # to report on tasks from different machines, which might
+ # have different dependencies. We resort to pruning the
+ # output by reporting only one task per recipe if the set
+ # of machines matches.
+ #
+ # "bitbake-diffsigs -t -s" is intelligent enough to print
+ # diffs recursively, so often it does not matter that much
+ # if we don't pick the underlying difference
+ # here. However, sometimes recursion fails
+ # (https://bugzilla.yoctoproject.org/show_bug.cgi?id=6428).
+ #
+ # To mitigate that a bit, we use a hard-coded ordering of
+ # tasks that represents how they normally run and prefer
+ # to print the ones that run first.
+ for task in sorted(tasks.keys(), key=task_key):
+ signatures = tasks[task]
+                    # do_build can be ignored: it is known to have
+ # different signatures in some cases, for example in
+ # the allarch ca-certificates due to RDEPENDS=openssl.
+ # That particular dependency is whitelisted via
+ # SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS, but still shows up
+ # in the sstate signature hash because filtering it
+ # out would be hard and running do_build multiple
+ # times doesn't really matter.
+ if len(signatures.keys()) > 1 and \
+ not task.endswith(':do_build'):
+ # Error!
+ #
+ # Sort signatures by machines, because the hex values don't mean anything.
+ # => all-arch adwaita-icon-theme:do_build: 1234... (beaglebone, qemux86) != abcdf... (qemux86-64)
+ #
+ # Skip the line if it is covered already by the predecessor (same pn, same sets of machines).
+ pn, taskname = task.rsplit(':', 1)
+ next_line_key = (pn, sorted(signatures.values()))
+ if next_line_key != last_line_key:
+ line = ' %s %s: ' % (tune, task)
+ line += ' != '.join(['%s (%s)' % (signature, ', '.join([m for m in signatures[signature]])) for
+ signature in sorted(signatures.keys(), key=lambda s: signatures[s])])
+ last_line_key = next_line_key
+ msg.append(line)
+ # Randomly pick two mismatched signatures and remember how to invoke
+ # bitbake-diffsigs for them.
+ iterator = iter(signatures.items())
+ a = next(iterator)
+ b = next(iterator)
+ diffsig_machines = '(%s) != (%s)' % (', '.join(a[1]), ', '.join(b[1]))
+ diffsig_params = '-t %s %s -s %s %s' % (pn, taskname, a[0], b[0])
+ else:
+ pruned += 1
+
+ if msg:
+ msg.insert(0, 'The machines have conflicting signatures for some shared tasks:')
+ if pruned > 0:
+ msg.append('')
+                msg.append('%d tasks were not listed because some other task of the recipe already differed.' % pruned)
+ msg.append('It is likely that differences from different recipes also have the same root cause.')
+ msg.append('')
+ # Explain how to investigate...
+ msg.append('To investigate, run bitbake-diffsigs -t recipename taskname -s fromsig tosig.')
+ cmd = 'bitbake-diffsigs %s' % diffsig_params
+ msg.append('Example: %s in the last line' % diffsig_machines)
+ msg.append('Command: %s' % cmd)
+ # ... and actually do it automatically for that example, but without aborting
+ # when that fails.
+ try:
+ output = check_command('Comparing signatures failed.', cmd).decode('utf-8')
+ except RuntimeError as ex:
+ output = str(ex)
+ msg.extend([' ' + line for line in output.splitlines()])
+ self.fail('\n'.join(msg))
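
The nested mapping that test_machine_signatures() builds can be hard to picture from the code alone; an illustrative instance, with all machine names, tunes and hashes invented:

    tunes = {
        'core2-64': {
            'gzip:do_configure': {
                'abc123': ['intel-corei7-64', 'qemux86-64'],
            },
        },
        'cortexa9hf-neon': {
            'gzip:do_configure': {
                'def456': ['imx6slevk'],
                '789abc': ['hypothetical-machine'],  # two hashes -> reported
            },
        },
    }

A tune/task entry with more than one signature hash means the listed machines disagree on how to build that task, which is exactly what the test reports.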
diff --git a/scripts/lib/checklayer/cases/common.py b/scripts/lib/checklayer/cases/common.py
new file mode 100644
index 0000000000..b82304e361
--- /dev/null
+++ b/scripts/lib/checklayer/cases/common.py
@@ -0,0 +1,60 @@
+# Copyright (C) 2017 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import glob
+import os
+import unittest
+from checklayer import get_signatures, LayerType, check_command, get_depgraph, compare_signatures
+from checklayer.case import OECheckLayerTestCase
+
+class CommonCheckLayer(OECheckLayerTestCase):
+ def test_readme(self):
+ # The top-level README file may have a suffix (like README.rst or README.txt).
+ readme_files = glob.glob(os.path.join(self.tc.layer['path'], '[Rr][Ee][Aa][Dd][Mm][Ee]*'))
+        self.assertTrue(len(readme_files) > 0,
+            msg="Layer doesn't contain a README file.")
+
+ # There might be more than one file matching the file pattern above
+ # (for example, README.rst and README-COPYING.rst). The one with the shortest
+ # name is considered the "main" one.
+ readme_file = sorted(readme_files)[0]
+ data = ''
+ with open(readme_file, 'r') as f:
+ data = f.read()
+ self.assertTrue(data,
+ msg="Layer contains a README file but it is empty.")
+
+ def test_parse(self):
+ check_command('Layer %s failed to parse.' % self.tc.layer['name'],
+ 'bitbake -p')
+
+ def test_show_environment(self):
+ check_command('Layer %s failed to show environment.' % self.tc.layer['name'],
+ 'bitbake -e')
+
+ def test_world(self):
+ '''
+ "bitbake world" is expected to work. test_signatures does not cover that
+ because it is more lenient and ignores recipes in a world build that
+ are not actually buildable, so here we fail when "bitbake -S none world"
+ fails.
+ '''
+ get_signatures(self.td['builddir'], failsafe=False)
+
+ def test_signatures(self):
+ if self.tc.layer['type'] == LayerType.SOFTWARE and \
+ not self.tc.test_software_layer_signatures:
+ raise unittest.SkipTest("Not testing for signature changes in a software layer %s." \
+ % self.tc.layer['name'])
+
+ curr_sigs, _ = get_signatures(self.td['builddir'], failsafe=True)
+ msg = compare_signatures(self.td['sigs'], curr_sigs)
+ if msg is not None:
+ self.fail('Adding layer %s changed signatures.\n%s' % (self.tc.layer['name'], msg))
+
+ def test_layerseries_compat(self):
+ for collection_name, collection_data in self.tc.layer['collections'].items():
+ self.assertTrue(collection_data['compat'], "Collection %s from layer %s does not set compatible oe-core versions via LAYERSERIES_COMPAT_collection." \
+ % (collection_name, self.tc.layer['name']))
diff --git a/scripts/lib/checklayer/cases/distro.py b/scripts/lib/checklayer/cases/distro.py
new file mode 100644
index 0000000000..f0bee5493c
--- /dev/null
+++ b/scripts/lib/checklayer/cases/distro.py
@@ -0,0 +1,28 @@
+# Copyright (C) 2017 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import unittest
+
+from checklayer import LayerType
+from checklayer.case import OECheckLayerTestCase
+
+class DistroCheckLayer(OECheckLayerTestCase):
+ @classmethod
+    def setUpClass(cls):
+        if cls.tc.layer['type'] != LayerType.DISTRO:
+            raise unittest.SkipTest("DistroCheckLayer: Layer %s isn't a distro layer." %\
+                cls.tc.layer['name'])
+
+ def test_distro_defines_distros(self):
+        self.assertTrue(self.tc.layer['conf']['distros'],
+            "Layer is a distro layer but doesn't define distros.")
+
+ def test_distro_no_set_distros(self):
+ from oeqa.utils.commands import get_bb_var
+
+ distro = get_bb_var('DISTRO')
+ self.assertEqual(self.td['bbvars']['DISTRO'], distro,
+ msg="Layer %s modified distro %s -> %s" % \
+ (self.tc.layer['name'], self.td['bbvars']['DISTRO'], distro))
diff --git a/scripts/lib/checklayer/context.py b/scripts/lib/checklayer/context.py
new file mode 100644
index 0000000000..4de8f668fd
--- /dev/null
+++ b/scripts/lib/checklayer/context.py
@@ -0,0 +1,17 @@
+# Copyright (C) 2017 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import sys
+import glob
+import re
+
+from oeqa.core.context import OETestContext
+
+class CheckLayerTestContext(OETestContext):
+ def __init__(self, td=None, logger=None, layer=None, test_software_layer_signatures=True):
+ super(CheckLayerTestContext, self).__init__(td, logger)
+ self.layer = layer
+ self.test_software_layer_signatures = test_software_layer_signatures
diff --git a/scripts/lib/devtool/__init__.py b/scripts/lib/devtool/__init__.py
index b432e3d44e..d39c474fbd 100644
--- a/scripts/lib/devtool/__init__.py
+++ b/scripts/lib/devtool/__init__.py
@@ -4,18 +4,8 @@
#
# Copyright (C) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugins module"""
import os
@@ -23,6 +13,7 @@ import sys
import subprocess
import logging
import re
+import codecs
+import errno
logger = logging.getLogger('devtool')
@@ -67,10 +58,10 @@ def exec_watch(cmd, **options):
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **options
)
+ reader = codecs.getreader('utf-8')(process.stdout)
buf = ''
while True:
- out = process.stdout.read(1)
- out = out.decode('utf-8')
+ out = reader.read(1, 1)
if out:
sys.stdout.write(out)
sys.stdout.flush()
@@ -86,13 +77,13 @@ def exec_watch(cmd, **options):
def exec_fakeroot(d, cmd, **kwargs):
"""Run a command under fakeroot (pseudo, in fact) so that it picks up the appropriate file permissions"""
# Grab the command and check it actually exists
- fakerootcmd = d.getVar('FAKEROOTCMD', True)
+ fakerootcmd = d.getVar('FAKEROOTCMD')
if not os.path.exists(fakerootcmd):
         logger.error('pseudo executable %s could not be found - have you run a build yet? pseudo-native should install this and if you have run any build then that should have been built', fakerootcmd)
return 2
# Set up the appropriate environment
newenv = dict(os.environ)
- fakerootenv = d.getVar('FAKEROOTENV', True)
+ fakerootenv = d.getVar('FAKEROOTENV')
for varvalue in fakerootenv.split():
if '=' in varvalue:
splitval = varvalue.split('=', 1)
@@ -113,40 +104,40 @@ def setup_tinfoil(config_only=False, basepath=None, tracking=False):
import bb.tinfoil
tinfoil = bb.tinfoil.Tinfoil(tracking=tracking)
- tinfoil.prepare(config_only)
- tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ try:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only)
+ except bb.tinfoil.TinfoilUIException:
+ tinfoil.shutdown()
+ raise DevtoolError('Failed to start bitbake environment')
+ except:
+ tinfoil.shutdown()
+ raise
finally:
os.chdir(orig_cwd)
return tinfoil
-def get_recipe_file(cooker, pn):
- """Find recipe file corresponding a package name"""
- import oe.recipeutils
- recipefile = oe.recipeutils.pn_to_recipe(cooker, pn)
- if not recipefile:
- skipreasons = oe.recipeutils.get_unavailable_reasons(cooker, pn)
- if skipreasons:
- logger.error('\n'.join(skipreasons))
- else:
- logger.error("Unable to find any recipe file matching %s" % pn)
- return recipefile
-
def parse_recipe(config, tinfoil, pn, appends, filter_workspace=True):
- """Parse recipe of a package"""
- import oe.recipeutils
- recipefile = get_recipe_file(tinfoil.cooker, pn)
- if not recipefile:
- # Error already logged
+ """Parse the specified recipe"""
+ try:
+ recipefile = tinfoil.get_recipe_file(pn)
+ except bb.providers.NoProvider as e:
+ logger.error(str(e))
return None
if appends:
- append_files = tinfoil.cooker.collection.get_file_appends(recipefile)
+ append_files = tinfoil.get_file_appends(recipefile)
if filter_workspace:
# Filter out appends from the workspace
append_files = [path for path in append_files if
not path.startswith(config.workspace_path)]
else:
append_files = None
- return oe.recipeutils.parse_recipe(tinfoil.cooker, recipefile, append_files)
+ try:
+ rd = tinfoil.parse_recipe_file(recipefile, appends, append_files)
+ except Exception as e:
+ logger.error(str(e))
+ return None
+ return rd
def check_workspace_recipe(workspace, pn, checksrc=True, bbclassextend=False):
"""
@@ -190,7 +181,7 @@ def use_external_build(same_dir, no_same_dir, d):
logger.info('Using source tree as build directory since --same-dir specified')
elif bb.data.inherits_class('autotools-brokensep', d):
logger.info('Using source tree as build directory since recipe inherits autotools-brokensep')
- elif d.getVar('B', True) == os.path.abspath(d.getVar('S', True)):
+ elif os.path.abspath(d.getVar('B')) == os.path.abspath(d.getVar('S')):
logger.info('Using source tree as build directory since that would be the default for this recipe')
else:
b_is_s = False
@@ -204,6 +195,7 @@ def setup_git_repo(repodir, version, devbranch, basetag='devtool-base', d=None):
import oe.patch
if not os.path.exists(os.path.join(repodir, '.git')):
bb.process.run('git init', cwd=repodir)
+ bb.process.run('git config --local gc.autodetach 0', cwd=repodir)
bb.process.run('git add .', cwd=repodir)
commit_cmd = ['git']
oe.patch.GitApplyTree.gitCommandUserOptions(commit_cmd, d=d)
@@ -219,6 +211,20 @@ def setup_git_repo(repodir, version, devbranch, basetag='devtool-base', d=None):
commit_cmd += ['-m', commitmsg]
bb.process.run(commit_cmd, cwd=repodir)
+ # Ensure singletask.lock (as used by externalsrc.bbclass) is ignored by git
+ excludes = []
+ excludefile = os.path.join(repodir, '.git', 'info', 'exclude')
+ try:
+ with open(excludefile, 'r') as f:
+ excludes = f.readlines()
+ except FileNotFoundError:
+ pass
+ if 'singletask.lock\n' not in excludes:
+ excludes.append('singletask.lock\n')
+ with open(excludefile, 'w') as f:
+ for line in excludes:
+ f.write(line)
+
bb.process.run('git checkout -b %s' % devbranch, cwd=repodir)
bb.process.run('git tag -f %s' % basetag, cwd=repodir)
@@ -259,3 +265,110 @@ def get_bbclassextend_targets(recipefile, pn):
elif variant in ['native', 'cross', 'crosssdk']:
targets.append('%s-%s' % (pn, variant))
return targets
+
+def replace_from_file(path, old, new):
+ """Replace strings on a file"""
+
+ def read_file(path):
+ data = None
+ with open(path) as f:
+ data = f.read()
+ return data
+
+ def write_file(path, data):
+ if data is None:
+ return
+ wdata = data.rstrip() + "\n"
+ with open(path, "w") as f:
+ f.write(wdata)
+
+ # In case old is None, return immediately
+ if old is None:
+ return
+ try:
+ rdata = read_file(path)
+ except IOError as e:
+        # if the file does not exist, just quit; otherwise raise the exception
+ if e.errno == errno.ENOENT:
+ return
+ else:
+ raise
+
+ old_contents = rdata.splitlines()
+ new_contents = []
+ for old_content in old_contents:
+        new_contents.append(old_content.replace(old, new))
+ write_file(path, "\n".join(new_contents))
+
+
+def update_unlockedsigs(basepath, workspace, fixed_setup, extra=None):
+ """ This function will make unlocked-sigs.inc match the recipes in the
+ workspace plus any extras we want unlocked. """
+
+ if not fixed_setup:
+ # Only need to write this out within the eSDK
+ return
+
+ if not extra:
+ extra = []
+
+ confdir = os.path.join(basepath, 'conf')
+ unlockedsigs = os.path.join(confdir, 'unlocked-sigs.inc')
+
+ # Get current unlocked list if any
+ values = {}
+ def get_unlockedsigs_varfunc(varname, origvalue, op, newlines):
+ values[varname] = origvalue
+ return origvalue, None, 0, True
+ if os.path.exists(unlockedsigs):
+ with open(unlockedsigs, 'r') as f:
+ bb.utils.edit_metadata(f, ['SIGGEN_UNLOCKED_RECIPES'], get_unlockedsigs_varfunc)
+ unlocked = sorted(values.get('SIGGEN_UNLOCKED_RECIPES', []))
+
+ # If the new list is different to the current list, write it out
+ newunlocked = sorted(list(workspace.keys()) + extra)
+ if unlocked != newunlocked:
+ bb.utils.mkdirhier(confdir)
+ with open(unlockedsigs, 'w') as f:
+ f.write("# DO NOT MODIFY! YOUR CHANGES WILL BE LOST.\n" +
+ "# This layer was created by the OpenEmbedded devtool" +
+ " utility in order to\n" +
+ "# contain recipes that are unlocked.\n")
+
+ f.write('SIGGEN_UNLOCKED_RECIPES += "\\\n')
+ for pn in newunlocked:
+ f.write(' ' + pn)
+ f.write('"')
+
+def check_prerelease_version(ver, operation):
+ if 'pre' in ver or 'rc' in ver:
+ logger.warning('Version "%s" looks like a pre-release version. '
+ 'If that is the case, in order to ensure that the '
+ 'version doesn\'t appear to go backwards when you '
+ 'later upgrade to the final release version, it is '
+                   'recommended that instead you use '
+ '<current version>+<pre-release version> e.g. if '
+ 'upgrading from 1.9 to 2.0-rc2 use "1.9+2.0-rc2". '
+ 'If you prefer not to reset and re-try, you can change '
+ 'the version after %s succeeds using "devtool rename" '
+ 'with -V/--version.' % (ver, operation))
+
+def check_git_repo_dirty(repodir):
+ """Check if a git repository is clean or not"""
+ stdout, _ = bb.process.run('git status --porcelain', cwd=repodir)
+ return stdout
+
+def check_git_repo_op(srctree, ignoredirs=None):
+ """Check if a git repository is in the middle of a rebase"""
+ stdout, _ = bb.process.run('git rev-parse --show-toplevel', cwd=srctree)
+ topleveldir = stdout.strip()
+ if ignoredirs and topleveldir in ignoredirs:
+ return
+ gitdir = os.path.join(topleveldir, '.git')
+ if os.path.exists(os.path.join(gitdir, 'rebase-merge')):
+ raise DevtoolError("Source tree %s appears to be in the middle of a rebase - please resolve this first" % srctree)
+ if os.path.exists(os.path.join(gitdir, 'rebase-apply')):
+ raise DevtoolError("Source tree %s appears to be in the middle of 'git am' or 'git apply' - please resolve this first" % srctree)
diff --git a/scripts/lib/devtool/build.py b/scripts/lib/devtool/build.py
index 6be549dd59..935ffab46c 100644
--- a/scripts/lib/devtool/build.py
+++ b/scripts/lib/devtool/build.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool build plugin"""
import os
@@ -21,7 +11,8 @@ import bb
import logging
import argparse
import tempfile
-from devtool import exec_build_env_command, check_workspace_recipe, DevtoolError
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError
+from devtool import parse_recipe
logger = logging.getLogger('devtool')
@@ -53,8 +44,22 @@ def _get_build_tasks(config):
def build(args, config, basepath, workspace):
"""Entry point for the devtool 'build' subcommand"""
workspacepn = check_workspace_recipe(workspace, args.recipename, bbclassextend=True)
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, appends=True, filter_workspace=False)
+ if not rd:
+ return 1
+ deploytask = 'do_deploy' in rd.getVar('__BBTASKS')
+ finally:
+ tinfoil.shutdown()
- build_tasks = _get_build_tasks(config)
+ if args.clean:
+ # use clean instead of cleansstate to avoid messing things up in eSDK
+ build_tasks = ['do_clean']
+ else:
+ build_tasks = _get_build_tasks(config)
+ if deploytask:
+ build_tasks.append('do_deploy')
bbappend = workspace[workspacepn]['bbappend']
if args.disable_parallel_make:
@@ -80,7 +85,8 @@ def register_commands(subparsers, context):
"""Register devtool subcommands from this plugin"""
parser_build = subparsers.add_parser('build', help='Build a recipe',
description='Builds the specified recipe using bitbake (up to and including %s)' % ', '.join(_get_build_tasks(context.config)),
- group='working')
+ group='working', order=50)
parser_build.add_argument('recipename', help='Recipe to build')
parser_build.add_argument('-s', '--disable-parallel-make', action="store_true", help='Disable make parallelism')
+    parser_build.add_argument('-c', '--clean', action='store_true', help='Clean up recipe building results')
parser_build.set_defaults(func=build)
diff --git a/scripts/lib/devtool/build_image.py b/scripts/lib/devtool/build_image.py
index ae75511dc7..9388abbacf 100644
--- a/scripts/lib/devtool/build_image.py
+++ b/scripts/lib/devtool/build_image.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugin containing the build-image subcommand."""
@@ -34,8 +24,8 @@ def _get_packages(tinfoil, workspace, config):
result = []
for recipe in workspace:
data = parse_recipe(config, tinfoil, recipe, True)
- if 'class-target' in data.getVar('OVERRIDES', True).split(':'):
- if recipe in data.getVar('PACKAGES', True).split():
+ if 'class-target' in data.getVar('OVERRIDES').split(':'):
+ if recipe in data.getVar('PACKAGES').split():
result.append(recipe)
else:
logger.warning("Skipping recipe %s as it doesn't produce a "
@@ -95,7 +85,7 @@ def build_image_task(config, basepath, workspace, image, add_packages=None, task
raise TargetNotImageError()
# Get the actual filename used and strip the .bb and full path
- target_basename = rd.getVar('FILE', True)
+ target_basename = rd.getVar('FILE')
target_basename = os.path.splitext(os.path.basename(target_basename))[0]
config.set('SDK', 'target_basename', target_basename)
config.write()
@@ -132,9 +122,9 @@ def build_image_task(config, basepath, workspace, image, add_packages=None, task
afile.write('%s\n' % line)
if task in ['populate_sdk', 'populate_sdk_ext']:
- outputdir = rd.getVar('SDK_DEPLOY', True)
+ outputdir = rd.getVar('SDK_DEPLOY')
else:
- outputdir = rd.getVar('DEPLOY_DIR_IMAGE', True)
+ outputdir = rd.getVar('DEPLOY_DIR_IMAGE')
tmp_tinfoil = tinfoil
tinfoil = None
diff --git a/scripts/lib/devtool/build_sdk.py b/scripts/lib/devtool/build_sdk.py
index b89d65b0cb..6fe02fff2a 100644
--- a/scripts/lib/devtool/build_sdk.py
+++ b/scripts/lib/devtool/build_sdk.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import subprocess
diff --git a/scripts/lib/devtool/deploy.py b/scripts/lib/devtool/deploy.py
index fb84f2dd08..6a997735fc 100644
--- a/scripts/lib/devtool/deploy.py
+++ b/scripts/lib/devtool/deploy.py
@@ -2,26 +2,20 @@
#
# Copyright (C) 2014-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugin containing the deploy subcommands"""
+import logging
import os
+import shutil
import subprocess
-import logging
import tempfile
-import shutil
+
+import bb.utils
import argparse_oe
+import oe.types
+
from devtool import exec_fakeroot, setup_tinfoil, check_workspace_recipe, DevtoolError
logger = logging.getLogger('devtool')
@@ -64,7 +58,7 @@ def _prepare_remote_script(deploy, verbose=False, dryrun=False, undeployall=Fals
lines.append(' rmdir $file > /dev/null 2>&1 || true')
lines.append(' fi')
lines.append(' else')
- lines.append(' rm $file')
+ lines.append(' rm -f $file')
lines.append(' fi')
lines.append(' done')
if not dryrun:
@@ -85,7 +79,7 @@ def _prepare_remote_script(deploy, verbose=False, dryrun=False, undeployall=Fals
lines.append('do')
lines.append(' checkpath=`dirname "$checkpath"`')
lines.append('done')
- lines.append('freespace=`df -P $checkpath | sed "1d" | awk \'{ print $4 }\'`')
+ lines.append(r'freespace=$(df -P $checkpath | sed -nre "s/^(\S+\s+){3}([0-9]+).*/\2/p")')
# First line of the file is the total space
lines.append('total=`head -n1 $3`')
lines.append('if [ $total -gt $freespace ] ; then')
@@ -119,7 +113,11 @@ def _prepare_remote_script(deploy, verbose=False, dryrun=False, undeployall=Fals
# Put any preserved files back
lines.append('if [ -d $preservedir ] ; then')
lines.append(' cd $preservedir')
- lines.append(' find . -type f -exec mv {} /{} \;')
+ # find from busybox might not have -exec, so we don't use that
+ lines.append(' find . -type f | while read file')
+ lines.append(' do')
+ lines.append(' mv $file /$file')
+ lines.append(' done')
lines.append(' cd /')
lines.append(' rm -rf $preservedir')
lines.append('fi')
@@ -136,11 +134,12 @@ def _prepare_remote_script(deploy, verbose=False, dryrun=False, undeployall=Fals
return '\n'.join(lines)
+
def deploy(args, config, basepath, workspace):
"""Entry point for the devtool 'deploy' subcommand"""
- import re
import math
import oe.recipeutils
+ import oe.package
check_workspace_recipe(workspace, args.recipename, checksrc=False)
@@ -156,16 +155,27 @@ def deploy(args, config, basepath, workspace):
tinfoil = setup_tinfoil(basepath=basepath)
try:
try:
- rd = oe.recipeutils.parse_recipe_simple(tinfoil.cooker, args.recipename, tinfoil.config_data)
+ rd = tinfoil.parse_recipe(args.recipename)
except Exception as e:
raise DevtoolError('Exception parsing recipe %s: %s' %
(args.recipename, e))
- recipe_outdir = rd.getVar('D', True)
+ recipe_outdir = rd.getVar('D')
if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
raise DevtoolError('No files to deploy - have you built the %s '
'recipe? If so, the install step has not installed '
'any files.' % args.recipename)
+ if args.strip and not args.dry_run:
+ # Fakeroot copy to new destination
+ srcdir = recipe_outdir
+ recipe_outdir = os.path.join(rd.getVar('WORKDIR'), 'deploy-target-stripped')
+ if os.path.isdir(recipe_outdir):
+ bb.utils.remove(recipe_outdir, True)
+ exec_fakeroot(rd, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True)
+ os.environ['PATH'] = ':'.join([os.environ['PATH'], rd.getVar('PATH') or ''])
+ oe.package.strip_execs(args.recipename, recipe_outdir, rd.getVar('STRIP'), rd.getVar('libdir'),
+ rd.getVar('base_libdir'), rd)
+
filelist = []
ftotalsize = 0
for root, _, files in os.walk(recipe_outdir):
@@ -185,13 +195,26 @@ def deploy(args, config, basepath, workspace):
print(' %s' % item)
return 0
-
extraoptions = ''
if args.no_host_check:
extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
if not args.show_status:
extraoptions += ' -q'
+ scp_sshexec = ''
+ ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ scp_sshexec = "-S %s" % args.ssh_exec
+ ssh_sshexec = args.ssh_exec
+ scp_port = ''
+ ssh_port = ''
+ if args.port:
+ scp_port = "-P %s" % args.port
+ ssh_port = "-p %s" % args.port
+
+ if args.key:
+ extraoptions += ' -i %s' % args.key
+
# In order to delete previously deployed files and have the manifest file on
# the target, we write out a shell script and then copy it to the target
# so we can then run it (piping tar output to it).
@@ -213,7 +236,7 @@ def deploy(args, config, basepath, workspace):
for fpath, fsize in filelist:
f.write('%s %d\n' % (fpath, fsize))
# Copy them to the target
- ret = subprocess.call("scp %s %s/* %s:%s" % (extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
if ret != 0:
raise DevtoolError('Failed to copy script to %s - rerun with -s to '
'get a complete error message' % args.target)
@@ -221,7 +244,7 @@ def deploy(args, config, basepath, workspace):
shutil.rmtree(tmpdir)
# Now run the script
- ret = exec_fakeroot(rd, 'tar cf - . | ssh %s %s \'sh %s %s %s %s\'' % (extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
+ ret = exec_fakeroot(rd, 'tar cf - . | %s %s %s %s \'sh %s %s %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
if ret != 0:
raise DevtoolError('Deploy failed - rerun with -s to get a complete '
'error message')
@@ -251,6 +274,17 @@ def undeploy(args, config, basepath, workspace):
if not args.show_status:
extraoptions += ' -q'
+ scp_sshexec = ''
+ ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ scp_sshexec = "-S %s" % args.ssh_exec
+ ssh_sshexec = args.ssh_exec
+ scp_port = ''
+ ssh_port = ''
+ if args.port:
+ scp_port = "-P %s" % args.port
+ ssh_port = "-p %s" % args.port
+
args.target = args.target.split(':')[0]
tmpdir = tempfile.mkdtemp(prefix='devtool')
@@ -261,7 +295,7 @@ def undeploy(args, config, basepath, workspace):
with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
f.write(shellscript)
# Copy it to the target
- ret = subprocess.call("scp %s %s/* %s:%s" % (extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
if ret != 0:
raise DevtoolError('Failed to copy script to %s - rerun with -s to '
'get a complete error message' % args.target)
@@ -269,7 +303,7 @@ def undeploy(args, config, basepath, workspace):
shutil.rmtree(tmpdir)
# Now run the script
- ret = subprocess.call('ssh %s %s \'sh %s %s\'' % (extraoptions, args.target, tmpscript, args.recipename), shell=True)
+ ret = subprocess.call('%s %s %s %s \'sh %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename), shell=True)
if ret != 0:
raise DevtoolError('Undeploy failed - rerun with -s to get a complete '
'error message')
@@ -281,6 +315,7 @@ def undeploy(args, config, basepath, workspace):
def register_commands(subparsers, context):
"""Register devtool subcommands from the deploy plugin"""
+
parser_deploy = subparsers.add_parser('deploy-target',
help='Deploy recipe output files to live target machine',
description='Deploys a recipe\'s build output (i.e. the output of the do_install task) to a live target machine over ssh. By default, any existing files will be preserved instead of being overwritten and will be restored if you run devtool undeploy-target. Note: this only deploys the recipe itself and not any runtime dependencies, so it is assumed that those have been installed on the target beforehand.',
@@ -292,6 +327,19 @@ def register_commands(subparsers, context):
parser_deploy.add_argument('-n', '--dry-run', help='List files to be deployed only', action='store_true')
parser_deploy.add_argument('-p', '--no-preserve', help='Do not preserve existing files', action='store_true')
parser_deploy.add_argument('--no-check-space', help='Do not check for available space before deploying', action='store_true')
+ parser_deploy.add_argument('-e', '--ssh-exec', help='Executable to use in place of ssh')
+ parser_deploy.add_argument('-P', '--port', help='Specify port to use for connection to the target')
+ parser_deploy.add_argument('-I', '--key',
+                               help='Specify ssh private key for connection to the target')
+
+ strip_opts = parser_deploy.add_mutually_exclusive_group(required=False)
+ strip_opts.add_argument('-S', '--strip',
+ help='Strip executables prior to deploying (default: %(default)s). '
+ 'The default value of this option can be controlled by setting the strip option in the [Deploy] section to True or False.',
+ default=oe.types.boolean(context.config.get('Deploy', 'strip', default='0')),
+ action='store_true')
+ strip_opts.add_argument('--no-strip', help='Do not strip executables prior to deploy', dest='strip', action='store_false')
+
parser_deploy.set_defaults(func=deploy)
parser_undeploy = subparsers.add_parser('undeploy-target',
@@ -304,4 +352,9 @@ def register_commands(subparsers, context):
parser_undeploy.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true')
parser_undeploy.add_argument('-a', '--all', help='Undeploy all recipes deployed on the target', action='store_true')
parser_undeploy.add_argument('-n', '--dry-run', help='List files to be undeployed only', action='store_true')
+ parser_undeploy.add_argument('-e', '--ssh-exec', help='Executable to use in place of ssh')
+ parser_undeploy.add_argument('-P', '--port', help='Specify port to use for connection to the target')
+ parser_undeploy.add_argument('-I', '--key',
+                               help='Specify ssh private key for connection to the target')
+
parser_undeploy.set_defaults(func=undeploy)
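
The default for --strip above comes from the [Deploy] section of the devtool configuration, normalised through oe.types.boolean(); a short sketch of that conversion (the string values are examples):

    import oe.types

    oe.types.boolean('0')     # -> False, the built-in default used above
    oe.types.boolean('True')  # -> True, as a 'strip = True' config entry would be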
diff --git a/scripts/lib/devtool/export.py b/scripts/lib/devtool/export.py
new file mode 100644
index 0000000000..01174edae5
--- /dev/null
+++ b/scripts/lib/devtool/export.py
@@ -0,0 +1,109 @@
+# Development tool - export command plugin
+#
+# Copyright (C) 2014-2017 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool export plugin"""
+
+import os
+import argparse
+import tarfile
+import logging
+import datetime
+import json
+
+logger = logging.getLogger('devtool')
+
+# output files
+default_arcname_prefix = "workspace-export"
+metadata = '.export_metadata'
+
+def export(args, config, basepath, workspace):
+ """Entry point for the devtool 'export' subcommand"""
+
+ def add_metadata(tar):
+ """Archive the workspace object"""
+ # finally store the workspace metadata
+ with open(metadata, 'w') as fd:
+ fd.write(json.dumps((config.workspace_path, workspace)))
+ tar.add(metadata)
+ os.unlink(metadata)
+
+ def add_recipe(tar, recipe, data):
+ """Archive recipe with proper arcname"""
+        # Build a list of (name, arcname) pairs
+ arcnames = []
+ for key, name in data.items():
+ if name:
+ if key == 'srctree':
+                    # all sources, no matter where they are located, go into the sources directory
+ arcname = 'sources/%s' % recipe
+ else:
+ arcname = name.replace(config.workspace_path, '')
+ arcnames.append((name, arcname))
+
+ for name, arcname in arcnames:
+ tar.add(name, arcname=arcname)
+
+
+    # Make sure the workspace is non-empty and that any recipes listed via --include/--exclude are in the workspace
+ if not workspace:
+ logger.info('Workspace contains no recipes, nothing to export')
+ return 0
+ else:
+ for param, recipes in {'include':args.include,'exclude':args.exclude}.items():
+ for recipe in recipes:
+ if recipe not in workspace:
+                    logger.error('Recipe (%s) given in the %s argument is not in the current workspace' % (recipe, param))
+ return 1
+
+ name = args.file
+
+ default_name = "%s-%s.tar.gz" % (default_arcname_prefix, datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
+ if not name:
+ name = default_name
+ else:
+ # if name is a directory, append the default name
+ if os.path.isdir(name):
+ name = os.path.join(name, default_name)
+
+ if os.path.exists(name) and not args.overwrite:
+        logger.error('Tar archive %s exists. Use --overwrite/-o to overwrite it' % name)
+ return 1
+
+    # if the whole workspace is excluded, quit
+ if not len(set(workspace.keys()).difference(set(args.exclude))):
+ logger.warning('All recipes in workspace excluded, nothing to export')
+ return 0
+
+ exported = []
+ with tarfile.open(name, 'w:gz') as tar:
+ if args.include:
+ for recipe in args.include:
+ add_recipe(tar, recipe, workspace[recipe])
+ exported.append(recipe)
+ else:
+ for recipe, data in workspace.items():
+ if recipe not in args.exclude:
+ add_recipe(tar, recipe, data)
+ exported.append(recipe)
+
+ add_metadata(tar)
+
+ logger.info('Tar archive created at %s with the following recipes: %s' % (name, ', '.join(exported)))
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool export subcommands"""
+ parser = subparsers.add_parser('export',
+ help='Export workspace into a tar archive',
+ description='Export one or more recipes from current workspace into a tar archive',
+ group='advanced')
+
+ parser.add_argument('--file', '-f', help='Output archive file name')
+ parser.add_argument('--overwrite', '-o', action="store_true", help='Overwrite previous export tar archive')
+ group = parser.add_mutually_exclusive_group()
+    group.add_argument('--include', '-i', nargs='+', default=[], help='Include recipes in the tar archive')
+    group.add_argument('--exclude', '-e', nargs='+', default=[], help='Exclude recipes from the tar archive')
+ parser.set_defaults(func=export)
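
To make the arcname mapping in add_recipe() concrete, here is what a hypothetical recipe 'foo' would look like inside the archive; the dictionary keys and paths are invented to mirror how the workspace dict is used above:

    workspace_path = '/path/to/workspace'
    data = {
        'bbappend': workspace_path + '/appends/foo_1.0.bbappend',
        'recipefile': workspace_path + '/recipes/foo/foo_1.0.bb',
        'srctree': '/home/user/src/foo',
    }
    for key, name in data.items():
        arcname = 'sources/foo' if key == 'srctree' else name.replace(workspace_path, '')
        print(name, '->', arcname)
    # -> appends/foo_1.0.bbappend, recipes/foo/foo_1.0.bb, sources/foo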
diff --git a/scripts/lib/devtool/import.py b/scripts/lib/devtool/import.py
new file mode 100644
index 0000000000..6829851669
--- /dev/null
+++ b/scripts/lib/devtool/import.py
@@ -0,0 +1,134 @@
+# Development tool - import command plugin
+#
+# Copyright (C) 2014-2017 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool import plugin"""
+
+import os
+import tarfile
+import logging
+import collections
+import json
+import fnmatch
+
+from devtool import standard, setup_tinfoil, replace_from_file, DevtoolError
+from devtool import export
+
+logger = logging.getLogger('devtool')
+
+def devimport(args, config, basepath, workspace):
+ """Entry point for the devtool 'import' subcommand"""
+
+ def get_pn(name):
+ """ Returns the filename of a workspace recipe/append"""
+ metadata = name.split('/')[-1]
+ fn, _ = os.path.splitext(metadata)
+ return fn
+
+ if not os.path.exists(args.file):
+ raise DevtoolError('Tar archive %s does not exist. Export your workspace using "devtool export"' % args.file)
+
+ with tarfile.open(args.file) as tar:
+ # Get exported metadata
+ export_workspace_path = export_workspace = None
+ try:
+ metadata = tar.getmember(export.metadata)
+        except KeyError:
+ raise DevtoolError('The export metadata file created by "devtool export" was not found. "devtool import" can only be used to import tar archives created by "devtool export".')
+
+ tar.extract(metadata)
+ with open(metadata.name) as fdm:
+ export_workspace_path, export_workspace = json.load(fdm)
+ os.unlink(metadata.name)
+
+ members = tar.getmembers()
+
+ # Get appends and recipes from the exported archive, these
+ # will be needed to find out those appends without corresponding
+ # recipe pair
+ append_fns, recipe_fns = set(), set()
+ for member in members:
+ if member.name.startswith('appends'):
+ append_fns.add(get_pn(member.name))
+ elif member.name.startswith('recipes'):
+ recipe_fns.add(get_pn(member.name))
+
+ # Setup tinfoil, get required data and shutdown
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ current_fns = [os.path.basename(recipe[0]) for recipe in tinfoil.cooker.recipecaches[''].pkg_fn.items()]
+ finally:
+ tinfoil.shutdown()
+
+ # Find those appends that do not have recipes in current metadata
+ non_importables = []
+ for fn in append_fns - recipe_fns:
+ # Check on current metadata (covering those layers indicated in bblayers.conf)
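+ # (for/else: the else clause runs only if no current recipe matched the append)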
+ for current_fn in current_fns:
+ if fnmatch.fnmatch(current_fn, '*' + fn.replace('%', '') + '*'):
+ break
+ else:
+ non_importables.append(fn)
+ logger.warning('No recipe to apply %s.bbappend to, skipping' % fn)
+
+ # Extract
+ imported = []
+ for member in members:
+ if member.name == export.metadata:
+ continue
+
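+ # for/else: extract (the else branch) only when the member matched none of the non-importables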
+ for nonimp in non_importables:
+ pn = nonimp.split('_')[0]
+ # do not extract data from non-importable recipes or metadata
+ if member.name.startswith('appends/%s' % nonimp) or \
+ member.name.startswith('recipes/%s' % nonimp) or \
+ member.name.startswith('sources/%s' % pn):
+ break
+ else:
+ path = os.path.join(config.workspace_path, member.name)
+ if os.path.exists(path):
+ # by default, no file overwrite is done unless -o is given by the user
+ if args.overwrite:
+ try:
+ tar.extract(member, path=config.workspace_path)
+ except PermissionError as pe:
+ logger.warning(pe)
+ else:
+ logger.warning('File already present. Use --overwrite/-o to overwrite it: %s' % member.name)
+ continue
+ else:
+ tar.extract(member, path=config.workspace_path)
+
+ # Update EXTERNALSRC and the devtool md5 file
+ if member.name.startswith('appends'):
+ if export_workspace_path:
+ # appends created by 'devtool modify' just need to update the workspace
+ replace_from_file(path, export_workspace_path, config.workspace_path)
+
+ # appends created by 'devtool add' need replacement of exported source tree
+ pn = get_pn(member.name).split('_')[0]
+ exported_srctree = export_workspace[pn]['srctree']
+ if exported_srctree:
+ replace_from_file(path, exported_srctree, os.path.join(config.workspace_path, 'sources', pn))
+
+ standard._add_md5(config, pn, path)
+ imported.append(pn)
+
+ if imported:
+ logger.info('Imported recipes into workspace %s: %s' % (config.workspace_path, ', '.join(imported)))
+ else:
+ logger.warning('No recipes imported into the workspace')
+
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool import subcommands"""
+ parser = subparsers.add_parser('import',
+ help='Import exported tar archive into workspace',
+ description='Import tar archive previously created by "devtool export" into workspace',
+ group='advanced')
+ parser.add_argument('file', metavar='FILE', help='Name of the tar archive to import')
+ parser.add_argument('--overwrite', '-o', action="store_true", help='Overwrite files when extracting')
+ parser.set_defaults(func=devimport)
diff --git a/scripts/lib/devtool/menuconfig.py b/scripts/lib/devtool/menuconfig.py
new file mode 100644
index 0000000000..95384c5333
--- /dev/null
+++ b/scripts/lib/devtool/menuconfig.py
@@ -0,0 +1,79 @@
+# OpenEmbedded Development tool - menuconfig command plugin
+#
+# Copyright (C) 2018 Xilinx
+# Written by: Chandana Kalluri <ckalluri@xilinx.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Devtool menuconfig plugin"""
+
+import os
+import bb
+import logging
+import argparse
+import re
+import glob
+from devtool import setup_tinfoil, parse_recipe, DevtoolError, standard, exec_build_env_command
+from devtool import check_workspace_recipe
+logger = logging.getLogger('devtool')
+
+def menuconfig(args, config, basepath, workspace):
+ """Entry point for the devtool 'menuconfig' subcommand"""
+
+ rd = ""
+ kconfigpath = ""
+ pn_src = ""
+ localfilesdir = ""
+ workspace_dir = ""
+ tinfoil = setup_tinfoil(basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, args.component, appends=True, filter_workspace=False)
+ if not rd:
+ return 1
+
+ check_workspace_recipe(workspace, args.component)
+ pn = rd.getVar('PN', True)
+
+ if not rd.getVarFlag('do_menuconfig','task'):
+ raise DevtoolError("This recipe does not support menuconfig option")
+
+ workspace_dir = os.path.join(config.workspace_path,'sources')
+ kconfigpath = rd.getVar('B')
+ pn_src = os.path.join(workspace_dir,pn)
+
+ # add check to see if oe_local_files exists or not
+ localfilesdir = os.path.join(pn_src,'oe-local-files')
+ if not os.path.exists(localfilesdir):
+ bb.utils.mkdirhier(localfilesdir)
+ # Add gitignore to ensure source tree is clean
+ gitignorefile = os.path.join(localfilesdir,'.gitignore')
+ with open(gitignorefile, 'w') as f:
+ f.write('# Ignore local files, by default. Remove this file if you want to commit the directory to Git\n')
+ f.write('*\n')
+
+ finally:
+ tinfoil.shutdown()
+
+ logger.info('Launching menuconfig')
+ exec_build_env_command(config.init_path, basepath, 'bitbake -c menuconfig %s' % pn, watch=True)
+ fragment = os.path.join(localfilesdir, 'devtool-fragment.cfg')
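+ # Generate a config fragment from the diff between the baseline and updated .config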
+ res = standard._create_kconfig_diff(pn_src,rd,fragment)
+
+ return 0
+
+def register_commands(subparsers, context):
+ """register devtool subcommands from this plugin"""
+ parser_menuconfig = subparsers.add_parser('menuconfig',help='Alter build-time configuration for a recipe', description='Launches the make menuconfig command (for recipes where do_menuconfig is available), allowing users to make changes to the build-time configuration. Creates a config fragment corresponding to changes made.', group='advanced')
+ parser_menuconfig.add_argument('component', help='component to alter config')
+ parser_menuconfig.set_defaults(func=menuconfig,fixed_setup=context.fixed_setup)
diff --git a/scripts/lib/devtool/package.py b/scripts/lib/devtool/package.py
index afb5809a36..c2367342c3 100644
--- a/scripts/lib/devtool/package.py
+++ b/scripts/lib/devtool/package.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugin containing the package subcommands"""
import os
@@ -28,15 +18,13 @@ def package(args, config, basepath, workspace):
"""Entry point for the devtool 'package' subcommand"""
check_workspace_recipe(workspace, args.recipename)
- tinfoil = setup_tinfoil(basepath=basepath)
+ tinfoil = setup_tinfoil(basepath=basepath, config_only=True)
try:
- tinfoil.prepare(config_only=True)
-
image_pkgtype = config.get('Package', 'image_pkgtype', '')
if not image_pkgtype:
- image_pkgtype = tinfoil.config_data.getVar('IMAGE_PKGTYPE', True)
+ image_pkgtype = tinfoil.config_data.getVar('IMAGE_PKGTYPE')
- deploy_dir_pkg = tinfoil.config_data.getVar('DEPLOY_DIR_%s' % image_pkgtype.upper(), True)
+ deploy_dir_pkg = tinfoil.config_data.getVar('DEPLOY_DIR_%s' % image_pkgtype.upper())
finally:
tinfoil.shutdown()
diff --git a/scripts/lib/devtool/runqemu.py b/scripts/lib/devtool/runqemu.py
index 303abcae4f..ead978aabc 100644
--- a/scripts/lib/devtool/runqemu.py
+++ b/scripts/lib/devtool/runqemu.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool runqemu plugin"""
@@ -31,8 +21,10 @@ def runqemu(args, config, basepath, workspace):
tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
try:
- machine = tinfoil.config_data.getVar('MACHINE', True)
- bindir_native = tinfoil.config_data.getVar('STAGING_BINDIR_NATIVE', True)
+ machine = tinfoil.config_data.getVar('MACHINE')
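+ # Reconstruct STAGING_BINDIR_NATIVE (STAGING_DIR/BUILD_ARCH + bindir_native) from config-only data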
+ bindir_native = os.path.join(tinfoil.config_data.getVar('STAGING_DIR'),
+ tinfoil.config_data.getVar('BUILD_ARCH'),
+ tinfoil.config_data.getVar('bindir_native').lstrip(os.path.sep))
finally:
tinfoil.shutdown()
@@ -48,7 +40,12 @@ def runqemu(args, config, basepath, workspace):
raise DevtoolError('Unable to determine image name to run, please specify one')
try:
- exec_build_env_command(config.init_path, basepath, 'runqemu %s %s %s' % (machine, imagename, " ".join(args.args)), watch=True)
+ # FIXME runqemu assumes that if OECORE_NATIVE_SYSROOT is set then it shouldn't
+ # run bitbake to find out the values of various environment variables, which
+ # isn't the case for the extensible SDK. Work around it for now.
+ newenv = dict(os.environ)
+ newenv.pop('OECORE_NATIVE_SYSROOT', '')
+ exec_build_env_command(config.init_path, basepath, 'runqemu %s %s %s' % (machine, imagename, " ".join(args.args)), watch=True, env=newenv)
except bb.process.ExecutionError as e:
# We've already seen the output since watch=True, so just ensure we return something to the user
return e.exitcode
diff --git a/scripts/lib/devtool/sdk.py b/scripts/lib/devtool/sdk.py
index 922277b79f..3aa42a1466 100644
--- a/scripts/lib/devtool/sdk.py
+++ b/scripts/lib/devtool/sdk.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import subprocess
@@ -132,9 +122,9 @@ def sdk_update(args, config, basepath, workspace):
# Grab variable values
tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
try:
- stamps_dir = tinfoil.config_data.getVar('STAMPS_DIR', True)
- sstate_mirrors = tinfoil.config_data.getVar('SSTATE_MIRRORS', True)
- site_conf_version = tinfoil.config_data.getVar('SITE_CONF_VERSION', True)
+ stamps_dir = tinfoil.config_data.getVar('STAMPS_DIR')
+ sstate_mirrors = tinfoil.config_data.getVar('SSTATE_MIRRORS')
+ site_conf_version = tinfoil.config_data.getVar('SITE_CONF_VERSION')
finally:
tinfoil.shutdown()
@@ -145,6 +135,9 @@ def sdk_update(args, config, basepath, workspace):
# Fetch manifest from server
tmpmanifest = os.path.join(tmpsdk_dir, 'conf', 'sdk-conf-manifest')
ret = subprocess.call("wget -q -O %s %s/conf/sdk-conf-manifest" % (tmpmanifest, updateserver), shell=True)
+ if ret != 0:
+ logger.error("Cannot dowload files from %s" % updateserver)
+ return ret
changedfiles = check_manifest(tmpmanifest, basepath)
if not changedfiles:
logger.info("Already up-to-date")
@@ -155,7 +148,7 @@ def sdk_update(args, config, basepath, workspace):
if os.path.exists(os.path.join(basepath, 'layers/.git')):
out = subprocess.check_output("git status --porcelain", shell=True, cwd=layers_dir)
if not out:
- ret = subprocess.call("git fetch --all; git reset --hard", shell=True, cwd=layers_dir)
+ ret = subprocess.call("git fetch --all; git reset --hard @{u}", shell=True, cwd=layers_dir)
else:
logger.error("Failed to update metadata as there have been changes made to it. Aborting.");
logger.error("Changed files:\n%s" % out);
@@ -273,7 +266,7 @@ def sdk_install(args, config, basepath, workspace):
rd = parse_recipe(config, tinfoil, recipe, True)
if not rd:
return 1
- stampprefixes[recipe] = '%s.%s' % (rd.getVar('STAMP', True), tasks[0])
+ stampprefixes[recipe] = '%s.%s' % (rd.getVar('STAMP'), tasks[0])
if checkstamp(recipe):
logger.info('%s is already installed' % recipe)
else:
@@ -306,6 +299,12 @@ def sdk_install(args, config, basepath, workspace):
if failed:
return 2
+ try:
+ exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots', watch=True)
+ except bb.process.ExecutionError as e:
+ raise DevtoolError('Failed to bitbake build-sysroots:\n%s' % (str(e)))
+
+
def register_commands(subparsers, context):
"""Register devtool subcommands from the sdk plugin"""
if context.fixed_setup:
diff --git a/scripts/lib/devtool/search.py b/scripts/lib/devtool/search.py
index b44bed7f6f..d24040df37 100644
--- a/scripts/lib/devtool/search.py
+++ b/scripts/lib/devtool/search.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool search plugin"""
@@ -31,49 +21,79 @@ def search(args, config, basepath, workspace):
tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
try:
- pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR', True)
+ pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
defsummary = tinfoil.config_data.getVar('SUMMARY', False) or ''
keyword_rc = re.compile(args.keyword)
- for fn in os.listdir(pkgdata_dir):
- pfn = os.path.join(pkgdata_dir, fn)
- if not os.path.isfile(pfn):
+ def print_match(pn):
+ rd = parse_recipe(config, tinfoil, pn, True)
+ if not rd:
+ return
+ summary = rd.getVar('SUMMARY')
+ if summary == rd.expand(defsummary):
+ summary = ''
+ print("%s %s" % (pn.ljust(20), summary))
+
+
+ matches = []
+ if os.path.exists(pkgdata_dir):
+ for fn in os.listdir(pkgdata_dir):
+ pfn = os.path.join(pkgdata_dir, fn)
+ if not os.path.isfile(pfn):
+ continue
+
+ packages = []
+ match = False
+ if keyword_rc.search(fn):
+ match = True
+
+ if not match:
+ with open(pfn, 'r') as f:
+ for line in f:
+ if line.startswith('PACKAGES:'):
+ packages = line.split(':', 1)[1].strip().split()
+
+ for pkg in packages:
+ if keyword_rc.search(pkg):
+ match = True
+ break
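+ # The .packaged marker indicates the package was actually produced, so runtime pkgdata exists for it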
+ if os.path.exists(os.path.join(pkgdata_dir, 'runtime', pkg + '.packaged')):
+ with open(os.path.join(pkgdata_dir, 'runtime', pkg), 'r') as f:
+ for line in f:
+ if ': ' in line:
+ splitline = line.split(':', 1)
+ key = splitline[0]
+ value = splitline[1].strip()
+ if key in ['PKG_%s' % pkg, 'DESCRIPTION', 'FILES_INFO'] or key.startswith('FILERPROVIDES_'):
+ if keyword_rc.search(value):
+ match = True
+ break
+ if match:
+ print_match(fn)
+ matches.append(fn)
+ else:
+ logger.warning('Package data is not available, results may be limited')
+
+ for recipe in tinfoil.all_recipes():
+ if args.fixed_setup and 'nativesdk' in recipe.inherits():
continue
- packages = []
match = False
- if keyword_rc.search(fn):
+ if keyword_rc.search(recipe.pn):
match = True
-
- if not match:
- with open(pfn, 'r') as f:
- for line in f:
- if line.startswith('PACKAGES:'):
- packages = line.split(':', 1)[1].strip().split()
-
- for pkg in packages:
- if keyword_rc.search(pkg):
+ else:
+ for prov in recipe.provides:
+ if keyword_rc.search(prov):
match = True
break
- if os.path.exists(os.path.join(pkgdata_dir, 'runtime', pkg + '.packaged')):
- with open(os.path.join(pkgdata_dir, 'runtime', pkg), 'r') as f:
- for line in f:
- if ': ' in line:
- splitline = line.split(':', 1)
- key = splitline[0]
- value = splitline[1].strip()
- if key in ['PKG_%s' % pkg, 'DESCRIPTION', 'FILES_INFO'] or key.startswith('FILERPROVIDES_'):
- if keyword_rc.search(value):
- match = True
- break
-
- if match:
- rd = parse_recipe(config, tinfoil, fn, True)
- summary = rd.getVar('SUMMARY', True)
- if summary == rd.expand(defsummary):
- summary = ''
- print("%s %s" % (fn.ljust(20), summary))
+ if not match:
+ for rprov in recipe.rprovides:
+ if keyword_rc.search(rprov):
+ match = True
+ break
+ if match and not recipe.pn in matches:
+ print_match(recipe.pn)
finally:
tinfoil.shutdown()
@@ -82,7 +102,7 @@ def search(args, config, basepath, workspace):
def register_commands(subparsers, context):
"""Register devtool subcommands from this plugin"""
parser_search = subparsers.add_parser('search', help='Search available recipes',
- description='Searches for available target recipes. Matches on recipe name, package name, description and installed files, and prints the recipe name on match.',
+ description='Searches for available recipes. Matches on recipe name, package name, description and installed files, and prints the recipe name and summary on match.',
group='info')
- parser_search.add_argument('keyword', help='Keyword to search for (regular expression syntax allowed)')
- parser_search.set_defaults(func=search, no_workspace=True)
+ parser_search.add_argument('keyword', help='Keyword to search for (regular expression syntax allowed, use quotes to avoid shell expansion)')
+ parser_search.set_defaults(func=search, no_workspace=True, fixed_setup=context.fixed_setup)
diff --git a/scripts/lib/devtool/standard.py b/scripts/lib/devtool/standard.py
index 83191450be..1c0cd8ab51 100644
--- a/scripts/lib/devtool/standard.py
+++ b/scripts/lib/devtool/standard.py
@@ -1,19 +1,9 @@
# Development tool - standard commands plugin
#
-# Copyright (C) 2014-2016 Intel Corporation
+# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool standard plugins"""
import os
@@ -30,11 +20,13 @@ import errno
import glob
import filecmp
from collections import OrderedDict
-from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, use_external_build, setup_git_repo, recipe_to_append, get_bbclassextend_targets, DevtoolError
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, use_external_build, setup_git_repo, recipe_to_append, get_bbclassextend_targets, update_unlockedsigs, check_prerelease_version, check_git_repo_dirty, check_git_repo_op, DevtoolError
from devtool import parse_recipe
logger = logging.getLogger('devtool')
+override_branch_prefix = 'devtool-override-'
+
def add(args, config, basepath, workspace):
"""Entry point for the devtool 'add' subcommand"""
@@ -64,7 +56,13 @@ def add(args, config, basepath, workspace):
args.srctree = args.recipename
args.recipename = None
elif os.path.isdir(args.recipename):
- logger.warn('Ambiguous argument %s - assuming you mean it to be the recipe name')
+ logger.warning('Ambiguous argument "%s" - assuming you mean it to be the recipe name' % args.recipename)
+
+ if not args.fetchuri:
+ if args.srcrev:
+ raise DevtoolError('The -S/--srcrev option is only valid when fetching from an SCM repository')
+ if args.srcbranch:
+ raise DevtoolError('The -B/--srcbranch option is only valid when fetching from an SCM repository')
if args.srctree and os.path.isfile(args.srctree):
args.fetchuri = 'file://' + os.path.abspath(args.srctree)
@@ -74,7 +72,7 @@ def add(args, config, basepath, workspace):
if args.fetchuri:
raise DevtoolError('URI specified as positional argument as well as -f/--fetch')
else:
- # FIXME should show a warning that -f/--fetch is deprecated here
+ logger.warning('-f/--fetch option is deprecated - you can now simply specify the URL to fetch as a positional argument instead')
args.fetchuri = args.fetch
if args.recipename:
@@ -147,16 +145,26 @@ def add(args, config, basepath, workspace):
extracmdopts += ' --src-subdir "%s"' % args.src_subdir
if args.autorev:
extracmdopts += ' -a'
+ if args.fetch_dev:
+ extracmdopts += ' --fetch-dev'
+ if args.mirrors:
+ extracmdopts += ' --mirrors'
+ if args.srcrev:
+ extracmdopts += ' --srcrev %s' % args.srcrev
+ if args.srcbranch:
+ extracmdopts += ' --srcbranch %s' % args.srcbranch
+ if args.provides:
+ extracmdopts += ' --provides %s' % args.provides
tempdir = tempfile.mkdtemp(prefix='devtool')
try:
try:
- stdout, _ = exec_build_env_command(config.init_path, basepath, 'recipetool --color=%s create -o %s "%s" %s' % (color, tempdir, source, extracmdopts))
+ stdout, _ = exec_build_env_command(config.init_path, basepath, 'recipetool --color=%s create --devtool -o %s \'%s\' %s' % (color, tempdir, source, extracmdopts), watch=True)
except bb.process.ExecutionError as e:
if e.exitcode == 15:
raise DevtoolError('Could not auto-determine recipe name, please specify it on the command line')
else:
- raise DevtoolError('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+ raise DevtoolError('Command \'%s\' failed' % e.command)
recipes = glob.glob(os.path.join(tempdir, '*.bb'))
if recipes:
@@ -199,7 +207,7 @@ def add(args, config, basepath, workspace):
raise DevtoolError('Command \'%s\' did not create any recipe file:\n%s' % (e.command, e.stdout))
attic_recipe = os.path.join(config.workspace_path, 'attic', recipename, os.path.basename(recipefile))
if os.path.exists(attic_recipe):
- logger.warn('A modified recipe from a previous invocation exists in %s - you may wish to move this over the top of the new recipe if you had changes in it that you want to continue with' % attic_recipe)
+ logger.warning('A modified recipe from a previous invocation exists in %s - you may wish to move this over the top of the new recipe if you had changes in it that you want to continue with' % attic_recipe)
finally:
if tmpsrcdir and os.path.exists(tmpsrcdir):
shutil.rmtree(tmpsrcdir)
@@ -210,8 +218,17 @@ def add(args, config, basepath, workspace):
tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
try:
- rd = oe.recipeutils.parse_recipe(tinfoil.cooker, recipefile, None)
+ try:
+ rd = tinfoil.parse_recipe_file(recipefile, False)
+ except Exception as e:
+ logger.error(str(e))
+ rd = None
if not rd:
+ # Parsing failed. We just created this recipe and we shouldn't
+ # leave it in the workdir or it'll prevent bitbake from starting
+ movefn = '%s.parsefailed' % recipefile
+ logger.error('Parsing newly created recipe failed, moving recipe to %s for reference. If this looks to be caused by the recipe itself, please report this error.' % movefn)
+ shutil.move(recipefile, movefn)
return 1
if args.fetchuri and not args.no_git:
@@ -253,8 +270,28 @@ def add(args, config, basepath, workspace):
f.write(' done\n')
f.write('}\n')
+ # Check if the new recipe provides anything whose priority has been
+ # overridden by PREFERRED_PROVIDER.
+ recipe_name = rd.getVar('PN')
+ provides = rd.getVar('PROVIDES')
+ # Search every item defined in PROVIDES
+ for recipe_provided in provides.split():
+ preferred_provider = 'PREFERRED_PROVIDER_' + recipe_provided
+ current_pprovider = rd.getVar(preferred_provider)
+ if current_pprovider and current_pprovider != recipe_name:
+ if args.fixed_setup:
+ # If we are inside the eSDK, add the new PREFERRED_PROVIDER to the workspace layer.conf
+ layerconf_file = os.path.join(config.workspace_path, "conf", "layer.conf")
+ with open(layerconf_file, 'a') as f:
+ f.write('%s = "%s"\n' % (preferred_provider, recipe_name))
+ else:
+ logger.warning('Set \'%s\' in order to use the recipe' % preferred_provider)
+ break
+
_add_md5(config, recipename, appendfile)
+ check_prerelease_version(rd.getVar('PV'), 'devtool add')
+
logger.info('Recipe %s has been automatically created; further editing may be required to make it fully functional' % recipefile)
finally:
@@ -289,26 +326,52 @@ def _check_compatible_recipe(pn, d):
raise DevtoolError("The %s recipe is a meta-recipe, and therefore is "
"not supported by this tool" % pn, 4)
- if bb.data.inherits_class('externalsrc', d) and d.getVar('EXTERNALSRC', True):
+ if bb.data.inherits_class('externalsrc', d) and d.getVar('EXTERNALSRC'):
# Not an incompatibility error per se, so we don't pass the error code
raise DevtoolError("externalsrc is currently enabled for the %s "
"recipe. This prevents the normal do_patch task "
"from working. You will need to disable this "
"first." % pn)
-def _move_file(src, dst):
- """Move a file. Creates all the directory components of destination path."""
+def _dry_run_copy(src, dst, dry_run_outdir, base_outdir):
+ """Common function for copying a file to the dry run output directory"""
+ relpath = os.path.relpath(dst, base_outdir)
+ if relpath.startswith('..'):
+ raise Exception('Incorrect base path %s for path %s' % (base_outdir, dst))
+ dst = os.path.join(dry_run_outdir, relpath)
dst_d = os.path.dirname(dst)
if dst_d:
bb.utils.mkdirhier(dst_d)
- shutil.move(src, dst)
+ # Don't overwrite existing files, otherwise in the case of an upgrade
+ # the dry-run written out recipe will be overwritten with an unmodified
+ # version
+ if not os.path.exists(dst):
+ shutil.copy(src, dst)
+
+def _move_file(src, dst, dry_run_outdir=None, base_outdir=None):
+ """Move a file. Creates all the directory components of destination path."""
+ dry_run_suffix = ' (dry-run)' if dry_run_outdir else ''
+ logger.debug('Moving %s to %s%s' % (src, dst, dry_run_suffix))
+ if dry_run_outdir:
+ # We want to copy here, not move
+ _dry_run_copy(src, dst, dry_run_outdir, base_outdir)
+ else:
+ dst_d = os.path.dirname(dst)
+ if dst_d:
+ bb.utils.mkdirhier(dst_d)
+ shutil.move(src, dst)
-def _copy_file(src, dst):
+def _copy_file(src, dst, dry_run_outdir=None):
"""Copy a file. Creates all the directory components of destination path."""
- dst_d = os.path.dirname(dst)
- if dst_d:
- bb.utils.mkdirhier(dst_d)
- shutil.copy(src, dst)
+ dry_run_suffix = ' (dry-run)' if dry_run_outdir else ''
+ logger.debug('Copying %s to %s%s' % (src, dst, dry_run_suffix))
+ if dry_run_outdir:
+ _dry_run_copy(src, dst, dry_run_outdir, base_outdir)
+ else:
+ dst_d = os.path.dirname(dst)
+ if dst_d:
+ bb.utils.mkdirhier(dst_d)
+ shutil.copy(src, dst)
def _git_ls_tree(repodir, treeish='HEAD', recursive=False):
"""List contents of a git treeish"""
@@ -318,10 +381,11 @@ def _git_ls_tree(repodir, treeish='HEAD', recursive=False):
cmd.append('-r')
out, _ = bb.process.run(cmd, cwd=repodir)
ret = {}
- for line in out.split('\0'):
- if line:
- split = line.split(None, 4)
- ret[split[3]] = split[0:3]
+ if out:
+ for line in out.split('\0'):
+ if line:
+ split = line.split(None, 4)
+ ret[split[3]] = split[0:3]
return ret
def _git_exclude_path(srctree, path):
@@ -353,7 +417,7 @@ def extract(args, config, basepath, workspace):
"""Entry point for the devtool 'extract' subcommand"""
import bb
- tinfoil = _prep_extract_operation(config, basepath, args.recipename)
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
if not tinfoil:
# Error already shown
return 1
@@ -363,7 +427,7 @@ def extract(args, config, basepath, workspace):
return 1
srctree = os.path.abspath(args.srctree)
- initial_rev = _extract_source(srctree, args.keep_temp, args.branch, False, rd)
+ initial_rev, _ = _extract_source(srctree, args.keep_temp, args.branch, False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
logger.info('Source tree extracted to %s' % srctree)
if initial_rev:
@@ -377,7 +441,7 @@ def sync(args, config, basepath, workspace):
"""Entry point for the devtool 'sync' subcommand"""
import bb
- tinfoil = _prep_extract_operation(config, basepath, args.recipename)
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
if not tinfoil:
# Error already shown
return 1
@@ -387,7 +451,7 @@ def sync(args, config, basepath, workspace):
return 1
srctree = os.path.abspath(args.srctree)
- initial_rev = _extract_source(srctree, args.keep_temp, args.branch, True, rd)
+ initial_rev, _ = _extract_source(srctree, args.keep_temp, args.branch, True, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=True)
logger.info('Source tree %s synchronized' % srctree)
if initial_rev:
@@ -397,109 +461,39 @@ def sync(args, config, basepath, workspace):
finally:
tinfoil.shutdown()
-class BbTaskExecutor(object):
- """Class for executing bitbake tasks for a recipe
-
- FIXME: This is very awkward. Unfortunately it's not currently easy to
- properly execute tasks outside of bitbake itself, until then this has to
- suffice if we are to handle e.g. linux-yocto's extra tasks
- """
-
- def __init__(self, rdata):
- self.rdata = rdata
- self.executed = []
-
- def exec_func(self, func, report):
- """Run bitbake task function"""
- if not func in self.executed:
- deps = self.rdata.getVarFlag(func, 'deps', False)
- if deps:
- for taskdepfunc in deps:
- self.exec_func(taskdepfunc, True)
- if report:
- logger.info('Executing %s...' % func)
- fn = self.rdata.getVar('FILE', True)
- localdata = bb.build._task_data(fn, func, self.rdata)
- try:
- bb.build.exec_func(func, localdata)
- except bb.build.FuncFailed as e:
- raise DevtoolError(str(e))
- self.executed.append(func)
-
-
-class PatchTaskExecutor(BbTaskExecutor):
- def __init__(self, rdata):
- import oe.patch
- self.check_git = False
- self.useroptions = []
- oe.patch.GitApplyTree.gitCommandUserOptions(self.useroptions, d=rdata)
- super(PatchTaskExecutor, self).__init__(rdata)
-
- def exec_func(self, func, report):
- from oe.patch import GitApplyTree
- srcsubdir = self.rdata.getVar('S', True)
- haspatches = False
- if func == 'do_patch':
- patchdir = os.path.join(srcsubdir, 'patches')
- if os.path.exists(patchdir):
- if os.listdir(patchdir):
- haspatches = True
- else:
- os.rmdir(patchdir)
-
- super(PatchTaskExecutor, self).exec_func(func, report)
- if self.check_git and os.path.exists(srcsubdir):
- if func == 'do_patch':
- if os.path.exists(patchdir):
- shutil.rmtree(patchdir)
- if haspatches:
- stdout, _ = bb.process.run('git status --porcelain patches', cwd=srcsubdir)
- if stdout:
- bb.process.run('git checkout patches', cwd=srcsubdir)
-
- stdout, _ = bb.process.run('git status --porcelain', cwd=srcsubdir)
- if stdout:
- bb.process.run('git add .; git %s commit -a -m "Committing changes from %s\n\n%s"' % (' '.join(self.useroptions), func, GitApplyTree.ignore_commit_prefix + ' - from %s' % func), cwd=srcsubdir)
-
-
-def _prep_extract_operation(config, basepath, recipename, tinfoil=None):
- """HACK: Ugly workaround for making sure that requirements are met when
- trying to extract a package. Returns the tinfoil instance to be used."""
- if not tinfoil:
- tinfoil = setup_tinfoil(basepath=basepath)
-
- rd = parse_recipe(config, tinfoil, recipename, True)
- if not rd:
- return None
-
- if bb.data.inherits_class('kernel-yocto', rd):
- tinfoil.shutdown()
- try:
- stdout, _ = exec_build_env_command(config.init_path, basepath,
- 'bitbake kern-tools-native')
- tinfoil = setup_tinfoil(basepath=basepath)
- except bb.process.ExecutionError as err:
- raise DevtoolError("Failed to build kern-tools-native:\n%s" %
- err.stdout)
- return tinfoil
-
-
-def _extract_source(srctree, keep_temp, devbranch, sync, d):
+def symlink_oelocal_files_srctree(rd,srctree):
+ import oe.patch
+ if os.path.abspath(rd.getVar('S')) == os.path.abspath(rd.getVar('WORKDIR')):
+ # If recipe extracts to ${WORKDIR}, symlink the files into the srctree
+ # (otherwise the recipe won't build as expected)
+ local_files_dir = os.path.join(srctree, 'oe-local-files')
+ addfiles = []
+ for root, _, files in os.walk(local_files_dir):
+ relpth = os.path.relpath(root, local_files_dir)
+ if relpth != '.':
+ bb.utils.mkdirhier(os.path.join(srctree, relpth))
+ for fn in files:
+ if fn == '.gitignore':
+ continue
+ destpth = os.path.join(srctree, relpth, fn)
+ if os.path.exists(destpth):
+ os.unlink(destpth)
+ os.symlink('oe-local-files/%s' % fn, destpth)
+ addfiles.append(os.path.join(relpth, fn))
+ if addfiles:
+ bb.process.run('git add %s' % ' '.join(addfiles), cwd=srctree)
+ useroptions = []
+ oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=rd)
+ bb.process.run('git %s commit -m "Committing local file symlinks\n\n%s"' % (' '.join(useroptions), oe.patch.GitApplyTree.ignore_commit_prefix), cwd=srctree)
+
+
+def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, workspace, fixed_setup, d, tinfoil, no_overrides=False):
"""Extract sources of a recipe"""
- import bb.event
import oe.recipeutils
+ import oe.patch
+ import oe.path
- def eventfilter(name, handler, event, d):
- """Bitbake event filter for devtool extract operation"""
- if name == 'base_eventhandler':
- return True
- else:
- return False
-
- if hasattr(bb.event, 'set_eventfilter'):
- bb.event.set_eventfilter(eventfilter)
-
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
_check_compatible_recipe(pn, d)
@@ -524,105 +518,119 @@ def _extract_source(srctree, keep_temp, devbranch, sync, d):
bb.utils.mkdirhier(srctree)
os.rmdir(srctree)
- # We don't want notes to be printed, they are too verbose
- origlevel = bb.logger.getEffectiveLevel()
- if logger.getEffectiveLevel() > logging.DEBUG:
- bb.logger.setLevel(logging.WARNING)
+ extra_overrides = []
+ if not no_overrides:
+ history = d.varhistory.variable('SRC_URI')
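+ # Ops recorded as _append[<override>]/_prepend[<override>] reveal conditional SRC_URI modifications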
+ for event in history:
+ if not 'flag' in event:
+ if event['op'].startswith(('_append[', '_prepend[')):
+ extra_overrides.append(event['op'].split('[')[1].split(']')[0])
+ # We want to remove duplicate overrides. If a recipe had multiple
+ # SRC_URI_override += values it would cause mulitple instances of
+ # overrides. This doesn't play nicely with things like creating a
+ # branch for every instance of DEVTOOL_EXTRA_OVERRIDES.
+ extra_overrides = list(set(extra_overrides))
+ if extra_overrides:
+ logger.info('SRC_URI contains some conditional appends/prepends - will create branches to represent these')
initial_rev = None
- tempdir = tempfile.mkdtemp(prefix='devtool')
- try:
- crd = d.createCopy()
- # Make a subdir so we guard against WORKDIR==S
- workdir = os.path.join(tempdir, 'workdir')
- crd.setVar('WORKDIR', workdir)
- crd.setVar('T', os.path.join(tempdir, 'temp'))
- if not crd.getVar('S', True).startswith(workdir):
- # Usually a shared workdir recipe (kernel, gcc)
- # Try to set a reasonable default
- if bb.data.inherits_class('kernel', d):
- crd.setVar('S', '${WORKDIR}/source')
- else:
- crd.setVar('S', '${WORKDIR}/%s' % os.path.basename(d.getVar('S', True)))
- if bb.data.inherits_class('kernel', d):
- # We don't want to move the source to STAGING_KERNEL_DIR here
- crd.setVar('STAGING_KERNEL_DIR', '${S}')
-
- task_executor = PatchTaskExecutor(crd)
-
- crd.setVar('EXTERNALSRC_forcevariable', '')
-
- logger.info('Fetching %s...' % pn)
- task_executor.exec_func('do_fetch', False)
- logger.info('Unpacking...')
- task_executor.exec_func('do_unpack', False)
- if bb.data.inherits_class('kernel-yocto', d):
- # Extra step for kernel to populate the source directory
- logger.info('Doing kernel checkout...')
- task_executor.exec_func('do_kernel_checkout', False)
- srcsubdir = crd.getVar('S', True)
-
- task_executor.check_git = True
-
- # Move local source files into separate subdir
- recipe_patches = [os.path.basename(patch) for patch in
- oe.recipeutils.get_recipe_patches(crd)]
- local_files = oe.recipeutils.get_recipe_local_files(crd)
- local_files = [fname for fname in local_files if
- os.path.exists(os.path.join(workdir, fname))]
- if local_files:
- for fname in local_files:
- _move_file(os.path.join(workdir, fname),
- os.path.join(tempdir, 'oe-local-files', fname))
- with open(os.path.join(tempdir, 'oe-local-files', '.gitignore'),
- 'w') as f:
- f.write('# Ignore local files, by default. Remove this file '
- 'if you want to commit the directory to Git\n*\n')
-
- if srcsubdir == workdir:
- # Find non-patch non-local sources that were "unpacked" to srctree
- # directory
- src_files = [fname for fname in _ls_tree(workdir) if
- os.path.basename(fname) not in recipe_patches]
- # Force separate S so that patch files can be left out from srctree
- srcsubdir = tempfile.mkdtemp(dir=workdir)
- crd.setVar('S', srcsubdir)
- # Move source files to S
- for path in src_files:
- _move_file(os.path.join(workdir, path),
- os.path.join(srcsubdir, path))
- elif os.path.dirname(srcsubdir) != workdir:
- # Handle if S is set to a subdirectory of the source
- srcsubdir = os.path.join(workdir, os.path.relpath(srcsubdir, workdir).split(os.sep)[0])
-
- scriptutils.git_convert_standalone_clone(srcsubdir)
- # Make sure that srcsubdir exists
- bb.utils.mkdirhier(srcsubdir)
- if not os.path.exists(srcsubdir) or not os.listdir(srcsubdir):
- logger.warning("no source unpacked to S, either the %s recipe "
- "doesn't use any source or the correct source "
- "directory could not be determined" % pn)
-
- setup_git_repo(srcsubdir, crd.getVar('PV', True), devbranch, d=d)
-
- (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srcsubdir)
- initial_rev = stdout.rstrip()
+ appendexisted = False
+ recipefile = d.getVar('FILE')
+ appendfile = recipe_to_append(recipefile, config)
+ is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
+
+ # We need to redirect WORKDIR, STAMPS_DIR etc. under a temporary
+ # directory so that:
+ # (a) we pick up all files that get unpacked to the WORKDIR, and
+ # (b) we don't disturb the existing build
+ # However, with recipe-specific sysroots the sysroots for the recipe
+ # will be prepared under WORKDIR, and if we used the system temporary
+ # directory (i.e. usually /tmp) as used by mkdtemp by default, then
+ # our attempts to hardlink files into the recipe-specific sysroots
+ # will fail on systems where /tmp is a different filesystem, and it
+ # would have to fall back to copying the files which is a waste of
+ # time. Put the temp directory under the WORKDIR to prevent that from
+ # being a problem.
+ tempbasedir = d.getVar('WORKDIR')
+ bb.utils.mkdirhier(tempbasedir)
+ tempdir = tempfile.mkdtemp(prefix='devtooltmp-', dir=tempbasedir)
+ try:
+ tinfoil.logger.setLevel(logging.WARNING)
- crd.setVar('PATCHTOOL', 'git')
+ # FIXME this results in a cache reload under control of tinfoil, which is fine
+ # except we don't get the knotty progress bar
- logger.info('Patching...')
- task_executor.exec_func('do_patch', False)
+ if os.path.exists(appendfile):
+ appendbackup = os.path.join(tempdir, os.path.basename(appendfile) + '.bak')
+ shutil.copyfile(appendfile, appendbackup)
+ else:
+ appendbackup = None
+ bb.utils.mkdirhier(os.path.dirname(appendfile))
+ logger.debug('writing append file %s' % appendfile)
+ with open(appendfile, 'a') as f:
+ f.write('###--- _extract_source\n')
+ f.write('DEVTOOL_TEMPDIR = "%s"\n' % tempdir)
+ f.write('DEVTOOL_DEVBRANCH = "%s"\n' % devbranch)
+ if not is_kernel_yocto:
+ f.write('PATCHTOOL = "git"\n')
+ f.write('PATCH_COMMIT_FUNCTIONS = "1"\n')
+ if extra_overrides:
+ f.write('DEVTOOL_EXTRA_OVERRIDES = "%s"\n' % ':'.join(extra_overrides))
+ f.write('inherit devtool-source\n')
+ f.write('###--- _extract_source\n')
+
+ update_unlockedsigs(basepath, workspace, fixed_setup, [pn])
+
+ sstate_manifests = d.getVar('SSTATE_MANIFESTS')
+ bb.utils.mkdirhier(sstate_manifests)
+ preservestampfile = os.path.join(sstate_manifests, 'preserve-stamps')
+ with open(preservestampfile, 'w') as f:
+ f.write(d.getVar('STAMP'))
+ try:
+ if is_kernel_yocto:
+ # We need to generate the kernel config
+ task = 'do_configure'
+ else:
+ task = 'do_patch'
- bb.process.run('git tag -f devtool-patched', cwd=srcsubdir)
+ # Run the fetch + unpack tasks
+ res = tinfoil.build_targets(pn,
+ task,
+ handle_events=True)
+ finally:
+ if os.path.exists(preservestampfile):
+ os.remove(preservestampfile)
- kconfig = None
- if bb.data.inherits_class('kernel-yocto', d):
- # Store generate and store kernel config
- logger.info('Generating kernel config')
- task_executor.exec_func('do_configure', False)
- kconfig = os.path.join(crd.getVar('B', True), '.config')
+ if not res:
+ raise DevtoolError('Extracting source for %s failed' % pn)
+ try:
+ with open(os.path.join(tempdir, 'initial_rev'), 'r') as f:
+ initial_rev = f.read()
+
+ with open(os.path.join(tempdir, 'srcsubdir'), 'r') as f:
+ srcsubdir = f.read()
+ except FileNotFoundError as e:
+ raise DevtoolError('Something went wrong with source extraction - the devtool-source class was not active or did not function correctly:\n%s' % str(e))
+ srcsubdir_rel = os.path.relpath(srcsubdir, os.path.join(tempdir, 'workdir'))
+
+ # Check if work-shared is empty, if yes
+ # find source and copy to work-shared
+ if is_kernel_yocto:
+ workshareddir = d.getVar('STAGING_KERNEL_DIR')
+ staging_kerVer = get_staging_kver(workshareddir)
+ kernelVersion = d.getVar('LINUX_VERSION')
+
+ # handle dangling symbolic link in work-shared:
+ if os.path.islink(workshareddir):
+ os.unlink(workshareddir)
+
+ if os.path.exists(workshareddir) and (not os.listdir(workshareddir) or kernelVersion != staging_kerVer):
+ shutil.rmtree(workshareddir)
+ oe.path.copyhardlinktree(srcsubdir,workshareddir)
+ elif not os.path.exists(workshareddir):
+ oe.path.copyhardlinktree(srcsubdir,workshareddir)
tempdir_localdir = os.path.join(tempdir, 'oe-local-files')
srctree_localdir = os.path.join(srctree, 'oe-local-files')
@@ -652,19 +660,22 @@ def _extract_source(srctree, keep_temp, devbranch, sync, d):
shutil.move(tempdir_localdir, srcsubdir)
shutil.move(srcsubdir, srctree)
+ symlink_oelocal_files_srctree(d,srctree)
- if kconfig:
+ if is_kernel_yocto:
logger.info('Copying kernel config to srctree')
- shutil.copy2(kconfig, srctree)
+ shutil.copy2(os.path.join(tempdir, '.config'), srctree)
finally:
- bb.logger.setLevel(origlevel)
-
+ if appendbackup:
+ shutil.copyfile(appendbackup, appendfile)
+ elif os.path.exists(appendfile):
+ os.remove(appendfile)
if keep_temp:
logger.info('Preserving temporary directory %s' % tempdir)
else:
shutil.rmtree(tempdir)
- return initial_rev
+ return initial_rev, srcsubdir_rel
def _add_md5(config, recipename, filename):
"""Record checksum of a file (or recursively for a directory) to the md5-file of the workspace"""
@@ -672,8 +683,11 @@ def _add_md5(config, recipename, filename):
def addfile(fn):
md5 = bb.utils.md5_file(fn)
- with open(os.path.join(config.workspace_path, '.devtool_md5'), 'a') as f:
- f.write('%s|%s|%s\n' % (recipename, os.path.relpath(fn, config.workspace_path), md5))
+ with open(os.path.join(config.workspace_path, '.devtool_md5'), 'a+') as f:
+ md5_str = '%s|%s|%s\n' % (recipename, os.path.relpath(fn, config.workspace_path), md5)
+ f.seek(0, os.SEEK_SET)
+ if not md5_str in f.read():
+ f.write(md5_str)
if os.path.isdir(filename):
for root, _, files in os.walk(filename):
@@ -706,7 +720,7 @@ def _check_preserve(config, recipename):
if splitline[2] != md5:
bb.utils.mkdirhier(preservepath)
preservefile = os.path.basename(removefile)
- logger.warn('File %s modified since it was written, preserving in %s' % (preservefile, preservepath))
+ logger.warning('File %s modified since it was written, preserving in %s' % (preservefile, preservepath))
shutil.move(removefile, os.path.join(preservepath, preservefile))
else:
os.remove(removefile)
@@ -714,22 +728,43 @@ def _check_preserve(config, recipename):
tf.write(line)
os.rename(newfile, origfile)
+def get_staging_kver(srcdir):
+ # Kernel version from work-shared
+ kerver = []
+ staging_kerVer=""
+ if os.path.exists(srcdir) and os.listdir(srcdir):
+ with open(os.path.join(srcdir,"Makefile")) as f:
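+ # Lines 2-4 of the kernel's top-level Makefile hold VERSION, PATCHLEVEL and SUBLEVEL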
+ version = [next(f) for x in range(5)][1:4]
+ for word in version:
+ kerver.append(word.split('= ')[1].split('\n')[0])
+ staging_kerVer = ".".join(kerver)
+ return staging_kerVer
+
+def get_staging_kbranch(srcdir):
+ staging_kbranch = ""
+ if os.path.exists(srcdir) and os.listdir(srcdir):
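+ # Extract the current (starred) branch name from 'git branch' output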
+ (branch, _) = bb.process.run('git branch | grep \* | cut -d \' \' -f2', cwd=srcdir)
+ staging_kbranch = "".join(branch.split('\n')[0])
+ return staging_kbranch
+
def modify(args, config, basepath, workspace):
"""Entry point for the devtool 'modify' subcommand"""
import bb
import oe.recipeutils
+ import oe.patch
+ import oe.path
if args.recipename in workspace:
raise DevtoolError("recipe %s is already in your workspace" %
args.recipename)
- tinfoil = setup_tinfoil(basepath=basepath)
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
try:
rd = parse_recipe(config, tinfoil, args.recipename, True)
if not rd:
return 1
- pn = rd.getVar('PN', True)
+ pn = rd.getVar('PN')
if pn != args.recipename:
logger.info('Mapping %s to %s' % (args.recipename, pn))
if pn in workspace:
@@ -745,13 +780,8 @@ def modify(args, config, basepath, workspace):
raise DevtoolError("--no-extract specified and source path %s does "
"not exist or is not a directory" %
srctree)
- if not args.no_extract:
- tinfoil = _prep_extract_operation(config, basepath, pn, tinfoil)
- if not tinfoil:
- # Error already shown
- return 1
- recipefile = rd.getVar('FILE', True)
+ recipefile = rd.getVar('FILE')
appendfile = recipe_to_append(recipefile, config, args.wildcard)
if os.path.exists(appendfile):
raise DevtoolError("Another variant of recipe %s is already in your "
@@ -763,21 +793,82 @@ def modify(args, config, basepath, workspace):
initial_rev = None
commits = []
+ check_commits = False
+
+ if bb.data.inherits_class('kernel-yocto', rd):
+ # Current set kernel version
+ kernelVersion = rd.getVar('LINUX_VERSION')
+ srcdir = rd.getVar('STAGING_KERNEL_DIR')
+ kbranch = rd.getVar('KBRANCH')
+
+ staging_kerVer = get_staging_kver(srcdir)
+ staging_kbranch = get_staging_kbranch(srcdir)
+ if (os.path.exists(srcdir) and os.listdir(srcdir)) and (kernelVersion in staging_kerVer and staging_kbranch == kbranch):
+ oe.path.copyhardlinktree(srcdir,srctree)
+ workdir = rd.getVar('WORKDIR')
+ srcsubdir = rd.getVar('S')
+ localfilesdir = os.path.join(srctree,'oe-local-files')
+ # Move local source files into separate subdir
+ recipe_patches = [os.path.basename(patch) for patch in oe.recipeutils.get_recipe_patches(rd)]
+ local_files = oe.recipeutils.get_recipe_local_files(rd)
+
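+ # .scc files can reference extra kconf/patch files; copy those into WORKDIR too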
+ for key in local_files.copy():
+ if key.endswith('scc'):
+ sccfile = open(local_files[key], 'r')
+ for l in sccfile:
+ line = l.split()
+ if line and line[0] in ('kconf', 'patch'):
+ cfg = os.path.join(os.path.dirname(local_files[key]), line[-1])
+ if not cfg in local_files.values():
+ local_files[line[-1]] = cfg
+ shutil.copy2(cfg, workdir)
+ sccfile.close()
+
+ # Ignore local files with subdir={BP}
+ srcabspath = os.path.abspath(srcsubdir)
+ local_files = [fname for fname in local_files if os.path.exists(os.path.join(workdir, fname)) and (srcabspath == workdir or not os.path.join(workdir, fname).startswith(srcabspath + os.sep))]
+ if local_files:
+ for fname in local_files:
+ _move_file(os.path.join(workdir, fname), os.path.join(srctree, 'oe-local-files', fname))
+ with open(os.path.join(srctree, 'oe-local-files', '.gitignore'), 'w') as f:
+ f.write('# Ignore local files, by default. Remove this file if you want to commit the directory to Git\n*\n')
+
+ symlink_oelocal_files_srctree(rd,srctree)
+
+ task = 'do_configure'
+ res = tinfoil.build_targets(pn, task, handle_events=True)
+
+ # Copy .config to workspace
+ kconfpath = rd.getVar('B')
+ logger.info('Copying kernel config to workspace')
+ shutil.copy2(os.path.join(kconfpath, '.config'),srctree)
+
+ # Set this to true, we still need to get initial_rev
+ # by parsing the git repo
+ args.no_extract = True
+
if not args.no_extract:
- initial_rev = _extract_source(srctree, False, args.branch, False, rd)
+ initial_rev, _ = _extract_source(srctree, args.keep_temp, args.branch, False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
if not initial_rev:
return 1
logger.info('Source tree extracted to %s' % srctree)
# Get list of commits since this revision
(stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree)
commits = stdout.split()
+ check_commits = True
else:
if os.path.exists(os.path.join(srctree, '.git')):
- # Check if it's a tree previously extracted by us
+ # Check if it's a tree previously extracted by us. This is done
+ # by ensuring that devtool-base and args.branch (devtool) exist.
+ # The check_commits logic will cause an exception if either one
+ # of these doesn't exist
try:
(stdout, _) = bb.process.run('git branch --contains devtool-base', cwd=srctree)
+ bb.process.run('git rev-parse %s' % args.branch, cwd=srctree)
except bb.process.ExecutionError:
stdout = ''
+ if stdout:
+ check_commits = True
for line in stdout.splitlines():
if line.startswith('*'):
(stdout, _) = bb.process.run('git rev-parse devtool-base', cwd=srctree)
@@ -787,9 +878,36 @@ def modify(args, config, basepath, workspace):
(stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
initial_rev = stdout.rstrip()
+ branch_patches = {}
+ if check_commits:
+ # Check if there are override branches
+ (stdout, _) = bb.process.run('git branch', cwd=srctree)
+ branches = []
+ for line in stdout.rstrip().splitlines():
+ branchname = line[2:].rstrip()
+ if branchname.startswith(override_branch_prefix):
+ branches.append(branchname)
+ if branches:
+ logger.warning('SRC_URI is conditionally overridden in this recipe, thus several %s* branches have been created, one for each override that makes changes to SRC_URI. It is recommended that you make changes to the %s branch first, then checkout and rebase each %s* branch and update any unique patches there (duplicates on those branches will be ignored by devtool finish/update-recipe)' % (override_branch_prefix, args.branch, override_branch_prefix))
+ branches.insert(0, args.branch)
+ seen_patches = []
+ for branch in branches:
+ branch_patches[branch] = []
+ (stdout, _) = bb.process.run('git log devtool-base..%s' % branch, cwd=srctree)
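+ # Each devtool-applied commit records its source patch on a line starting with oe.patch.GitApplyTree.patch_line_prefix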
+ for line in stdout.splitlines():
+ line = line.strip()
+ if line.startswith(oe.patch.GitApplyTree.patch_line_prefix):
+ origpatch = line[len(oe.patch.GitApplyTree.patch_line_prefix):].split(':', 1)[-1].strip()
+ if not origpatch in seen_patches:
+ seen_patches.append(origpatch)
+ branch_patches[branch].append(origpatch)
+
+ # Need to grab this here in case the source is within a subdirectory
+ srctreebase = srctree
+
# Check that recipe isn't using a shared workdir
- s = os.path.abspath(rd.getVar('S', True))
- workdir = os.path.abspath(rd.getVar('WORKDIR', True))
+ s = os.path.abspath(rd.getVar('S'))
+ workdir = os.path.abspath(rd.getVar('WORKDIR'))
if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
# Handle if S is set to a subdirectory of the source
srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
@@ -801,7 +919,8 @@ def modify(args, config, basepath, workspace):
# Local files can be modified/tracked in separate subdir under srctree
# Mostly useful for packages with S != WORKDIR
f.write('FILESPATH_prepend := "%s:"\n' %
- os.path.join(srctree, 'oe-local-files'))
+ os.path.join(srctreebase, 'oe-local-files'))
+ f.write('# srctreebase: %s\n' % srctreebase)
f.write('\ninherit externalsrc\n')
f.write('# NOTE: We use pn- overrides here to avoid affecting multiple variants in the case where the recipe uses BBCLASSEXTEND\n')
@@ -813,15 +932,28 @@ def modify(args, config, basepath, workspace):
if bb.data.inherits_class('kernel', rd):
f.write('SRCTREECOVEREDTASKS = "do_validate_branches do_kernel_checkout '
- 'do_fetch do_unpack do_patch do_kernel_configme do_kernel_configcheck"\n')
+ 'do_fetch do_unpack do_kernel_configme do_kernel_configcheck"\n')
+ f.write('\ndo_patch[noexec] = "1"\n')
f.write('\ndo_configure_append() {\n'
' cp ${B}/.config ${S}/.config.baseline\n'
' ln -sfT ${B}/.config ${S}/.config.new\n'
'}\n')
+ if rd.getVarFlag('do_menuconfig','task'):
+ f.write('\ndo_configure_append() {\n'
+ ' cp ${B}/.config ${S}/.config.baseline\n'
+ ' ln -sfT ${B}/.config ${S}/.config.new\n'
+ '}\n')
if initial_rev:
f.write('\n# initial_rev: %s\n' % initial_rev)
for commit in commits:
f.write('# commit: %s\n' % commit)
+ if branch_patches:
+ for branch in branch_patches:
+ if branch == args.branch:
+ continue
+ f.write('# patches_%s: %s\n' % (branch, ','.join(branch_patches[branch])))
+
+ update_unlockedsigs(basepath, workspace, args.fixed_setup, [pn])
_add_md5(config, pn, appendfile)
@@ -832,21 +964,222 @@ def modify(args, config, basepath, workspace):
return 0
-def _get_patchset_revs(srctree, recipe_path, initial_rev=None):
+
+def rename(args, config, basepath, workspace):
+ """Entry point for the devtool 'rename' subcommand"""
+ import bb
+ import oe.recipeutils
+
+ check_workspace_recipe(workspace, args.recipename)
+
+ if not (args.newname or args.version):
+ raise DevtoolError('You must specify a new name, a version with -V/--version, or both')
+
+ recipefile = workspace[args.recipename]['recipefile']
+ if not recipefile:
+ raise DevtoolError('devtool rename can only be used where the recipe file itself is in the workspace (e.g. after devtool add)')
+
+ if args.newname and args.newname != args.recipename:
+ reason = oe.recipeutils.validate_pn(args.newname)
+ if reason:
+ raise DevtoolError(reason)
+ newname = args.newname
+ else:
+ newname = args.recipename
+
+ append = workspace[args.recipename]['bbappend']
+ appendfn = os.path.splitext(os.path.basename(append))[0]
+ splitfn = appendfn.split('_')
+ if len(splitfn) > 1:
+ origfnver = appendfn.split('_')[1]
+ else:
+ origfnver = ''
+
+ recipefilemd5 = None
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ bp = rd.getVar('BP')
+ bpn = rd.getVar('BPN')
+ if newname != args.recipename:
+ localdata = rd.createCopy()
+ localdata.setVar('PN', newname)
+ newbpn = localdata.getVar('BPN')
+ else:
+ newbpn = bpn
+ s = rd.getVar('S', False)
+ src_uri = rd.getVar('SRC_URI', False)
+ pv = rd.getVar('PV')
+
+ # Correct variable values that refer to the upstream source - these
+ # values must stay the same, so if the name/version are changing then
+ # we need to fix them up
+ new_s = s
+ new_src_uri = src_uri
+ if newbpn != bpn:
+ # ${PN} here is technically almost always incorrect, but people do use it
+ new_s = new_s.replace('${BPN}', bpn)
+ new_s = new_s.replace('${PN}', bpn)
+ new_s = new_s.replace('${BP}', '%s-${PV}' % bpn)
+ new_src_uri = new_src_uri.replace('${BPN}', bpn)
+ new_src_uri = new_src_uri.replace('${PN}', bpn)
+ new_src_uri = new_src_uri.replace('${BP}', '%s-${PV}' % bpn)
+ if args.version and origfnver == pv:
+ new_s = new_s.replace('${PV}', pv)
+ new_s = new_s.replace('${BP}', '${BPN}-%s' % pv)
+ new_src_uri = new_src_uri.replace('${PV}', pv)
+ new_src_uri = new_src_uri.replace('${BP}', '${BPN}-%s' % pv)
+ patchfields = {}
+ if new_s != s:
+ patchfields['S'] = new_s
+ if new_src_uri != src_uri:
+ patchfields['SRC_URI'] = new_src_uri
+ if patchfields:
+ recipefilemd5 = bb.utils.md5_file(recipefile)
+ oe.recipeutils.patch_recipe(rd, recipefile, patchfields)
+ newrecipefilemd5 = bb.utils.md5_file(recipefile)
+ finally:
+ tinfoil.shutdown()
+
+ if args.version:
+ newver = args.version
+ else:
+ newver = origfnver
+
+ if newver:
+ newappend = '%s_%s.bbappend' % (newname, newver)
+ newfile = '%s_%s.bb' % (newname, newver)
+ else:
+ newappend = '%s.bbappend' % newname
+ newfile = '%s.bb' % newname
+
+ oldrecipedir = os.path.dirname(recipefile)
+ newrecipedir = os.path.join(config.workspace_path, 'recipes', newname)
+ if oldrecipedir != newrecipedir:
+ bb.utils.mkdirhier(newrecipedir)
+
+ newappend = os.path.join(os.path.dirname(append), newappend)
+ newfile = os.path.join(newrecipedir, newfile)
+
+ # Rename bbappend
+ logger.info('Renaming %s to %s' % (append, newappend))
+ os.rename(append, newappend)
+ # Rename recipe file
+ logger.info('Renaming %s to %s' % (recipefile, newfile))
+ os.rename(recipefile, newfile)
+
+ # Rename source tree if it's the default path
+ appendmd5 = None
+ if not args.no_srctree:
+ srctree = workspace[args.recipename]['srctree']
+ if os.path.abspath(srctree) == os.path.join(config.workspace_path, 'sources', args.recipename):
+ newsrctree = os.path.join(config.workspace_path, 'sources', newname)
+ logger.info('Renaming %s to %s' % (srctree, newsrctree))
+ shutil.move(srctree, newsrctree)
+ # Correct any references (basically EXTERNALSRC*) in the .bbappend
+ appendmd5 = bb.utils.md5_file(newappend)
+ appendlines = []
+ with open(newappend, 'r') as f:
+ for line in f:
+ appendlines.append(line)
+ with open(newappend, 'w') as f:
+ for line in appendlines:
+ if srctree in line:
+ line = line.replace(srctree, newsrctree)
+ f.write(line)
+ newappendmd5 = bb.utils.md5_file(newappend)
+
+ bpndir = None
+ newbpndir = None
+ if newbpn != bpn:
+ bpndir = os.path.join(oldrecipedir, bpn)
+ if os.path.exists(bpndir):
+ newbpndir = os.path.join(newrecipedir, newbpn)
+ logger.info('Renaming %s to %s' % (bpndir, newbpndir))
+ shutil.move(bpndir, newbpndir)
+
+ bpdir = None
+ newbpdir = None
+ if newver != origfnver or newbpn != bpn:
+ bpdir = os.path.join(oldrecipedir, bp)
+ if os.path.exists(bpdir):
+ newbpdir = os.path.join(newrecipedir, '%s-%s' % (newbpn, newver))
+ logger.info('Renaming %s to %s' % (bpdir, newbpdir))
+ shutil.move(bpdir, newbpdir)
+
+ if oldrecipedir != newrecipedir:
+ # Move any stray files and delete the old recipe directory
+ for entry in os.listdir(oldrecipedir):
+ oldpath = os.path.join(oldrecipedir, entry)
+ newpath = os.path.join(newrecipedir, entry)
+ logger.info('Renaming %s to %s' % (oldpath, newpath))
+ shutil.move(oldpath, newpath)
+ os.rmdir(oldrecipedir)
+
+ # Now take care of entries in .devtool_md5
+ md5entries = []
+ with open(os.path.join(config.workspace_path, '.devtool_md5'), 'r') as f:
+ for line in f:
+ md5entries.append(line)
+
+ if bpndir and newbpndir:
+ relbpndir = os.path.relpath(bpndir, config.workspace_path) + '/'
+ else:
+ relbpndir = None
+ if bpdir and newbpdir:
+ relbpdir = os.path.relpath(bpdir, config.workspace_path) + '/'
+ else:
+ relbpdir = None
+
+ with open(os.path.join(config.workspace_path, '.devtool_md5'), 'w') as f:
+ for entry in md5entries:
+ splitentry = entry.rstrip().split('|')
+ if len(splitentry) > 2:
+ if splitentry[0] == args.recipename:
+ splitentry[0] = newname
+ if splitentry[1] == os.path.relpath(append, config.workspace_path):
+ splitentry[1] = os.path.relpath(newappend, config.workspace_path)
+ if appendmd5 and splitentry[2] == appendmd5:
+ splitentry[2] = newappendmd5
+ elif splitentry[1] == os.path.relpath(recipefile, config.workspace_path):
+ splitentry[1] = os.path.relpath(newfile, config.workspace_path)
+ if recipefilemd5 and splitentry[2] == recipefilemd5:
+ splitentry[2] = newrecipefilemd5
+ elif relbpndir and splitentry[1].startswith(relbpndir):
+ splitentry[1] = os.path.relpath(os.path.join(newbpndir, splitentry[1][len(relbpndir):]), config.workspace_path)
+ elif relbpdir and splitentry[1].startswith(relbpdir):
+ splitentry[1] = os.path.relpath(os.path.join(newbpdir, splitentry[1][len(relbpdir):]), config.workspace_path)
+ entry = '|'.join(splitentry) + '\n'
+ f.write(entry)
+ return 0
+
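
As a rough illustration of the .devtool_md5 bookkeeping rewritten above: each entry is a pipe-separated triple of recipe name, workspace-relative path and md5 checksum (field layout inferred from the parsing code above; values below are hypothetical).

    # Hypothetical .devtool_md5 entry and the parse used above
    entry = 'mytool|appends/mytool_1.0.bbappend|d41d8cd98f00b204e9800998ecf8427e\n'
    recipename, relpath, md5sum = entry.rstrip().split('|')
    # On rename, the first two fields are rewritten and the checksum is
    # kept (or swapped for the new file's checksum if the content changed)
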
+
+def _get_patchset_revs(srctree, recipe_path, initial_rev=None, force_patch_refresh=False):
"""Get initial and update rev of a recipe. These are the start point of the
whole patchset and start point for the patches to be re-generated/updated.
"""
import bb
+ # Get current branch
+ stdout, _ = bb.process.run('git rev-parse --abbrev-ref HEAD',
+ cwd=srctree)
+ branchname = stdout.rstrip()
+
# Parse initial rev from recipe if not specified
commits = []
+ patches = []
with open(recipe_path, 'r') as f:
for line in f:
if line.startswith('# initial_rev:'):
if not initial_rev:
initial_rev = line.split(':')[-1].strip()
- elif line.startswith('# commit:'):
+ elif line.startswith('# commit:') and not force_patch_refresh:
commits.append(line.split(':')[-1].strip())
+ elif line.startswith('# patches_%s:' % branchname):
+ patches = line.split(':')[-1].strip().split(',')
update_rev = initial_rev
changed_revs = None
@@ -865,7 +1198,7 @@ def _get_patchset_revs(srctree, recipe_path, initial_rev=None):
except bb.process.ExecutionError as err:
stdout = None
- if stdout is not None:
+ if stdout is not None and not force_patch_refresh:
changed_revs = []
for line in stdout.splitlines():
if line.startswith('+ '):
@@ -873,7 +1206,7 @@ def _get_patchset_revs(srctree, recipe_path, initial_rev=None):
if rev in newcommits:
changed_revs.append(rev)
- return initial_rev, update_rev, changed_revs
+ return initial_rev, update_rev, changed_revs, patches
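
For reference, _get_patchset_revs() reads metadata back out of comment lines that devtool writes into the workspace bbappend; a minimal sketch of the branch-specific patch list parse (file name, hashes and patch names hypothetical):

    # bbappend tail might look like:
    #   # initial_rev: <hash>
    #   # commit: <hash>
    #   # patches_devtool-override-qemuarm: 0001-fix.patch,0002-tweak.patch
    branchname = 'devtool-override-qemuarm'
    patches = []
    with open('mytool_1.0.bbappend', 'r') as f:
        for line in f:
            if line.startswith('# patches_%s:' % branchname):
                patches = line.split(':')[-1].strip().split(',')
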
def _remove_file_entries(srcuri, filelist):
"""Remove file:// entries from SRC_URI"""
@@ -890,8 +1223,20 @@ def _remove_file_entries(srcuri, filelist):
break
return entries, remaining
-def _remove_source_files(append, files, destpath):
+def _replace_srcuri_entry(srcuri, filename, newentry):
+ """Replace entry corresponding to specified file with a new entry"""
+ basename = os.path.basename(filename)
+ for i in range(len(srcuri)):
+ if os.path.basename(srcuri[i].split(';')[0]) == basename:
+ srcuri.pop(i)
+ srcuri.insert(i, newentry)
+ break
+
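
A hedged usage sketch of _replace_srcuri_entry() above: matching is by the basename of the URI path, and the replacement keeps the entry's position in the SRC_URI list (URLs hypothetical):

    srcuri = ['http://example.com/foo-1.0.tar.gz',
              'http://example.com/fix-build.patch;striplevel=2']
    _replace_srcuri_entry(srcuri, 'fix-build.patch', 'file://fix-build.patch')
    # srcuri == ['http://example.com/foo-1.0.tar.gz', 'file://fix-build.patch']
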
+def _remove_source_files(append, files, destpath, no_report_remove=False, dry_run=False):
"""Unlink existing patch files"""
+
+ dry_run_suffix = ' (dry-run)' if dry_run else ''
+
for path in files:
if append:
if not destpath:
@@ -899,22 +1244,24 @@ def _remove_source_files(append, files, destpath):
path = os.path.join(destpath, os.path.basename(path))
if os.path.exists(path):
- logger.info('Removing file %s' % path)
- # FIXME "git rm" here would be nice if the file in question is
- # tracked
- # FIXME there's a chance that this file is referred to by
- # another recipe, in which case deleting wouldn't be the
- # right thing to do
- os.remove(path)
- # Remove directory if empty
- try:
- os.rmdir(os.path.dirname(path))
- except OSError as ose:
- if ose.errno != errno.ENOTEMPTY:
- raise
-
-
-def _export_patches(srctree, rd, start_rev, destdir):
+ if not no_report_remove:
+ logger.info('Removing file %s%s' % (path, dry_run_suffix))
+ if not dry_run:
+ # FIXME "git rm" here would be nice if the file in question is
+ # tracked
+ # FIXME there's a chance that this file is referred to by
+ # another recipe, in which case deleting wouldn't be the
+ # right thing to do
+ os.remove(path)
+ # Remove directory if empty
+ try:
+ os.rmdir(os.path.dirname(path))
+ except OSError as ose:
+ if ose.errno != errno.ENOTEMPTY:
+ raise
+
+
+def _export_patches(srctree, rd, start_rev, destdir, changed_revs=None):
"""Export patches from srctree to given location.
Returns three-tuple of dicts:
1. updated - patches that already exist in SRCURI
@@ -931,6 +1278,7 @@ def _export_patches(srctree, rd, start_rev, destdir):
existing_patches = dict((os.path.basename(path), path) for path in
oe.recipeutils.get_recipe_patches(rd))
+ logger.debug('Existing patches: %s' % existing_patches)
# Generate patches from Git, exclude local files directory
patch_pathspec = _git_exclude_path(srctree, 'oe-local-files')
@@ -943,18 +1291,44 @@ def _export_patches(srctree, rd, start_rev, destdir):
# revision. This does assume that people are using unique shortlog
# values, but they ought to be anyway...
new_basename = seqpatch_re.match(new_patch).group(2)
- found = False
+ match_name = None
for old_patch in existing_patches:
old_basename = seqpatch_re.match(old_patch).group(2)
- if new_basename == old_basename:
- updated[new_patch] = existing_patches.pop(old_patch)
- found = True
- # Rename patch files
- if new_patch != old_patch:
- os.rename(os.path.join(destdir, new_patch),
- os.path.join(destdir, old_patch))
+ old_basename_splitext = os.path.splitext(old_basename)
+ if old_basename.endswith(('.gz', '.bz2', '.Z')) and old_basename_splitext[0] == new_basename:
+ old_patch_noext = os.path.splitext(old_patch)[0]
+ match_name = old_patch_noext
+ break
+ elif new_basename == old_basename:
+ match_name = old_patch
break
- if not found:
+ if match_name:
+ # Rename patch files
+ if new_patch != match_name:
+ os.rename(os.path.join(destdir, new_patch),
+ os.path.join(destdir, match_name))
+ # Need to pop it off the dict now before checking changed_revs
+ oldpath = existing_patches.pop(old_patch)
+ if changed_revs is not None:
+ # Avoid updating patches that have not actually changed
+ with open(os.path.join(destdir, match_name), 'r') as f:
+ firstlineitems = f.readline().split()
+ # Looking for "From <hash>" line
+ if len(firstlineitems) > 1 and len(firstlineitems[1]) == 40:
+ if firstlineitems[1] not in changed_revs:
+ continue
+ # Recompress if necessary
+ if oldpath.endswith(('.gz', '.Z')):
+ bb.process.run(['gzip', match_name], cwd=destdir)
+ if oldpath.endswith('.gz'):
+ match_name += '.gz'
+ else:
+ match_name += '.Z'
+ elif oldpath.endswith('.bz2'):
+ bb.process.run(['bzip2', match_name], cwd=destdir)
+ match_name += '.bz2'
+ updated[match_name] = oldpath
+ else:
added[new_patch] = None
return (updated, added, existing_patches)
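
The compressed-patch matching added above boils down to a suffix-aware basename comparison; a minimal sketch (names hypothetical):

    import os
    new_basename = 'fix.patch'      # freshly exported from git
    old_basename = 'fix.patch.gz'   # as referenced in SRC_URI
    if old_basename.endswith(('.gz', '.bz2', '.Z')) and \
            os.path.splitext(old_basename)[0] == new_basename:
        # Treat as the same patch: rename the new export to the old
        # (uncompressed) name, then recompress to match SRC_URI
        pass
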
@@ -972,7 +1346,7 @@ def _create_kconfig_diff(srctree, rd, outfile):
stdout, stderr = pipe.communicate()
if pipe.returncode == 1:
logger.info("Updating config fragment %s" % outfile)
- with open(outfile, 'w') as fobj:
+ with open(outfile, 'wb') as fobj:
fobj.write(stdout)
elif pipe.returncode == 0:
logger.info("Would remove config fragment %s" % outfile)
@@ -986,7 +1360,7 @@ def _create_kconfig_diff(srctree, rd, outfile):
return False
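
The 'w' to 'wb' change above matters because the diff output comes from a subprocess pipe that yields bytes; a short sketch of the constraint (path hypothetical):

    # pipe.communicate() returns bytes when the Popen streams are not
    # opened in text mode, so the fragment must be written back as bytes
    stdout = b'CONFIG_EXAMPLE=y\n'
    with open('/tmp/fragment.cfg', 'wb') as fobj:  # 'w' would raise TypeError
        fobj.write(stdout)
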
-def _export_local_files(srctree, rd, destdir):
+def _export_local_files(srctree, rd, destdir, srctreebase):
"""Copy local files from srctree to given location.
Returns three-tuple of dicts:
1. updated - files that already exist in SRCURI
@@ -1006,7 +1380,7 @@ def _export_local_files(srctree, rd, destdir):
updated = OrderedDict()
added = OrderedDict()
removed = OrderedDict()
- local_files_dir = os.path.join(srctree, 'oe-local-files')
+ local_files_dir = os.path.join(srctreebase, 'oe-local-files')
git_files = _git_ls_tree(srctree)
if 'oe-local-files' in git_files:
# If tracked by Git, take the files from srctree HEAD. First get
@@ -1019,9 +1393,9 @@ def _export_local_files(srctree, rd, destdir):
new_set = list(_git_ls_tree(srctree, tree, True).keys())
elif os.path.isdir(local_files_dir):
# If not tracked by Git, just copy from working copy
- new_set = _ls_tree(os.path.join(srctree, 'oe-local-files'))
+ new_set = _ls_tree(local_files_dir)
bb.process.run(['cp', '-ax',
- os.path.join(srctree, 'oe-local-files', '.'), destdir])
+ os.path.join(local_files_dir, '.'), destdir])
else:
new_set = []
@@ -1043,6 +1417,20 @@ def _export_local_files(srctree, rd, destdir):
if os.path.exists(os.path.join(local_files_dir, fragment_fn)):
os.unlink(os.path.join(local_files_dir, fragment_fn))
+ # Special handling for cml1, ccmake, etc. bbclasses that generate
+ # configuration fragment files that are consumed as source files
+ for frag_class, frag_name in [("cml1", "fragment.cfg"), ("ccmake", "site-file.cmake")]:
+ if bb.data.inherits_class(frag_class, rd):
+ srcpath = os.path.join(rd.getVar('WORKDIR'), frag_name)
+ if os.path.exists(srcpath):
+ if frag_name not in new_set:
+ new_set.append(frag_name)
+ # copy fragment into destdir
+ shutil.copy2(srcpath, destdir)
+ # copy fragment into the local files dir if it exists
+ if os.path.isdir(local_files_dir):
+ shutil.copy2(srcpath, local_files_dir)
+
if new_set is not None:
for fname in new_set:
if fname in existing_files:
@@ -1053,8 +1441,8 @@ def _export_local_files(srctree, rd, destdir):
elif fname != '.gitignore':
added[fname] = None
- workdir = rd.getVar('WORKDIR', True)
- s = rd.getVar('S', True)
+ workdir = rd.getVar('WORKDIR')
+ s = rd.getVar('S')
if not s.endswith(os.sep):
s += os.sep
@@ -1076,23 +1464,26 @@ def _export_local_files(srctree, rd, destdir):
def _determine_files_dir(rd):
"""Determine the appropriate files directory for a recipe"""
- recipedir = rd.getVar('FILE_DIRNAME', True)
- for entry in rd.getVar('FILESPATH', True).split(':'):
+ recipedir = rd.getVar('FILE_DIRNAME')
+ for entry in rd.getVar('FILESPATH').split(':'):
relpth = os.path.relpath(entry, recipedir)
if not os.sep in relpth:
# One (or zero) levels below only, so we don't put anything in machine-specific directories
if os.path.isdir(entry):
return entry
- return os.path.join(recipedir, rd.getVar('BPN', True))
+ return os.path.join(recipedir, rd.getVar('BPN'))
-def _update_recipe_srcrev(srctree, rd, appendlayerdir, wildcard_version, no_remove):
+def _update_recipe_srcrev(recipename, workspace, srctree, rd, appendlayerdir, wildcard_version, no_remove, no_report_remove, dry_run_outdir=None):
"""Implement the 'srcrev' mode of update-recipe"""
import bb
import oe.recipeutils
- recipefile = rd.getVar('FILE', True)
- logger.info('Updating SRCREV in recipe %s' % os.path.basename(recipefile))
+ dry_run_suffix = ' (dry-run)' if dry_run_outdir else ''
+
+ recipefile = rd.getVar('FILE')
+ recipedir = os.path.dirname(recipefile)
+ logger.info('Updating SRCREV in recipe %s%s' % (os.path.basename(recipefile), dry_run_suffix))
# Get HEAD revision
try:
@@ -1112,18 +1503,21 @@ def _update_recipe_srcrev(srctree, rd, appendlayerdir, wildcard_version, no_remo
srcuri = orig_src_uri.split()
tempdir = tempfile.mkdtemp(prefix='devtool')
update_srcuri = False
+ appendfile = None
try:
local_files_dir = tempfile.mkdtemp(dir=tempdir)
- upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir)
+ srctreebase = workspace[recipename]['srctreebase']
+ upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase)
if not no_remove:
# Find list of existing patches in recipe file
patches_dir = tempfile.mkdtemp(dir=tempdir)
- old_srcrev = (rd.getVar('SRCREV', False) or '')
+ old_srcrev = rd.getVar('SRCREV') or ''
upd_p, new_p, del_p = _export_patches(srctree, rd, old_srcrev,
patches_dir)
+ logger.debug('Patches: update %s, new %s, delete %s' % (dict(upd_p), dict(new_p), dict(del_p)))
# Remove deleted local files and "overlapping" patches
- remove_files = list(del_f.values()) + list(upd_p.values())
+ remove_files = list(del_f.values()) + list(upd_p.values()) + list(del_p.values())
if remove_files:
removedentries = _remove_file_entries(srcuri, remove_files)[0]
update_srcuri = True
@@ -1135,29 +1529,36 @@ def _update_recipe_srcrev(srctree, rd, appendlayerdir, wildcard_version, no_remo
if update_srcuri:
removevalues = {'SRC_URI': removedentries}
patchfields['SRC_URI'] = '\\\n '.join(srcuri)
- _, destpath = oe.recipeutils.bbappend_recipe(
- rd, appendlayerdir, files, wildcardver=wildcard_version,
- extralines=patchfields, removevalues=removevalues)
+ if dry_run_outdir:
+ logger.info('Creating bbappend (dry-run)')
+ else:
+ appendfile, destpath = oe.recipeutils.bbappend_recipe(
+ rd, appendlayerdir, files, wildcardver=wildcard_version,
+ extralines=patchfields, removevalues=removevalues,
+ redirect_output=dry_run_outdir)
else:
files_dir = _determine_files_dir(rd)
for basepath, path in upd_f.items():
- logger.info('Updating file %s' % basepath)
+ logger.info('Updating file %s%s' % (basepath, dry_run_suffix))
if os.path.isabs(basepath):
# Original file (probably with subdir pointing inside source tree)
# so we do not want to move it, just copy
- _copy_file(basepath, path)
+ _copy_file(basepath, path, dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
else:
- _move_file(os.path.join(local_files_dir, basepath), path)
+ _move_file(os.path.join(local_files_dir, basepath), path,
+ dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
update_srcuri = True
for basepath, path in new_f.items():
- logger.info('Adding new file %s' % basepath)
+ logger.info('Adding new file %s%s' % (basepath, dry_run_suffix))
_move_file(os.path.join(local_files_dir, basepath),
- os.path.join(files_dir, basepath))
+ os.path.join(files_dir, basepath),
+ dry_run_outdir=dry_run_outdir,
+ base_outdir=recipedir)
srcuri.append('file://%s' % basepath)
update_srcuri = True
if update_srcuri:
patchfields['SRC_URI'] = ' '.join(srcuri)
- oe.recipeutils.patch_recipe(rd, recipefile, patchfields)
+ ret = oe.recipeutils.patch_recipe(rd, recipefile, patchfields, redirect_output=dry_run_outdir)
finally:
shutil.rmtree(tempdir)
if not 'git://' in orig_src_uri:
@@ -1165,51 +1566,70 @@ def _update_recipe_srcrev(srctree, rd, appendlayerdir, wildcard_version, no_remo
'point to a git repository where you have pushed your '
'changes')
- _remove_source_files(appendlayerdir, remove_files, destpath)
- return True
+ _remove_source_files(appendlayerdir, remove_files, destpath, no_report_remove, dry_run=dry_run_outdir)
+ return True, appendfile, remove_files
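
For orientation, the upd/new/del dicts threaded through the code above follow the contract documented in _export_patches(); illustrative values:

    upd_p = {'0001-fix.patch': '/layer/recipe/files/0001-fix.patch'}  # exists, changed
    new_p = {'0002-new.patch': None}                                  # not yet in SRC_URI
    del_p = {'0003-old.patch': '/layer/recipe/files/0003-old.patch'}  # no longer needed
    # In srcrev mode both updated and deleted patches are dropped from
    # SRC_URI, since their commits now come in via the new SRCREV.
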
-def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wildcard_version, no_remove, initial_rev):
+def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wildcard_version, no_remove, no_report_remove, initial_rev, dry_run_outdir=None, force_patch_refresh=False):
"""Implement the 'patch' mode of update-recipe"""
import bb
import oe.recipeutils
- recipefile = rd.getVar('FILE', True)
+ recipefile = rd.getVar('FILE')
+ recipedir = os.path.dirname(recipefile)
append = workspace[recipename]['bbappend']
if not os.path.exists(append):
raise DevtoolError('unable to find workspace bbappend for recipe %s' %
recipename)
- initial_rev, update_rev, changed_revs = _get_patchset_revs(srctree, append, initial_rev)
+ initial_rev, update_rev, changed_revs, filter_patches = _get_patchset_revs(srctree, append, initial_rev, force_patch_refresh)
if not initial_rev:
raise DevtoolError('Unable to find initial revision - please specify '
'it with --initial-rev')
+ appendfile = None
+ dl_dir = rd.getVar('DL_DIR')
+ if not dl_dir.endswith('/'):
+ dl_dir += '/'
+
+ dry_run_suffix = ' (dry-run)' if dry_run_outdir else ''
+
tempdir = tempfile.mkdtemp(prefix='devtool')
try:
local_files_dir = tempfile.mkdtemp(dir=tempdir)
- upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir)
+ if filter_patches:
+ upd_f = {}
+ new_f = {}
+ del_f = {}
+ else:
+ srctreebase = workspace[recipename]['srctreebase']
+ upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase)
remove_files = []
if not no_remove:
# Get all patches from source tree and check if any should be removed
all_patches_dir = tempfile.mkdtemp(dir=tempdir)
- upd_p, new_p, del_p = _export_patches(srctree, rd, initial_rev,
- all_patches_dir)
+ _, _, del_p = _export_patches(srctree, rd, initial_rev,
+ all_patches_dir)
# Remove deleted local files and patches
remove_files = list(del_f.values()) + list(del_p.values())
# Get updated patches from source tree
patches_dir = tempfile.mkdtemp(dir=tempdir)
- upd_p, new_p, del_p = _export_patches(srctree, rd, update_rev,
- patches_dir)
+ upd_p, new_p, _ = _export_patches(srctree, rd, update_rev,
+ patches_dir, changed_revs)
+ logger.debug('Pre-filtering: update: %s, new: %s' % (dict(upd_p), dict(new_p)))
+ if filter_patches:
+ new_p = OrderedDict()
+ upd_p = OrderedDict((k,v) for k,v in upd_p.items() if k in filter_patches)
+ remove_files = [f for f in remove_files if f in filter_patches]
updatefiles = False
updaterecipe = False
destpath = None
srcuri = (rd.getVar('SRC_URI', False) or '').split()
if appendlayerdir:
- files = dict((os.path.join(local_files_dir, key), val) for
+ files = OrderedDict((os.path.join(local_files_dir, key), val) for
key, val in list(upd_f.items()) + list(new_f.items()))
- files.update(dict((os.path.join(patches_dir, key), val) for
+ files.update(OrderedDict((os.path.join(patches_dir, key), val) for
key, val in list(upd_p.items()) + list(new_p.items())))
if files or remove_files:
removevalues = None
@@ -1220,65 +1640,76 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
remaining = ['file://' + os.path.basename(item) for
item in remaining]
removevalues = {'SRC_URI': removedentries + remaining}
- _, destpath = oe.recipeutils.bbappend_recipe(
+ appendfile, destpath = oe.recipeutils.bbappend_recipe(
rd, appendlayerdir, files,
wildcardver=wildcard_version,
- removevalues=removevalues)
+ removevalues=removevalues,
+ redirect_output=dry_run_outdir)
else:
logger.info('No patches or local source files needed updating')
else:
# Update existing files
+ files_dir = _determine_files_dir(rd)
for basepath, path in upd_f.items():
logger.info('Updating file %s' % basepath)
if os.path.isabs(basepath):
# Original file (probably with subdir pointing inside source tree)
# so we do not want to move it, just copy
- _copy_file(basepath, path)
+ _copy_file(basepath, path,
+ dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
else:
- _move_file(os.path.join(local_files_dir, basepath), path)
+ _move_file(os.path.join(local_files_dir, basepath), path,
+ dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
updatefiles = True
for basepath, path in upd_p.items():
patchfn = os.path.join(patches_dir, basepath)
- if changed_revs is not None:
- # Avoid updating patches that have not actually changed
- with open(patchfn, 'r') as f:
- firstlineitems = f.readline().split()
- if len(firstlineitems) > 1 and len(firstlineitems[1]) == 40:
- if not firstlineitems[1] in changed_revs:
- continue
- logger.info('Updating patch %s' % basepath)
- _move_file(patchfn, path)
+ if os.path.dirname(path) + '/' == dl_dir:
+ # This is a downloaded patch file - we now need to
+ # replace the entry in SRC_URI with our local version
+ logger.info('Replacing remote patch %s with updated local version' % basepath)
+ path = os.path.join(files_dir, basepath)
+ _replace_srcuri_entry(srcuri, basepath, 'file://%s' % basepath)
+ updaterecipe = True
+ else:
+ logger.info('Updating patch %s%s' % (basepath, dry_run_suffix))
+ _move_file(patchfn, path,
+ dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
updatefiles = True
# Add any new files
- files_dir = _determine_files_dir(rd)
for basepath, path in new_f.items():
- logger.info('Adding new file %s' % basepath)
+ logger.info('Adding new file %s%s' % (basepath, dry_run_suffix))
_move_file(os.path.join(local_files_dir, basepath),
- os.path.join(files_dir, basepath))
+ os.path.join(files_dir, basepath),
+ dry_run_outdir=dry_run_outdir,
+ base_outdir=recipedir)
srcuri.append('file://%s' % basepath)
updaterecipe = True
for basepath, path in new_p.items():
- logger.info('Adding new patch %s' % basepath)
+ logger.info('Adding new patch %s%s' % (basepath, dry_run_suffix))
_move_file(os.path.join(patches_dir, basepath),
- os.path.join(files_dir, basepath))
+ os.path.join(files_dir, basepath),
+ dry_run_outdir=dry_run_outdir,
+ base_outdir=recipedir)
srcuri.append('file://%s' % basepath)
updaterecipe = True
# Update recipe, if needed
if _remove_file_entries(srcuri, remove_files)[0]:
updaterecipe = True
if updaterecipe:
- logger.info('Updating recipe %s' % os.path.basename(recipefile))
- oe.recipeutils.patch_recipe(rd, recipefile,
- {'SRC_URI': ' '.join(srcuri)})
+ if not dry_run_outdir:
+ logger.info('Updating recipe %s' % os.path.basename(recipefile))
+ ret = oe.recipeutils.patch_recipe(rd, recipefile,
+ {'SRC_URI': ' '.join(srcuri)},
+ redirect_output=dry_run_outdir)
elif not updatefiles:
# Neither patches nor recipe were updated
logger.info('No patches or files need updating')
- return False
+ return False, None, []
finally:
shutil.rmtree(tempdir)
- _remove_source_files(appendlayerdir, remove_files, destpath)
- return True
+ _remove_source_files(appendlayerdir, remove_files, destpath, no_report_remove, dry_run=dry_run_outdir)
+ return True, appendfile, remove_files
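
A sketch of the filter_patches narrowing above: when updating an override branch, only the patches recorded for that branch in the bbappend are touched (names hypothetical):

    from collections import OrderedDict
    filter_patches = ['0001-override-fix.patch']
    upd_p = OrderedDict([('0001-override-fix.patch', '/layer/files/0001-override-fix.patch'),
                         ('0002-main-only.patch', '/layer/files/0002-main-only.patch')])
    upd_p = OrderedDict((k, v) for k, v in upd_p.items() if k in filter_patches)
    # upd_p now contains only the override branch's own patch
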
def _guess_recipe_update_mode(srctree, rdata):
"""Guess the recipe update mode to use"""
@@ -1302,18 +1733,73 @@ def _guess_recipe_update_mode(srctree, rdata):
return 'patch'
-def _update_recipe(recipename, workspace, rd, mode, appendlayerdir, wildcard_version, no_remove, initial_rev):
+def _update_recipe(recipename, workspace, rd, mode, appendlayerdir, wildcard_version, no_remove, initial_rev, no_report_remove=False, dry_run_outdir=None, no_overrides=False, force_patch_refresh=False):
+ import bb
srctree = workspace[recipename]['srctree']
if mode == 'auto':
mode = _guess_recipe_update_mode(srctree, rd)
- if mode == 'srcrev':
- updated = _update_recipe_srcrev(srctree, rd, appendlayerdir, wildcard_version, no_remove)
- elif mode == 'patch':
- updated = _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wildcard_version, no_remove, initial_rev)
- else:
- raise DevtoolError('update_recipe: invalid mode %s' % mode)
- return updated
+ override_branches = []
+ mainbranch = None
+ startbranch = None
+ if not no_overrides:
+ stdout, _ = bb.process.run('git branch', cwd=srctree)
+ other_branches = []
+ for line in stdout.splitlines():
+ branchname = line[2:]
+ if line.startswith('* '):
+ startbranch = branchname
+ if branchname.startswith(override_branch_prefix):
+ override_branches.append(branchname)
+ else:
+ other_branches.append(branchname)
+
+ if override_branches:
+ logger.debug('_update_recipe: override branches: %s' % override_branches)
+ logger.debug('_update_recipe: other branches: %s' % other_branches)
+ if startbranch.startswith(override_branch_prefix):
+ if len(other_branches) == 1:
+ mainbranch = other_branches[0]
+ else:
+ raise DevtoolError('Unable to determine main branch - please check out the main branch in source tree first')
+ else:
+ mainbranch = startbranch
+
+ checkedout = None
+ anyupdated = False
+ appendfile = None
+ allremoved = []
+ if override_branches:
+ logger.info('Handling main branch (%s)...' % mainbranch)
+ if startbranch != mainbranch:
+ bb.process.run('git checkout %s' % mainbranch, cwd=srctree)
+ checkedout = mainbranch
+ try:
+ branchlist = [mainbranch] + override_branches
+ for branch in branchlist:
+ crd = bb.data.createCopy(rd)
+ if branch != mainbranch:
+ logger.info('Handling branch %s...' % branch)
+ override = branch[len(override_branch_prefix):]
+ crd.appendVar('OVERRIDES', ':%s' % override)
+ bb.process.run('git checkout %s' % branch, cwd=srctree)
+ checkedout = branch
+
+ if mode == 'srcrev':
+ updated, appendf, removed = _update_recipe_srcrev(recipename, workspace, srctree, crd, appendlayerdir, wildcard_version, no_remove, no_report_remove, dry_run_outdir)
+ elif mode == 'patch':
+ updated, appendf, removed = _update_recipe_patch(recipename, workspace, srctree, crd, appendlayerdir, wildcard_version, no_remove, no_report_remove, initial_rev, dry_run_outdir, force_patch_refresh)
+ else:
+ raise DevtoolError('update_recipe: invalid mode %s' % mode)
+ if updated:
+ anyupdated = True
+ if appendf:
+ appendfile = appendf
+ allremoved.extend(removed)
+ finally:
+ if startbranch and checkedout != startbranch:
+ bb.process.run('git checkout %s' % startbranch, cwd=srctree)
+
+ return anyupdated, appendfile, allremoved
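
The override handling above relies on a branch naming convention; assuming override_branch_prefix is 'devtool-override-' (defined elsewhere in this file), a sketch:

    override_branch_prefix = 'devtool-override-'   # assumed value
    branch = 'devtool-override-qemuarm'            # hypothetical branch
    override = branch[len(override_branch_prefix):]
    # crd.appendVar('OVERRIDES', ':%s' % override) then makes variables
    # carrying a _qemuarm override visible while that branch is processed
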
def update_recipe(args, config, basepath, workspace):
"""Entry point for the devtool 'update-recipe' subcommand"""
@@ -1334,12 +1820,17 @@ def update_recipe(args, config, basepath, workspace):
if not rd:
return 1
- updated = _update_recipe(args.recipename, workspace, rd, args.mode, args.append, args.wildcard_version, args.no_remove, args.initial_rev)
+ dry_run_output = None
+ dry_run_outdir = None
+ if args.dry_run:
+ dry_run_output = tempfile.TemporaryDirectory(prefix='devtool')
+ dry_run_outdir = dry_run_output.name
+ updated, _, _ = _update_recipe(args.recipename, workspace, rd, args.mode, args.append, args.wildcard_version, args.no_remove, args.initial_rev, dry_run_outdir=dry_run_outdir, no_overrides=args.no_overrides, force_patch_refresh=args.force_patch_refresh)
if updated:
- rf = rd.getVar('FILE', True)
+ rf = rd.getVar('FILE')
if rf.startswith(config.workspace_path):
- logger.warn('Recipe file %s has been updated but is inside the workspace - you will need to move it (and any associated files next to it) out to the desired layer before using "devtool reset" in order to keep any changes' % rf)
+ logger.warning('Recipe file %s has been updated but is inside the workspace - you will need to move it (and any associated files next to it) out to the desired layer before using "devtool reset" in order to keep any changes' % rf)
finally:
tinfoil.shutdown()
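
One reason both dry_run_output and dry_run_outdir are kept above: tempfile.TemporaryDirectory() removes its directory when the object is finalized, so the object itself must stay referenced for as long as the path is in use; a sketch:

    import tempfile
    dry_run_output = tempfile.TemporaryDirectory(prefix='devtool')
    dry_run_outdir = dry_run_output.name
    # ... write dry-run output under dry_run_outdir ...
    # the directory is deleted once dry_run_output goes out of scope
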
@@ -1349,7 +1840,7 @@ def update_recipe(args, config, basepath, workspace):
def status(args, config, basepath, workspace):
"""Entry point for the devtool 'status' subcommand"""
if workspace:
- for recipe, value in workspace.items():
+ for recipe, value in sorted(workspace.items()):
recipefile = value['recipefile']
if recipefile:
recipestr = ' (%s)' % recipefile
@@ -1361,8 +1852,29 @@ def status(args, config, basepath, workspace):
return 0
-def _reset(recipes, no_clean, config, basepath, workspace):
+def _reset(recipes, no_clean, remove_work, config, basepath, workspace):
"""Reset one or more recipes"""
+ import oe.path
+
+ def clean_preferred_provider(pn, layerconf_path):
+ """Remove PREFERRED_PROVIDER from layer.conf'"""
+ import re
+ layerconf_file = os.path.join(layerconf_path, 'conf', 'layer.conf')
+ new_layerconf_file = os.path.join(layerconf_path, 'conf', '.layer.conf')
+ pprovider_found = False
+ with open(layerconf_file, 'r') as f:
+ lines = f.readlines()
+ with open(new_layerconf_file, 'a') as nf:
+ for line in lines:
+ pprovider_exp = r'^PREFERRED_PROVIDER_.*? = "' + pn + r'"$'
+ if not re.match(pprovider_exp, line):
+ nf.write(line)
+ else:
+ pprovider_found = True
+ if pprovider_found:
+ shutil.move(new_layerconf_file, layerconf_file)
+ else:
+ os.remove(new_layerconf_file)
if recipes and not no_clean:
if len(recipes) == 1:
@@ -1389,37 +1901,57 @@ def _reset(recipes, no_clean, config, basepath, workspace):
for pn in recipes:
_check_preserve(config, pn)
+ appendfile = workspace[pn]['bbappend']
+ if os.path.exists(appendfile):
+ # This shouldn't happen, but is possible if devtool errored out prior to
+ # writing the md5 file. We need to delete this here or the recipe won't
+ # actually be reset
+ os.remove(appendfile)
+
preservepath = os.path.join(config.workspace_path, 'attic', pn, pn)
def preservedir(origdir):
if os.path.exists(origdir):
for root, dirs, files in os.walk(origdir):
for fn in files:
- logger.warn('Preserving %s in %s' % (fn, preservepath))
+ logger.warning('Preserving %s in %s' % (fn, preservepath))
_move_file(os.path.join(origdir, fn),
os.path.join(preservepath, fn))
for dn in dirs:
preservedir(os.path.join(root, dn))
os.rmdir(origdir)
- preservedir(os.path.join(config.workspace_path, 'recipes', pn))
+ recipefile = workspace[pn]['recipefile']
+ if recipefile and oe.path.is_path_parent(config.workspace_path, recipefile):
+ # This should always be true if recipefile is set, but just in case
+ preservedir(os.path.dirname(recipefile))
# We don't automatically create this dir next to appends, but the user can
preservedir(os.path.join(config.workspace_path, 'appends', pn))
- srctree = workspace[pn]['srctree']
- if os.path.isdir(srctree):
- if os.listdir(srctree):
- # We don't want to risk wiping out any work in progress
- logger.info('Leaving source tree %s as-is; if you no '
- 'longer need it then please delete it manually'
- % srctree)
+ srctreebase = workspace[pn]['srctreebase']
+ if os.path.isdir(srctreebase):
+ if os.listdir(srctreebase):
+ if remove_work:
+ logger.info('-r argument used on %s, removing source tree.'
+ ' You will lose any unsaved work' % pn)
+ shutil.rmtree(srctreebase)
+ else:
+ # We don't want to risk wiping out any work in progress
+ logger.info('Leaving source tree %s as-is; if you no '
+ 'longer need it then please delete it manually'
+ % srctreebase)
else:
# This is unlikely, but if it's empty we can just remove it
- os.rmdir(srctree)
+ os.rmdir(srctreebase)
+ clean_preferred_provider(pn, config.workspace_path)
def reset(args, config, basepath, workspace):
"""Entry point for the devtool 'reset' subcommand"""
import bb
+ import shutil
+
+ recipes = ""
+
if args.recipename:
if args.all:
raise DevtoolError("Recipe cannot be specified if -a/--all is used")
@@ -1434,23 +1966,35 @@ def reset(args, config, basepath, workspace):
else:
recipes = args.recipename
- _reset(recipes, args.no_clean, config, basepath, workspace)
+ _reset(recipes, args.no_clean, args.remove_work, config, basepath, workspace)
return 0
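
The clean_preferred_provider() helper above filters layer.conf line by line; a sketch of the kind of line its regex removes (variable and recipe names hypothetical):

    import re
    pn = 'my-provider'
    pprovider_exp = r'^PREFERRED_PROVIDER_.*? = "' + pn + r'"$'
    line = 'PREFERRED_PROVIDER_virtual/libgl = "my-provider"'
    assert re.match(pprovider_exp, line)   # this line would be dropped
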
def _get_layer(layername, d):
"""Determine the base layer path for the specified layer name/path"""
- layerdirs = d.getVar('BBLAYERS', True).split()
- layers = {os.path.basename(p): p for p in layerdirs}
+ layerdirs = d.getVar('BBLAYERS').split()
+ layers = {} # {basename: layer_paths}
+ for p in layerdirs:
+ bn = os.path.basename(p)
+ if bn not in layers:
+ layers[bn] = [p]
+ else:
+ layers[bn].append(p)
# Provide some shortcuts
if layername.lower() in ['oe-core', 'openembedded-core']:
- layerdir = layers.get('meta', None)
+ layername = 'meta'
+ layer_paths = layers.get(layername, None)
+ if not layer_paths:
+ return os.path.abspath(layername)
+ elif len(layer_paths) == 1:
+ return os.path.abspath(layer_paths[0])
else:
- layerdir = layers.get(layername, None)
- if layerdir:
- layerdir = os.path.abspath(layerdir)
- return layerdir or layername
+ # multiple layers having the same base name
+ logger.warning("Multiple layers have the same base name '%s', use the first one '%s'." % (layername, layer_paths[0]))
+ logger.warning("Consider using path instead of base name to specify layer:\n\t\t%s" % '\n\t\t'.join(layer_paths))
+ return os.path.abspath(layer_paths[0])
+
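
A sketch of the duplicate-basename resolution in _get_layer() above, with hypothetical paths: the first BBLAYERS entry wins and the warnings are emitted.

    layers = {'meta-foo': ['/srv/a/meta-foo', '/srv/b/meta-foo']}  # hypothetical
    layer_paths = layers.get('meta-foo')
    if len(layer_paths) > 1:
        chosen = layer_paths[0]   # '/srv/a/meta-foo', warning branch taken
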
def finish(args, config, basepath, workspace):
"""Entry point for the devtool 'finish' subcommand"""
@@ -1459,6 +2003,22 @@ def finish(args, config, basepath, workspace):
check_workspace_recipe(workspace, args.recipename)
+ dry_run_suffix = ' (dry-run)' if args.dry_run else ''
+
+ # Grab the equivalent of COREBASE without having to initialise tinfoil
+ corebasedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
+
+ srctree = workspace[args.recipename]['srctree']
+ check_git_repo_op(srctree, [corebasedir])
+ dirty = check_git_repo_dirty(srctree)
+ if dirty:
+ if args.force:
+ logger.warning('Source tree is not clean, continuing as requested by -f/--force')
+ else:
+ raise DevtoolError('Source tree is not clean:\n\n%s\nEnsure you have committed your changes or use -f/--force if you are sure there\'s nothing that needs to be committed' % dirty)
+
+ no_clean = args.no_clean
+ remove_work = args.remove_work
tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
try:
rd = parse_recipe(config, tinfoil, args.recipename, True)
@@ -1466,7 +2026,9 @@ def finish(args, config, basepath, workspace):
return 1
destlayerdir = _get_layer(args.destination, tinfoil.config_data)
- origlayerdir = oe.recipeutils.find_layerdir(rd.getVar('FILE', True))
+ recipefile = rd.getVar('FILE')
+ recipedir = os.path.dirname(recipefile)
+ origlayerdir = oe.recipeutils.find_layerdir(recipefile)
if not os.path.isdir(destlayerdir):
raise DevtoolError('Unable to find layer or directory matching "%s"' % args.destination)
@@ -1485,6 +2047,8 @@ def finish(args, config, basepath, workspace):
elif line.startswith('# original_files:'):
origfilelist = line.split(':')[1].split()
+ destlayerbasedir = oe.recipeutils.find_layerdir(destlayerdir)
+
if origlayerdir == config.workspace_path:
# Recipe file itself is in workspace, update it there first
appendlayerdir = None
@@ -1496,6 +2060,11 @@ def finish(args, config, basepath, workspace):
destpath = oe.recipeutils.get_bbfile_path(rd, destlayerdir, origrelpath)
if not destpath:
raise DevtoolError("Unable to determine destination layer path - check that %s specifies an actual layer and %s/conf/layer.conf specifies BBFILES. You may also need to specify a more complete path." % (args.destination, destlayerdir))
+ # Warn if the layer isn't in bblayers.conf (the code to create a bbappend will do this in other cases)
+ layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
+ if os.path.abspath(destlayerbasedir) not in layerdirs:
+ bb.warn('Specified destination layer is not currently enabled in bblayers.conf, so the %s recipe will now be unavailable in your current configuration until you add the layer there' % args.recipename)
+
elif destlayerdir == origlayerdir:
# Same layer, update the original recipe
appendlayerdir = None
@@ -1505,36 +2074,103 @@ def finish(args, config, basepath, workspace):
appendlayerdir = destlayerdir
destpath = None
+ # Actually update the recipe / bbappend
+ removing_original = (origpath and origfilelist and oe.recipeutils.find_layerdir(origpath) == destlayerbasedir)
+ dry_run_output = None
+ dry_run_outdir = None
+ if args.dry_run:
+ dry_run_output = tempfile.TemporaryDirectory(prefix='devtool')
+ dry_run_outdir = dry_run_output.name
+ updated, appendfile, removed = _update_recipe(args.recipename, workspace, rd, args.mode, appendlayerdir, wildcard_version=True, no_remove=False, no_report_remove=removing_original, initial_rev=args.initial_rev, dry_run_outdir=dry_run_outdir, no_overrides=args.no_overrides, force_patch_refresh=args.force_patch_refresh)
+ removed = [os.path.relpath(pth, recipedir) for pth in removed]
+
# Remove any old files in the case of an upgrade
- if origpath and origfilelist and oe.recipeutils.find_layerdir(origpath) == oe.recipeutils.find_layerdir(destlayerdir):
+ if removing_original:
for fn in origfilelist:
fnp = os.path.join(origpath, fn)
- try:
- os.remove(fnp)
- except FileNotFoundError:
- pass
-
- # Actually update the recipe / bbappend
- _update_recipe(args.recipename, workspace, rd, args.mode, appendlayerdir, wildcard_version=True, no_remove=False, initial_rev=args.initial_rev)
+ if fn in removed or not os.path.exists(os.path.join(recipedir, fn)):
+ logger.info('Removing file %s%s' % (fnp, dry_run_suffix))
+ if not args.dry_run:
+ try:
+ os.remove(fnp)
+ except FileNotFoundError:
+ pass
if origlayerdir == config.workspace_path and destpath:
# Recipe file itself is in the workspace - need to move it and any
# associated files to the specified layer
- logger.info('Moving recipe file to %s' % destpath)
- recipedir = os.path.dirname(rd.getVar('FILE', True))
+ no_clean = True
+ logger.info('Moving recipe file to %s%s' % (destpath, dry_run_suffix))
for root, _, files in os.walk(recipedir):
for fn in files:
srcpath = os.path.join(root, fn)
relpth = os.path.relpath(os.path.dirname(srcpath), recipedir)
destdir = os.path.abspath(os.path.join(destpath, relpth))
- bb.utils.mkdirhier(destdir)
- shutil.move(srcpath, os.path.join(destdir, fn))
+ destfp = os.path.join(destdir, fn)
+ _move_file(srcpath, destfp, dry_run_outdir=dry_run_outdir, base_outdir=destpath)
+ if dry_run_outdir:
+ import difflib
+ comparelist = []
+ for root, _, files in os.walk(dry_run_outdir):
+ for fn in files:
+ outf = os.path.join(root, fn)
+ relf = os.path.relpath(outf, dry_run_outdir)
+ logger.debug('dry-run: output file %s' % relf)
+ if fn.endswith('.bb'):
+ if origfilelist and origpath and destpath:
+ # Need to match this up with the pre-upgrade recipe file
+ for origf in origfilelist:
+ if origf.endswith('.bb'):
+ comparelist.append((os.path.abspath(os.path.join(origpath, origf)),
+ outf,
+ os.path.abspath(os.path.join(destpath, relf))))
+ break
+ else:
+ # Compare to the existing recipe
+ comparelist.append((recipefile, outf, recipefile))
+ elif fn.endswith('.bbappend'):
+ if appendfile:
+ if os.path.exists(appendfile):
+ comparelist.append((appendfile, outf, appendfile))
+ else:
+ comparelist.append((None, outf, appendfile))
+ else:
+ if destpath:
+ recipedest = destpath
+ elif appendfile:
+ recipedest = os.path.dirname(appendfile)
+ else:
+ recipedest = os.path.dirname(recipefile)
+ destfp = os.path.join(recipedest, relf)
+ if os.path.exists(destfp):
+ comparelist.append((destfp, outf, destfp))
+ output = ''
+ for oldfile, newfile, newfileshow in comparelist:
+ if oldfile:
+ with open(oldfile, 'r') as f:
+ oldlines = f.readlines()
+ else:
+ oldfile = '/dev/null'
+ oldlines = []
+ with open(newfile, 'r') as f:
+ newlines = f.readlines()
+ if not newfileshow:
+ newfileshow = newfile
+ diff = difflib.unified_diff(oldlines, newlines, oldfile, newfileshow)
+ difflines = list(diff)
+ if difflines:
+ output += ''.join(difflines)
+ if output:
+ logger.info('Diff of changed files:\n%s' % output)
finally:
tinfoil.shutdown()
# Everything else has succeeded, we can now reset
- _reset([args.recipename], no_clean=False, config=config, basepath=basepath, workspace=workspace)
+ if args.dry_run:
+ logger.info('Resetting recipe (dry-run)')
+ else:
+ _reset([args.recipename], no_clean=no_clean, remove_work=remove_work, config=config, basepath=basepath, workspace=workspace)
return 0
@@ -1561,13 +2197,19 @@ def register_commands(subparsers, context):
group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
parser_add.add_argument('--fetch', '-f', help='Fetch the specified URI and extract it to create the source tree (deprecated - pass as positional argument instead)', metavar='URI')
+ parser_add.add_argument('--fetch-dev', help='For npm, also fetch devDependencies', action="store_true")
parser_add.add_argument('--version', '-V', help='Version to use within recipe (PV)')
parser_add.add_argument('--no-git', '-g', help='If fetching source, do not set up source tree as a git repository', action="store_true")
- parser_add.add_argument('--autorev', '-a', help='When fetching from a git repository, set SRCREV in the recipe to a floating revision instead of fixed', action="store_true")
+ group = parser_add.add_mutually_exclusive_group()
+ group.add_argument('--srcrev', '-S', help='Source revision to fetch if fetching from an SCM such as git (default latest)')
+ group.add_argument('--autorev', '-a', help='When fetching from a git repository, set SRCREV in the recipe to a floating revision instead of fixed', action="store_true")
+ parser_add.add_argument('--srcbranch', '-B', help='Branch in source repository if fetching from an SCM such as git (default master)')
parser_add.add_argument('--binary', '-b', help='Treat the source tree as something that should be installed verbatim (no compilation, same directory structure). Useful with binary packages e.g. RPMs.', action='store_true')
parser_add.add_argument('--also-native', help='Also add native variant (i.e. support building recipe for the build host as well as the target machine)', action='store_true')
parser_add.add_argument('--src-subdir', help='Specify subdirectory within source tree to use', metavar='SUBDIR')
- parser_add.set_defaults(func=add)
+ parser_add.add_argument('--mirrors', help='Enable PREMIRRORS and MIRRORS for source tree fetching (disabled by default).', action="store_true")
+ parser_add.add_argument('--provides', '-p', help='Specify an alias for the item provided by the recipe. E.g. virtual/libgl')
+ parser_add.set_defaults(func=add, fixed_setup=context.fixed_setup)
parser_modify = subparsers.add_parser('modify', help='Modify the source for an existing recipe',
description='Sets up the build environment to modify the source for an existing recipe. The default behaviour is to extract the source being fetched by the recipe into a git tree so you can work on it; alternatively if you already have your own pre-prepared source tree you can specify -n/--no-extract.',
@@ -1582,7 +2224,9 @@ def register_commands(subparsers, context):
group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
parser_modify.add_argument('--branch', '-b', default="devtool", help='Name for development branch to checkout (when not using -n/--no-extract) (default "%(default)s")')
- parser_modify.set_defaults(func=modify)
+ parser_modify.add_argument('--no-overrides', '-O', action="store_true", help='Do not create branches for other override configurations')
+ parser_modify.add_argument('--keep-temp', help='Keep temporary directory (for debugging)', action="store_true")
+ parser_modify.set_defaults(func=modify, fixed_setup=context.fixed_setup)
parser_extract = subparsers.add_parser('extract', help='Extract the source for an existing recipe',
description='Extracts the source for an existing recipe',
@@ -1590,8 +2234,9 @@ def register_commands(subparsers, context):
parser_extract.add_argument('recipename', help='Name of recipe to extract the source for')
parser_extract.add_argument('srctree', help='Path to where to extract the source tree')
parser_extract.add_argument('--branch', '-b', default="devtool", help='Name for development branch to checkout (default "%(default)s")')
+ parser_extract.add_argument('--no-overrides', '-O', action="store_true", help='Do not create branches for other override configurations')
parser_extract.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
- parser_extract.set_defaults(func=extract, no_workspace=True)
+ parser_extract.set_defaults(func=extract, fixed_setup=context.fixed_setup)
parser_sync = subparsers.add_parser('sync', help='Synchronize the source tree for an existing recipe',
description='Synchronize the previously extracted source tree for an existing recipe',
@@ -1601,7 +2246,16 @@ def register_commands(subparsers, context):
parser_sync.add_argument('srctree', help='Path to the source tree')
parser_sync.add_argument('--branch', '-b', default="devtool", help='Name for development branch to checkout')
parser_sync.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
- parser_sync.set_defaults(func=sync)
+ parser_sync.set_defaults(func=sync, fixed_setup=context.fixed_setup)
+
+ parser_rename = subparsers.add_parser('rename', help='Rename a recipe file in the workspace',
+ description='Renames the recipe file for a recipe in the workspace, changing the name or version part or both, ensuring that all references within the workspace are updated at the same time. Only works when the recipe file itself is in the workspace, e.g. after devtool add. Particularly useful when devtool add did not automatically determine the correct name.',
+ group='working', order=10)
+ parser_rename.add_argument('recipename', help='Current name of recipe to rename')
+ parser_rename.add_argument('newname', nargs='?', help='New name for recipe (optional, not needed if you only want to change the version)')
+ parser_rename.add_argument('--version', '-V', help='Change the version (NOTE: this does not change the version fetched by the recipe, just the version in the recipe file name)')
+ parser_rename.add_argument('--no-srctree', '-s', action='store_true', help='Do not rename the source tree directory (if the default source tree path has been used) - keeping the old name may be desirable if there are internal/other external references to this path')
+ parser_rename.set_defaults(func=rename)
parser_update_recipe = subparsers.add_parser('update-recipe', help='Apply changes from external source tree to recipe',
description='Applies changes from external source tree to a recipe (updating/adding/removing patches as necessary, or by updating SRCREV). Note that these changes need to have been committed to the git repository in order to be recognised.',
@@ -1612,6 +2266,9 @@ def register_commands(subparsers, context):
parser_update_recipe.add_argument('--append', '-a', help='Write changes to a bbappend in the specified layer instead of the recipe', metavar='LAYERDIR')
parser_update_recipe.add_argument('--wildcard-version', '-w', help='In conjunction with -a/--append, use a wildcard to make the bbappend apply to any recipe version', action='store_true')
parser_update_recipe.add_argument('--no-remove', '-n', action="store_true", help='Don\'t remove patches, only add or update')
+ parser_update_recipe.add_argument('--no-overrides', '-O', action="store_true", help='Do not handle other override branches (if they exist)')
+ parser_update_recipe.add_argument('--dry-run', '-N', action="store_true", help='Dry-run (just report changes instead of writing them)')
+ parser_update_recipe.add_argument('--force-patch-refresh', action="store_true", help='Update patches in the layer even if they have not been modified (useful for refreshing patch context)')
parser_update_recipe.set_defaults(func=update_recipe)
parser_status = subparsers.add_parser('status', help='Show workspace status',
@@ -1625,13 +2282,20 @@ def register_commands(subparsers, context):
parser_reset.add_argument('recipename', nargs='*', help='Recipe to reset')
parser_reset.add_argument('--all', '-a', action="store_true", help='Reset all recipes (clear workspace)')
parser_reset.add_argument('--no-clean', '-n', action="store_true", help='Don\'t clean the sysroot to remove recipe output')
+ parser_reset.add_argument('--remove-work', '-r', action="store_true", help='Clean the sources directory along with the append file')
parser_reset.set_defaults(func=reset)
parser_finish = subparsers.add_parser('finish', help='Finish working on a recipe in your workspace',
- description='Pushes any committed changes to the specified recipe to the specified layer and removes it from your workspace. Roughly equivalent to an update-recipe followed by reset, except the update-recipe step will do the "right thing" depending on the recipe and the destination layer specified.',
+ description='Pushes any committed changes to the specified recipe to the specified layer and removes it from your workspace. Roughly equivalent to an update-recipe followed by reset, except the update-recipe step will do the "right thing" depending on the recipe and the destination layer specified. Note that your changes must have been committed to the git repository in order to be recognised.',
group='working', order=-100)
parser_finish.add_argument('recipename', help='Recipe to finish')
parser_finish.add_argument('destination', help='Layer/path to put recipe into. Can be the name of a layer configured in your bblayers.conf, the path to the base of a layer, or a partial path inside a layer. %(prog)s will attempt to complete the path based on the layer\'s structure.')
parser_finish.add_argument('--mode', '-m', choices=['patch', 'srcrev', 'auto'], default='auto', help='Update mode (where %(metavar)s is %(choices)s; default is %(default)s)', metavar='MODE')
parser_finish.add_argument('--initial-rev', help='Override starting revision for patches')
+ parser_finish.add_argument('--force', '-f', action="store_true", help='Force continuing even if there are uncommitted changes in the source tree repository')
+ parser_finish.add_argument('--remove-work', '-r', action="store_true", help='Clean the sources directory under the workspace')
+ parser_finish.add_argument('--no-clean', '-n', action="store_true", help='Don\'t clean the sysroot to remove recipe output')
+ parser_finish.add_argument('--no-overrides', '-O', action="store_true", help='Do not handle other override branches (if they exist)')
+ parser_finish.add_argument('--dry-run', '-N', action="store_true", help='Dry-run (just report changes instead of writing them)')
+ parser_finish.add_argument('--force-patch-refresh', action="store_true", help='Update patches in the layer even if they have not been modified (useful for refreshing patch context)')
parser_finish.set_defaults(func=finish)
diff --git a/scripts/lib/devtool/upgrade.py b/scripts/lib/devtool/upgrade.py
index a4239f1cd2..cb6dce378a 100644
--- a/scripts/lib/devtool/upgrade.py
+++ b/scripts/lib/devtool/upgrade.py
@@ -1,19 +1,8 @@
# Development tool - upgrade command plugin
#
-# Copyright (C) 2014-2015 Intel Corporation
+# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool upgrade plugin"""
@@ -27,9 +16,13 @@ import argparse
import scriptutils
import errno
import bb
+
+devtool_path = os.path.dirname(os.path.realpath(__file__)) + '/../../../meta/lib'
+sys.path = sys.path + [devtool_path]
+
import oe.recipeutils
from devtool import standard
-from devtool import exec_build_env_command, setup_tinfoil, DevtoolError, parse_recipe, use_external_build
+from devtool import exec_build_env_command, setup_tinfoil, DevtoolError, parse_recipe, use_external_build, update_unlockedsigs, check_prerelease_version
logger = logging.getLogger('devtool')
@@ -39,7 +32,7 @@ def _run(cmd, cwd=''):
def _get_srctree(tmpdir):
srctree = tmpdir
- dirs = os.listdir(tmpdir)
+ dirs = scriptutils.filter_src_subdirs(tmpdir)
if len(dirs) == 1:
srctree = os.path.join(tmpdir, dirs[0])
return srctree
@@ -51,24 +44,13 @@ def _copy_source_code(orig, dest):
dest_path = os.path.join(dest, path)
shutil.move(os.path.join(orig, path), dest_path)
-def _get_checksums(rf):
- import re
- checksums = {}
- with open(rf) as f:
- for line in f:
- for cs in ['md5sum', 'sha256sum']:
- m = re.match("^SRC_URI\[%s\].*=.*\"(.*)\"" % cs, line)
- if m:
- checksums[cs] = m.group(1)
- return checksums
-
def _remove_patch_dirs(recipefolder):
for root, dirs, files in os.walk(recipefolder):
for d in dirs:
shutil.rmtree(os.path.join(root,d))
def _recipe_contains(rd, var):
- rf = rd.getVar('FILE', True)
+ rf = rd.getVar('FILE')
varfiles = oe.recipeutils.get_var_files(rf, [var], rd)
for var, fn in varfiles.items():
if fn and fn.startswith(os.path.dirname(rf) + os.sep):
@@ -117,7 +99,7 @@ def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d)
brf = os.path.basename(os.path.splitext(rc)[0]) # rc basename
srctree = os.path.abspath(srctree)
- pn = d.getVar('PN',True)
+ pn = d.getVar('PN')
af = os.path.join(appendpath, '%s.bbappend' % brf)
with open(af, 'w') as f:
f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n\n')
@@ -132,7 +114,7 @@ def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d)
if rev:
f.write('# initial_rev: %s\n' % rev)
if copied:
- f.write('# original_path: %s\n' % os.path.dirname(d.getVar('FILE', True)))
+ f.write('# original_path: %s\n' % os.path.dirname(d.getVar('FILE')))
f.write('# original_files: %s\n' % ' '.join(copied))
return af
@@ -140,21 +122,25 @@ def _cleanup_on_error(rf, srctree):
rfp = os.path.split(rf)[0] # recipe folder
rfpp = os.path.split(rfp)[0] # recipes folder
if os.path.exists(rfp):
- shutil.rmtree(b)
+ shutil.rmtree(rfp)
if not len(os.listdir(rfpp)):
os.rmdir(rfpp)
srctree = os.path.abspath(srctree)
if os.path.exists(srctree):
shutil.rmtree(srctree)
-def _upgrade_error(e, rf, srctree):
- if rf:
- cleanup_on_error(rf, srctree)
+def _upgrade_error(e, rf, srctree, keep_failure=False, extramsg=None):
+ if rf and not keep_failure:
+ _cleanup_on_error(rf, srctree)
logger.error(e)
- raise DevtoolError(e)
+ if extramsg:
+ logger.error(extramsg)
+ if keep_failure:
+ logger.info('Preserving failed upgrade files (--keep-failure)')
+ sys.exit(1)
def _get_uri(rd):
- srcuris = rd.getVar('SRC_URI', True).split()
+ srcuris = rd.getVar('SRC_URI').split()
if not len(srcuris):
raise DevtoolError('SRC_URI not found on recipe')
# Get first non-local entry in SRC_URI - usually by convention it's
@@ -176,7 +162,7 @@ def _get_uri(rd):
srcuri = rev_re.sub('', srcuri)
return srcuri, srcrev
-def _extract_new_source(newpv, srctree, no_patch, srcrev, branch, keep_temp, tinfoil, rd):
+def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, keep_temp, tinfoil, rd):
"""Extract sources of a recipe with a new version"""
def __run(cmd):
@@ -185,7 +171,7 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, branch, keep_temp, tin
crd = rd.createCopy()
- pv = crd.getVar('PV', True)
+ pv = crd.getVar('PV')
crd.setVar('PV', newpv)
tmpsrctree = None
@@ -198,17 +184,43 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, branch, keep_temp, tin
__run('git tag -f devtool-base-new')
md5 = None
sha256 = None
+ _, _, _, _, _, params = bb.fetch2.decodeurl(uri)
+ srcsubdir_rel = params.get('destsuffix', 'git')
+ if not srcbranch:
+ check_branch, check_branch_err = __run('git branch -r --contains %s' % srcrev)
+ get_branch = [x.strip() for x in check_branch.splitlines()]
+ # Remove HEAD reference point and drop remote prefix
+ get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
+ if 'master' in get_branch:
+ # If it is master, we do not need to append 'branch=master' as this is the default.
+ # Even when get_branch contains multiple entries, if 'master' is one of
+ # them, we default to taking from 'master'
+ srcbranch = ''
+ elif len(get_branch) == 1:
+ # If 'master' is not in get_branch and get_branch contains only one entry, store it in 'srcbranch'
+ srcbranch = get_branch[0]
+ else:
+ # If get_branch contains more than one entry, display an error and exit.
+ mbrch = '\n ' + '\n '.join(get_branch)
+ raise DevtoolError('Revision %s was found on multiple branches: %s\nPlease provide the correct branch in the devtool command with "--srcbranch" or "-B" option.' % (srcrev, mbrch))
else:
__run('git checkout devtool-base -b devtool-%s' % newpv)
tmpdir = tempfile.mkdtemp(prefix='devtool')
try:
- md5, sha256 = scriptutils.fetch_uri(tinfoil.config_data, uri, tmpdir, rev)
- except bb.fetch2.FetchError as e:
+ checksums, ftmpdir = scriptutils.fetch_url(tinfoil, uri, rev, tmpdir, logger, preserve_tmp=keep_temp)
+ except scriptutils.FetchUrlFailure as e:
raise DevtoolError(e)
+ if ftmpdir and keep_temp:
+ logger.info('Fetch temp directory is %s' % ftmpdir)
+
+ md5 = checksums['md5sum']
+ sha256 = checksums['sha256sum']
+
tmpsrctree = _get_srctree(tmpdir)
srctree = os.path.abspath(srctree)
+ srcsubdir_rel = os.path.relpath(tmpsrctree, tmpdir)
# Delete all sources so we ensure no stray files are left over
for item in os.listdir(srctree):
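For reference, the branch-detection step above can be reduced to a small standalone sketch (assuming a local git checkout with an 'origin' remote; subprocess stands in for devtool's __run helper, and repo_dir/srcrev are hypothetical inputs):

    # Sketch: find which remote branch contains a given revision, mirroring
    # the logic in _extract_new_source() above.
    import subprocess

    def find_branch_containing(repo_dir, srcrev):
        out = subprocess.check_output(
            ['git', 'branch', '-r', '--contains', srcrev],
            cwd=repo_dir, text=True)
        branches = [x.strip() for x in out.splitlines()]
        # Drop the symbolic HEAD entry and the remote prefix (e.g. 'origin/')
        branches = [x.split('/', 1)[1] for x in branches
                    if not x.startswith('origin/HEAD')]
        if 'master' in branches:
            return ''      # default branch; no branch= parameter needed
        if len(branches) == 1:
            return branches[0]
        raise ValueError('revision %s found on multiple branches: %s'
                         % (srcrev, ', '.join(branches)))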
@@ -224,8 +236,15 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, branch, keep_temp, tin
_copy_source_code(tmpsrctree, srctree)
(stdout,_) = __run('git ls-files --modified --others --exclude-standard')
- for f in stdout.splitlines():
- __run('git add "%s"' % f)
+ filelist = stdout.splitlines()
+ pbar = bb.ui.knotty.BBProgress('Adding changed files', len(filelist))
+ pbar.start()
+ batchsize = 100
+ for i in range(0, len(filelist), batchsize):
+ batch = filelist[i:i+batchsize]
+ __run('git add -A %s' % ' '.join(['"%s"' % item for item in batch]))
+ pbar.update(i)
+ pbar.finish()
useroptions = []
oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=rd)
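The batched 'git add' above avoids passing every changed file to a single command invocation, which can exceed the operating system's argument-length limit in large source trees. A minimal sketch of the same chunking pattern (repo_dir and filelist are hypothetical inputs):

    # Sketch: stage files in fixed-size batches to keep each git command
    # line short.
    import subprocess

    def git_add_in_batches(repo_dir, filelist, batchsize=100):
        for i in range(0, len(filelist), batchsize):
            batch = filelist[i:i + batchsize]
            subprocess.check_call(['git', 'add', '-A', '--'] + batch,
                                  cwd=repo_dir)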
@@ -237,10 +256,8 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, branch, keep_temp, tin
if no_patch:
patches = oe.recipeutils.get_recipe_patches(crd)
- if len(patches):
- logger.warn('By user choice, the following patches will NOT be applied')
- for patch in patches:
- logger.warn("%s" % os.path.basename(patch))
+ if patches:
+ logger.warning('By user choice, the following patches will NOT be applied to the new source tree:\n %s' % '\n '.join([os.path.basename(patch) for patch in patches]))
else:
__run('git checkout devtool-patched -b %s' % branch)
skiptag = False
@@ -249,9 +266,9 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, branch, keep_temp, tin
except bb.process.ExecutionError as e:
skiptag = True
if 'conflict' in e.stdout:
- logger.warn('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
+ logger.warning('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
else:
- logger.warn('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+ logger.warning('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
if not skiptag:
if uri.startswith('git://'):
suffix = 'new'
@@ -264,21 +281,45 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, branch, keep_temp, tin
logger.info('Preserving temporary directory %s' % tmpsrctree)
else:
shutil.rmtree(tmpsrctree)
+ if tmpdir != tmpsrctree:
+ shutil.rmtree(tmpdir)
- return (rev, md5, sha256)
+ return (rev, md5, sha256, srcbranch, srcsubdir_rel)
-def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, workspace, tinfoil, rd):
+def _add_license_diff_to_recipe(path, diff):
+ notice_text = """# FIXME: the LIC_FILES_CHKSUM values have been updated by 'devtool upgrade'.
+# The following is the difference between the old and the new license text.
+# Please update the LICENSE value if needed, and summarize the changes in
+# the commit message via 'License-Update:' tag.
+# (example: 'License-Update: copyright years updated.')
+#
+# The changes:
+#
+"""
+ commented_diff = "\n".join(["# {}".format(l) for l in diff.split('\n')])
+ with open(path, 'rb') as f:
+ orig_content = f.read()
+ with open(path, 'wb') as f:
+ f.write(notice_text.encode())
+ f.write(commented_diff.encode())
+ f.write("\n#\n\n".encode())
+ f.write(orig_content)
+
+def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses, srctree, keep_failure):
"""Creates the new recipe under workspace"""
- bpn = rd.getVar('BPN', True)
+ bpn = rd.getVar('BPN')
path = os.path.join(workspace, 'recipes', bpn)
bb.utils.mkdirhier(path)
- copied, _ = oe.recipeutils.copy_recipe_files(rd, path)
+ copied, _ = oe.recipeutils.copy_recipe_files(rd, path, all_variants=True)
+ if not copied:
+ raise DevtoolError('Internal error - no files were copied for recipe %s' % bpn)
+ logger.debug('Copied %s to %s' % (copied, path))
- oldpv = rd.getVar('PV', True)
+ oldpv = rd.getVar('PV')
if not newpv:
newpv = oldpv
- origpath = rd.getVar('FILE', True)
+ origpath = rd.getVar('FILE')
fullpath = _rename_recipe_files(origpath, bpn, oldpv, newpv, path)
logger.debug('Upgraded %s => %s' % (origpath, fullpath))
@@ -316,32 +357,160 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, workspace, tinfoil
newvalues['PR'] = None
+ # Work out which SRC_URI entries have changed in case the entry uses a name
+ crd = rd.createCopy()
+ crd.setVar('PV', newpv)
+ for var, value in newvalues.items():
+ crd.setVar(var, value)
+ old_src_uri = (rd.getVar('SRC_URI') or '').split()
+ new_src_uri = (crd.getVar('SRC_URI') or '').split()
+ newnames = []
+ addnames = []
+ for newentry in new_src_uri:
+ _, _, _, _, _, params = bb.fetch2.decodeurl(newentry)
+ if 'name' in params:
+ newnames.append(params['name'])
+ if newentry not in old_src_uri:
+ addnames.append(params['name'])
+ # Find what's been set in the original recipe
+ oldnames = []
+ noname = False
+ for varflag in rd.getVarFlags('SRC_URI'):
+ if varflag.endswith(('.md5sum', '.sha256sum')):
+ name = varflag.rsplit('.', 1)[0]
+ if name not in oldnames:
+ oldnames.append(name)
+ elif varflag in ['md5sum', 'sha256sum']:
+ noname = True
+ # Even if SRC_URI has named entries it doesn't have to actually use the name
+ if noname and addnames and addnames[0] not in oldnames:
+ addnames = []
+ # Drop any old names (the name actually might include ${PV})
+ for name in oldnames:
+ if name not in newnames:
+ newvalues['SRC_URI[%s.md5sum]' % name] = None
+ newvalues['SRC_URI[%s.sha256sum]' % name] = None
+
if md5 and sha256:
- newvalues['SRC_URI[md5sum]'] = md5
- newvalues['SRC_URI[sha256sum]'] = sha256
+ if addnames:
+ nameprefix = '%s.' % addnames[0]
+ else:
+ nameprefix = ''
+ newvalues['SRC_URI[%smd5sum]' % nameprefix] = md5
+ newvalues['SRC_URI[%ssha256sum]' % nameprefix] = sha256
+
+ if srcsubdir_new != srcsubdir_old:
+ s_subdir_old = os.path.relpath(os.path.abspath(rd.getVar('S')), rd.getVar('WORKDIR'))
+ s_subdir_new = os.path.relpath(os.path.abspath(crd.getVar('S')), crd.getVar('WORKDIR'))
+ if srcsubdir_old == s_subdir_old and srcsubdir_new != s_subdir_new:
+ # Subdir for old extracted source matches what S points to (it should!)
+ # but subdir for new extracted source doesn't match what S will be
+ newvalues['S'] = '${WORKDIR}/%s' % srcsubdir_new.replace(newpv, '${PV}')
+ if crd.expand(newvalues['S']) == crd.expand('${WORKDIR}/${BP}'):
+ # It's the default, drop it
+ # FIXME what if S is being set in a .inc?
+ newvalues['S'] = None
+ logger.info('Source subdirectory has changed, dropping S value since it now matches the default ("${WORKDIR}/${BP}")')
+ else:
+ logger.info('Source subdirectory has changed, updating S value')
- rd = oe.recipeutils.parse_recipe(tinfoil.cooker, fullpath, None)
+ if license_diff:
+ newlicchksum = " ".join(["file://{}".format(l['path']) +
+ (";beginline={}".format(l['beginline']) if l['beginline'] else "") +
+ (";endline={}".format(l['endline']) if l['endline'] else "") +
+ (";md5={}".format(l['actual_md5'])) for l in new_licenses])
+ newvalues["LIC_FILES_CHKSUM"] = newlicchksum
+ _add_license_diff_to_recipe(fullpath, license_diff)
+
+ try:
+ rd = tinfoil.parse_recipe_file(fullpath, False)
+ except bb.tinfoil.TinfoilCommandFailed as e:
+ _upgrade_error(e, fullpath, srctree, keep_failure, 'Parsing of upgraded recipe failed')
oe.recipeutils.patch_recipe(rd, fullpath, newvalues)
return fullpath, copied
+
+def _check_git_config():
+ def getconfig(name):
+ try:
+ value = bb.process.run('git config --global %s' % name)[0].strip()
+ except bb.process.ExecutionError as e:
+ if e.exitcode == 1:
+ value = None
+ else:
+ raise
+ return value
+
+ username = getconfig('user.name')
+ useremail = getconfig('user.email')
+ configerr = []
+ if not username:
+ configerr.append('Please set your name using:\n git config --global user.name')
+ if not useremail:
+ configerr.append('Please set your email using:\n git config --global user.email')
+ if configerr:
+ raise DevtoolError('Your git configuration is incomplete which will prevent rebases from working:\n' + '\n'.join(configerr))
+
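'git config --global <name>' exits with status 1 when the value is unset, which is why the helper above treats exit code 1 as "not configured" rather than as a failure. A standalone approximation of the same check using only the standard library:

    # Sketch: read a global git config value, returning None when it is
    # unset (git exits with status 1 in that case).
    import subprocess

    def get_git_config(name):
        result = subprocess.run(['git', 'config', '--global', name],
                                capture_output=True, text=True)
        if result.returncode == 1:
            return None
        result.check_returncode()   # re-raise any other failure
        return result.stdout.strip()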
+def _extract_licenses(srcpath, recipe_licenses):
+ licenses = []
+ for url in recipe_licenses.split():
+ license = {}
+ (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
+ license['path'] = path
+ license['md5'] = parm.get('md5', '')
+ license['beginline'], license['endline'] = 0, 0
+ if 'beginline' in parm:
+ license['beginline'] = int(parm['beginline'])
+ if 'endline' in parm:
+ license['endline'] = int(parm['endline'])
+ license['text'] = []
+ with open(os.path.join(srcpath, path), 'rb') as f:
+ import hashlib
+ actual_md5 = hashlib.md5()
+ lineno = 0
+ for line in f:
+ lineno += 1
+ if (lineno >= license['beginline']) and ((lineno <= license['endline']) or not license['endline']):
+ license['text'].append(line.decode(errors='ignore'))
+ actual_md5.update(line)
+ license['actual_md5'] = actual_md5.hexdigest()
+ licenses.append(license)
+ return licenses
+
+def _generate_license_diff(old_licenses, new_licenses):
+ need_diff = False
+ for l in new_licenses:
+ if l['md5'] != l['actual_md5']:
+ need_diff = True
+ break
+ if not need_diff:
+ return None
+
+ import difflib
+ diff = ''
+ for old, new in zip(old_licenses, new_licenses):
+ for line in difflib.unified_diff(old['text'], new['text'], old['path'], new['path']):
+ diff = diff + line
+ return diff
+
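_generate_license_diff() leans on difflib.unified_diff over the license texts collected by _extract_licenses(). A self-contained illustration with two hypothetical license texts:

    # Sketch: produce a unified diff between an old and a new license text,
    # as _generate_license_diff() does for each old/new pair.
    import difflib

    old_text = ['Copyright (C) 2016 Example Corp\n', 'All rights reserved.\n']
    new_text = ['Copyright (C) 2016-2018 Example Corp\n', 'All rights reserved.\n']

    diff = ''.join(difflib.unified_diff(old_text, new_text, 'COPYING', 'COPYING'))
    print(diff)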
def upgrade(args, config, basepath, workspace):
"""Entry point for the devtool 'upgrade' subcommand"""
if args.recipename in workspace:
raise DevtoolError("recipe %s is already in your workspace" % args.recipename)
- if not args.version and not args.srcrev:
- raise DevtoolError("You must provide a version using the --version/-V option, or for recipes that fetch from an SCM such as git, the --srcrev/-S option")
if args.srcbranch and not args.srcrev:
raise DevtoolError("If you specify --srcbranch/-B then you must use --srcrev/-S to specify the revision" % args.recipename)
+ _check_git_config()
+
tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
try:
rd = parse_recipe(config, tinfoil, args.recipename, True)
if not rd:
return 1
- pn = rd.getVar('PN', True)
+ pn = rd.getVar('PN')
if pn != args.recipename:
logger.info('Mapping %s to %s' % (args.recipename, pn))
if pn in workspace:
@@ -352,37 +521,97 @@ def upgrade(args, config, basepath, workspace):
else:
srctree = standard.get_default_srctree(config, pn)
+ # try to automatically discover latest version and revision if not provided on command line
+ if not args.version and not args.srcrev:
+ version_info = oe.recipeutils.get_recipe_upstream_version(rd)
+ if version_info['version'] and not version_info['version'].endswith("new-commits-available"):
+ args.version = version_info['version']
+ if version_info['revision']:
+ args.srcrev = version_info['revision']
+ if not args.version and not args.srcrev:
+ raise DevtoolError("Automatic discovery of latest version/revision failed - you must provide a version using the --version/-V option, or for recipes that fetch from an SCM such as git, the --srcrev/-S option.")
+
standard._check_compatible_recipe(pn, rd)
- old_srcrev = rd.getVar('SRCREV', True)
+ old_srcrev = rd.getVar('SRCREV')
if old_srcrev == 'INVALID':
old_srcrev = None
if old_srcrev and not args.srcrev:
raise DevtoolError("Recipe specifies a SRCREV value; you must specify a new one when upgrading")
- if rd.getVar('PV', True) == args.version and old_srcrev == args.srcrev:
+ old_ver = rd.getVar('PV')
+ if old_ver == args.version and old_srcrev == args.srcrev:
raise DevtoolError("Current and upgrade versions are the same version")
+ if args.version:
+ if bb.utils.vercmp_string(args.version, old_ver) < 0:
+ logger.warning('Upgrade version %s compares as less than the current version %s. If you are using a package feed for on-target upgrades or providing this recipe for general consumption, then you should increment PE in the recipe (or if there is no current PE value set, set it to "1")' % (args.version, old_ver))
+ check_prerelease_version(args.version, 'devtool upgrade')
rf = None
+ license_diff = None
try:
- rev1 = standard._extract_source(srctree, False, 'devtool-orig', False, rd)
- rev2, md5, sha256 = _extract_new_source(args.version, srctree, args.no_patch,
- args.srcrev, args.branch, args.keep_temp,
+ logger.info('Extracting current version source...')
+ rev1, srcsubdir1 = standard._extract_source(srctree, False, 'devtool-orig', False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
+ old_licenses = _extract_licenses(srctree, rd.getVar('LIC_FILES_CHKSUM'))
+ logger.info('Extracting upgraded version source...')
+ rev2, md5, sha256, srcbranch, srcsubdir2 = _extract_new_source(args.version, srctree, args.no_patch,
+ args.srcrev, args.srcbranch, args.branch, args.keep_temp,
tinfoil, rd)
- rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, args.srcbranch, config.workspace_path, tinfoil, rd)
+ new_licenses = _extract_licenses(srctree, rd.getVar('LIC_FILES_CHKSUM'))
+ license_diff = _generate_license_diff(old_licenses, new_licenses)
+ rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses, srctree, args.keep_failure)
except bb.process.CmdError as e:
- _upgrade_error(e, rf, srctree)
+ _upgrade_error(e, rf, srctree, args.keep_failure)
except DevtoolError as e:
- _upgrade_error(e, rf, srctree)
+ _upgrade_error(e, rf, srctree, args.keep_failure)
standard._add_md5(config, pn, os.path.dirname(rf))
af = _write_append(rf, srctree, args.same_dir, args.no_same_dir, rev2,
copied, config.workspace_path, rd)
standard._add_md5(config, pn, af)
+
+ update_unlockedsigs(basepath, workspace, args.fixed_setup, [pn])
+
logger.info('Upgraded source extracted to %s' % srctree)
logger.info('New recipe is %s' % rf)
+ if license_diff:
+ logger.info('License checksums have been updated in the new recipe; please refer to it for the difference between the old and the new license texts.')
finally:
tinfoil.shutdown()
return 0
+def latest_version(args, config, basepath, workspace):
+ """Entry point for the devtool 'latest_version' subcommand"""
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+ version_info = oe.recipeutils.get_recipe_upstream_version(rd)
+ # "new-commits-available" is an indication that upstream never issues version tags
+ if not version_info['version'].endswith("new-commits-available"):
+ logger.info("Current version: {}".format(version_info['current_version']))
+ logger.info("Latest version: {}".format(version_info['version']))
+ if version_info['revision']:
+ logger.info("Latest version's commit: {}".format(version_info['revision']))
+ else:
+ logger.info("Latest commit: {}".format(version_info['revision']))
+ finally:
+ tinfoil.shutdown()
+ return 0
+
+def check_upgrade_status(args, config, basepath, workspace):
+ if not args.recipe:
+ logger.info("Checking the upstream status for all recipes may take a few minutes")
+ results = oe.recipeutils.get_recipe_upgrade_status(args.recipe)
+ for result in results:
+ # pn, update_status, current, latest, maintainer, latest_commit, no_update_reason
+ if args.all or result[1] != 'MATCH':
+ logger.info("{:25} {:15} {:15} {} {} {}".format( result[0],
+ result[2],
+ result[1] if result[1] != 'UPDATE' else (result[3] if not result[3].endswith("new-commits-available") else "new commits"),
+ result[4],
+ result[5] if result[5] != 'N/A' else "",
+ "cannot be updated due to: %s" %(result[6]) if result[6] else ""))
+
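With the format string above, each reported recipe becomes one fixed-width table line. An illustration with made-up values (all names, versions and addresses here are hypothetical):

    # Sketch: the same column layout as check_upgrade_status() with
    # hypothetical values, to show the shape of one output row.
    row = ('libexample', 'UPDATE', '0.6.33', '0.6.35',
           'maintainer@example.com', 'abc1234', None)
    print("{:25} {:15} {:15} {} {} {}".format(
        row[0], row[2], row[3], row[4], row[5], ''))
    # -> libexample                0.6.33          0.6.35          maintainer@example.com abc1234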
def register_commands(subparsers, context):
"""Register devtool subcommands from this plugin"""
@@ -393,13 +622,28 @@ def register_commands(subparsers, context):
group='starting')
parser_upgrade.add_argument('recipename', help='Name of recipe to upgrade (just name - no version, path or extension)')
parser_upgrade.add_argument('srctree', nargs='?', help='Path to where to extract the source tree. If not specified, a subdirectory of %s will be used.' % defsrctree)
- parser_upgrade.add_argument('--version', '-V', help='Version to upgrade to (PV)')
- parser_upgrade.add_argument('--srcrev', '-S', help='Source revision to upgrade to (required if fetching from an SCM such as git)')
+ parser_upgrade.add_argument('--version', '-V', help='Version to upgrade to (PV). If omitted, latest upstream version will be determined and used, if possible.')
+ parser_upgrade.add_argument('--srcrev', '-S', help='Source revision to upgrade to (useful when fetching from an SCM such as git)')
parser_upgrade.add_argument('--srcbranch', '-B', help='Branch in source repository containing the revision to use (if fetching from an SCM such as git)')
parser_upgrade.add_argument('--branch', '-b', default="devtool", help='Name for new development branch to checkout (default "%(default)s")')
parser_upgrade.add_argument('--no-patch', action="store_true", help='Do not apply patches from the recipe to the new source code')
+ parser_upgrade.add_argument('--no-overrides', '-O', action="store_true", help='Do not create branches for other override configurations')
group = parser_upgrade.add_mutually_exclusive_group()
group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
parser_upgrade.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
- parser_upgrade.set_defaults(func=upgrade)
+ parser_upgrade.add_argument('--keep-failure', action="store_true", help='Keep failed upgrade recipe and associated files (for debugging)')
+ parser_upgrade.set_defaults(func=upgrade, fixed_setup=context.fixed_setup)
+
+ parser_latest_version = subparsers.add_parser('latest-version', help='Report the latest version of an existing recipe',
+ description='Queries the upstream server for the latest upstream release (for git, tags are checked; for tarballs, a list of them is obtained and the one with the highest version number is reported)',
+ group='info')
+ parser_latest_version.add_argument('recipename', help='Name of recipe to query (just name - no version, path or extension)')
+ parser_latest_version.set_defaults(func=latest_version)
+
+ parser_check_upgrade_status = subparsers.add_parser('check-upgrade-status', help="Report upgradability for multiple (or all) recipes",
+ description="Prints a table of recipes together with versions currently provided by recipes, and latest upstream versions, when there is a later version available",
+ group='info')
+ parser_check_upgrade_status.add_argument('recipe', help='Name of the recipe to report (omit to report upgrade info for all recipes)', nargs='*')
+ parser_check_upgrade_status.add_argument('--all', '-a', help='Show all recipes, not just recipes needing upgrade', action="store_true")
+ parser_check_upgrade_status.set_defaults(func=check_upgrade_status)
diff --git a/scripts/lib/devtool/utilcmds.py b/scripts/lib/devtool/utilcmds.py
index b761a80f8f..964817766b 100644
--- a/scripts/lib/devtool/utilcmds.py
+++ b/scripts/lib/devtool/utilcmds.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool utility plugins"""
@@ -30,26 +20,35 @@ from devtool import parse_recipe
logger = logging.getLogger('devtool')
-
-def edit_recipe(args, config, basepath, workspace):
- """Entry point for the devtool 'edit-recipe' subcommand"""
+def _find_recipe_path(args, config, basepath, workspace):
if args.any_recipe:
+ logger.warning('-a/--any-recipe option is now always active, and thus the option will be removed in a future release')
+ if args.recipename in workspace:
+ recipefile = workspace[args.recipename]['recipefile']
+ else:
+ recipefile = None
+ if not recipefile:
tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
try:
rd = parse_recipe(config, tinfoil, args.recipename, True)
if not rd:
- return 1
- recipefile = rd.getVar('FILE', True)
+ raise DevtoolError("Failed to find specified recipe")
+ recipefile = rd.getVar('FILE')
finally:
tinfoil.shutdown()
- else:
- check_workspace_recipe(workspace, args.recipename)
- recipefile = workspace[args.recipename]['recipefile']
- if not recipefile:
- raise DevtoolError("Recipe file for %s is not under the workspace" %
- args.recipename)
+ return recipefile
- return scriptutils.run_editor(recipefile)
+
+def find_recipe(args, config, basepath, workspace):
+ """Entry point for the devtool 'find-recipe' subcommand"""
+ recipefile = _find_recipe_path(args, config, basepath, workspace)
+ print(recipefile)
+ return 0
+
+
+def edit_recipe(args, config, basepath, workspace):
+ """Entry point for the devtool 'edit-recipe' subcommand"""
+ return scriptutils.run_editor(_find_recipe_path(args, config, basepath, workspace), logger)
def configure_help(args, config, basepath, workspace):
@@ -62,20 +61,20 @@ def configure_help(args, config, basepath, workspace):
rd = parse_recipe(config, tinfoil, args.recipename, appends=True, filter_workspace=False)
if not rd:
return 1
- b = rd.getVar('B', True)
- s = rd.getVar('S', True)
+ b = rd.getVar('B')
+ s = rd.getVar('S')
configurescript = os.path.join(s, 'configure')
confdisabled = 'noexec' in rd.getVarFlags('do_configure') or 'do_configure' not in (rd.getVar('__BBTASKS', False) or [])
- configureopts = oe.utils.squashspaces(rd.getVar('CONFIGUREOPTS', True) or '')
- extra_oeconf = oe.utils.squashspaces(rd.getVar('EXTRA_OECONF', True) or '')
- extra_oecmake = oe.utils.squashspaces(rd.getVar('EXTRA_OECMAKE', True) or '')
- do_configure = rd.getVar('do_configure', True) or ''
+ configureopts = oe.utils.squashspaces(rd.getVar('CONFIGUREOPTS') or '')
+ extra_oeconf = oe.utils.squashspaces(rd.getVar('EXTRA_OECONF') or '')
+ extra_oecmake = oe.utils.squashspaces(rd.getVar('EXTRA_OECMAKE') or '')
+ do_configure = rd.getVar('do_configure') or ''
do_configure_noexpand = rd.getVar('do_configure', False) or ''
packageconfig = rd.getVarFlags('PACKAGECONFIG') or []
autotools = bb.data.inherits_class('autotools', rd) and ('oe_runconf' in do_configure or 'autotools_do_configure' in do_configure)
cmake = bb.data.inherits_class('cmake', rd) and ('cmake_do_configure' in do_configure)
- cmake_do_configure = rd.getVar('cmake_do_configure', True)
- pn = rd.getVar('PN', True)
+ cmake_do_configure = rd.getVar('cmake_do_configure')
+ pn = rd.getVar('PN')
finally:
tinfoil.shutdown()
@@ -213,13 +212,23 @@ The ./configure %s output for %s follows.
def register_commands(subparsers, context):
"""Register devtool subcommands from this plugin"""
- parser_edit_recipe = subparsers.add_parser('edit-recipe', help='Edit a recipe file in your workspace',
- description='Runs the default editor (as specified by the EDITOR variable) on the specified recipe. Note that the recipe file itself must be in the workspace (i.e. as a result of "devtool add" or "devtool upgrade"); you can override this with the -a/--any-recipe option.',
+ parser_edit_recipe = subparsers.add_parser('edit-recipe', help='Edit a recipe file',
+ description='Runs the default editor (as specified by the EDITOR variable) on the specified recipe. Note that this will be quicker for recipes in the workspace as the cache does not need to be loaded in that case.',
group='working')
parser_edit_recipe.add_argument('recipename', help='Recipe to edit')
- parser_edit_recipe.add_argument('--any-recipe', '-a', action="store_true", help='Edit any recipe, not just where the recipe file itself is in the workspace')
+ # FIXME drop -a at some point in future
+ parser_edit_recipe.add_argument('--any-recipe', '-a', action="store_true", help='Does nothing (exists for backwards-compatibility)')
parser_edit_recipe.set_defaults(func=edit_recipe)
+ # Find-recipe
+ parser_find_recipe = subparsers.add_parser('find-recipe', help='Find a recipe file',
+ description='Finds a recipe file. Note that this will be quicker for recipes in the workspace as the cache does not need to be loaded in that case.',
+ group='working')
+ parser_find_recipe.add_argument('recipename', help='Recipe to find')
+ # FIXME drop -a at some point in future
+ parser_find_recipe.add_argument('--any-recipe', '-a', action="store_true", help='Does nothing (exists for backwards-compatibility)')
+ parser_find_recipe.set_defaults(func=find_recipe)
+
# NOTE: Needed to override the usage string here since the default
# gets the order wrong - recipename must come before --arg
parser_configure_help = subparsers.add_parser('configure-help', help='Get help on configure script options',
diff --git a/scripts/lib/recipetool/append.py b/scripts/lib/recipetool/append.py
index 1e0fc1ee85..e9d52bb67b 100644
--- a/scripts/lib/recipetool/append.py
+++ b/scripts/lib/recipetool/append.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -48,7 +38,7 @@ def find_target_file(targetpath, d, pkglist=None):
"""Find the recipe installing the specified target path, optionally limited to a select list of packages"""
import json
- pkgdata_dir = d.getVar('PKGDATA_DIR', True)
+ pkgdata_dir = d.getVar('PKGDATA_DIR')
# The mix between /etc and ${sysconfdir} here may look odd, but it is just
# being consistent with usage elsewhere
@@ -97,25 +87,12 @@ def find_target_file(targetpath, d, pkglist=None):
recipes[targetpath].append('!%s' % pn)
return recipes
-def _get_recipe_file(cooker, pn):
- import oe.recipeutils
- recipefile = oe.recipeutils.pn_to_recipe(cooker, pn)
- if not recipefile:
- skipreasons = oe.recipeutils.get_unavailable_reasons(cooker, pn)
- if skipreasons:
- logger.error('\n'.join(skipreasons))
- else:
- logger.error("Unable to find any recipe file matching %s" % pn)
- return recipefile
-
def _parse_recipe(pn, tinfoil):
- import oe.recipeutils
- recipefile = _get_recipe_file(tinfoil.cooker, pn)
- if not recipefile:
- # Error already logged
+ try:
+ rd = tinfoil.parse_recipe(pn)
+ except bb.providers.NoProvider as e:
+ logger.error(str(e))
return None
- append_files = tinfoil.cooker.collection.get_file_appends(recipefile)
- rd = oe.recipeutils.parse_recipe(tinfoil.cooker, recipefile, append_files)
return rd
def determine_file_source(targetpath, rd):
@@ -123,8 +100,8 @@ def determine_file_source(targetpath, rd):
import oe.recipeutils
# See if it's in do_install for the recipe
- workdir = rd.getVar('WORKDIR', True)
- src_uri = rd.getVar('SRC_URI', True)
+ workdir = rd.getVar('WORKDIR')
+ src_uri = rd.getVar('SRC_URI')
srcfile = ''
modpatches = []
elements = check_do_install(rd, targetpath)
@@ -134,7 +111,7 @@ def determine_file_source(targetpath, rd):
logger.debug('source path: %s' % srcpath)
if not srcpath.startswith('/'):
# Handle non-absolute path
- srcpath = os.path.abspath(os.path.join(rd.getVarFlag('do_install', 'dirs', True).split()[-1], srcpath))
+ srcpath = os.path.abspath(os.path.join(rd.getVarFlag('do_install', 'dirs').split()[-1], srcpath))
if srcpath.startswith(workdir):
# OK, now we have the source file name, look for it in SRC_URI
workdirfile = os.path.relpath(srcpath, workdir)
@@ -203,22 +180,22 @@ def get_source_path(cmdelements):
def get_func_deps(func, d):
"""Find the function dependencies of a shell function"""
- deps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func, True))
- deps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
+ deps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func))
+ deps |= set((d.getVarFlag(func, "vardeps") or "").split())
funcdeps = []
for dep in deps:
- if d.getVarFlag(dep, 'func', True):
+ if d.getVarFlag(dep, 'func'):
funcdeps.append(dep)
return funcdeps
def check_do_install(rd, targetpath):
"""Look at do_install for a command that installs/copies the specified target path"""
- instpath = os.path.abspath(os.path.join(rd.getVar('D', True), targetpath.lstrip('/')))
- do_install = rd.getVar('do_install', True)
+ instpath = os.path.abspath(os.path.join(rd.getVar('D'), targetpath.lstrip('/')))
+ do_install = rd.getVar('do_install')
# Handle where do_install calls other functions (somewhat crudely, but good enough for this purpose)
deps = get_func_deps('do_install', rd)
for dep in deps:
- do_install = do_install.replace(dep, rd.getVar(dep, True))
+ do_install = do_install.replace(dep, rd.getVar(dep))
# Look backwards through do_install as we want to catch where a later line (perhaps
# from a bbappend) is writing over the top
@@ -251,7 +228,7 @@ def appendfile(args):
if stdout:
logger.debug('file command output: %s' % stdout.rstrip())
if ('executable' in stdout and not 'shell script' in stdout) or 'shared object' in stdout:
- logger.warn('This file looks like it is a binary or otherwise the output of compilation. If it is, you should consider building it properly instead of substituting a binary file directly.')
+ logger.warning('This file looks like it is a binary or otherwise the output of compilation. If it is, you should consider building it properly instead of substituting a binary file directly.')
if args.recipe:
recipes = {args.targetpath: [args.recipe],}
@@ -288,7 +265,7 @@ def appendfile(args):
if selectpn:
logger.debug('Selecting recipe %s for file %s' % (selectpn, args.targetpath))
if postinst_pns:
- logger.warn('%s will be modified by postinstall scripts for the following recipes:\n %s\nThis may or may not be an issue depending on what modifications these postinstall scripts make.' % (args.targetpath, '\n '.join(postinst_pns)))
+ logger.warning('%s will be modified by postinstall scripts for the following recipes:\n %s\nThis may or may not be an issue depending on what modifications these postinstall scripts make.' % (args.targetpath, '\n '.join(postinst_pns)))
rd = _parse_recipe(selectpn, tinfoil)
if not rd:
# Error message already shown
@@ -299,12 +276,12 @@ def appendfile(args):
sourcetype, sourcepath = sourcefile.split('://', 1)
logger.debug('Original source file is %s (%s)' % (sourcepath, sourcetype))
if sourcetype == 'patch':
- logger.warn('File %s is added by the patch %s - you may need to remove or replace this patch in order to replace the file.' % (args.targetpath, sourcepath))
+ logger.warning('File %s is added by the patch %s - you may need to remove or replace this patch in order to replace the file.' % (args.targetpath, sourcepath))
sourcepath = None
else:
logger.debug('Unable to determine source file, proceeding anyway')
if modpatches:
- logger.warn('File %s is modified by the following patches:\n %s' % (args.targetpath, '\n '.join(modpatches)))
+ logger.warning('File %s is modified by the following patches:\n %s' % (args.targetpath, '\n '.join(modpatches)))
if instelements and sourcepath:
install = None
@@ -335,12 +312,12 @@ def appendfile(args):
def appendsrc(args, files, rd, extralines=None):
import oe.recipeutils
- srcdir = rd.getVar('S', True)
- workdir = rd.getVar('WORKDIR', True)
+ srcdir = rd.getVar('S')
+ workdir = rd.getVar('WORKDIR')
import bb.fetch
simplified = {}
- src_uri = rd.getVar('SRC_URI', True).split()
+ src_uri = rd.getVar('SRC_URI').split()
for uri in src_uri:
if uri.endswith(';'):
uri = uri[:-1]
@@ -353,10 +330,10 @@ def appendsrc(args, files, rd, extralines=None):
for newfile, srcfile in files.items():
src_destdir = os.path.dirname(srcfile)
if not args.use_workdir:
- if rd.getVar('S', True) == rd.getVar('STAGING_KERNEL_DIR', True):
+ if rd.getVar('S') == rd.getVar('STAGING_KERNEL_DIR'):
srcdir = os.path.join(workdir, 'git')
if not bb.data.inherits_class('kernel-yocto', rd):
- logger.warn('S == STAGING_KERNEL_DIR and non-kernel-yocto, unable to determine path to srcdir, defaulting to ${WORKDIR}/git')
+ logger.warning('S == STAGING_KERNEL_DIR and non-kernel-yocto, unable to determine path to srcdir, defaulting to ${WORKDIR}/git')
src_destdir = os.path.join(os.path.relpath(srcdir, workdir), src_destdir)
src_destdir = os.path.normpath(src_destdir)
@@ -370,9 +347,9 @@ def appendsrc(args, files, rd, extralines=None):
if simple_str in simplified:
existing = simplified[simple_str]
if source_uri != existing:
- logger.warn('{0!r} is already in SRC_URI, with different parameters: {1!r}, not adding'.format(source_uri, existing))
+ logger.warning('{0!r} is already in SRC_URI, with different parameters: {1!r}, not adding'.format(source_uri, existing))
else:
- logger.warn('{0!r} is already in SRC_URI, not adding'.format(source_uri))
+ logger.warning('{0!r} is already in SRC_URI, not adding'.format(source_uri))
else:
extralines.append('SRC_URI += {0}'.format(source_uri))
copyfiles[newfile] = srcfile
diff --git a/scripts/lib/recipetool/create.py b/scripts/lib/recipetool/create.py
index 9b31fe92d7..4c4bbadb4c 100644
--- a/scripts/lib/recipetool/create.py
+++ b/scripts/lib/recipetool/create.py
@@ -1,19 +1,9 @@
# Recipe creation tool - create command plugin
#
-# Copyright (C) 2014-2016 Intel Corporation
+# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -26,12 +16,24 @@ import logging
import scriptutils
from urllib.parse import urlparse, urldefrag, urlsplit
import hashlib
-
+import bb.fetch2
logger = logging.getLogger('recipetool')
tinfoil = None
plugins = None
+def log_error_cond(message, debugonly):
+ if debugonly:
+ logger.debug(message)
+ else:
+ logger.error(message)
+
+def log_info_cond(message, debugonly):
+ if debugonly:
+ logger.debug(message)
+ else:
+ logger.info(message)
+
def plugin_init(pluginlist):
# Take a reference to the list so we can use it later
global plugins
@@ -47,6 +49,9 @@ class RecipeHandler(object):
recipecmakefilemap = {}
recipebinmap = {}
+ def __init__(self):
+ self._devtool = False
+
@staticmethod
def load_libmap(d):
'''Load library->recipe mapping'''
@@ -55,9 +60,11 @@ class RecipeHandler(object):
if RecipeHandler.recipelibmap:
return
# First build up library->package mapping
- shlib_providers = oe.package.read_shlib_providers(d)
- libdir = d.getVar('libdir', True)
- base_libdir = d.getVar('base_libdir', True)
+ d2 = bb.data.createCopy(d)
+ d2.setVar("WORKDIR_PKGDATA", "${PKGDATA_DIR}")
+ shlib_providers = oe.package.read_shlib_providers(d2)
+ libdir = d.getVar('libdir')
+ base_libdir = d.getVar('base_libdir')
libpaths = list(set([base_libdir, libdir]))
libname_re = re.compile('^lib(.+)\.so.*$')
pkglibmap = {}
@@ -73,7 +80,7 @@ class RecipeHandler(object):
logger.debug('unable to extract library name from %s' % lib)
# Now turn it into a library->recipe mapping
- pkgdata_dir = d.getVar('PKGDATA_DIR', True)
+ pkgdata_dir = d.getVar('PKGDATA_DIR')
for libname, pkg in pkglibmap.items():
try:
with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
@@ -83,7 +90,7 @@ class RecipeHandler(object):
break
except IOError as ioe:
if ioe.errno == 2:
- logger.warn('unable to find a pkgdata file for package %s' % pkg)
+ logger.warning('unable to find a pkgdata file for package %s' % pkg)
else:
raise
@@ -97,9 +104,9 @@ class RecipeHandler(object):
'''Build up development file->recipe mapping'''
if RecipeHandler.recipeheadermap:
return
- pkgdata_dir = d.getVar('PKGDATA_DIR', True)
- includedir = d.getVar('includedir', True)
- cmakedir = os.path.join(d.getVar('libdir', True), 'cmake')
+ pkgdata_dir = d.getVar('PKGDATA_DIR')
+ includedir = d.getVar('includedir')
+ cmakedir = os.path.join(d.getVar('libdir'), 'cmake')
for pkg in glob.glob(os.path.join(pkgdata_dir, 'runtime', '*-dev')):
with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
pn = None
@@ -128,9 +135,9 @@ class RecipeHandler(object):
'''Build up native binary->recipe mapping'''
if RecipeHandler.recipebinmap:
return
- sstate_manifests = d.getVar('SSTATE_MANIFESTS', True)
- staging_bindir_native = d.getVar('STAGING_BINDIR_NATIVE', True)
- build_arch = d.getVar('BUILD_ARCH', True)
+ sstate_manifests = d.getVar('SSTATE_MANIFESTS')
+ staging_bindir_native = d.getVar('STAGING_BINDIR_NATIVE')
+ build_arch = d.getVar('BUILD_ARCH')
fileprefix = 'manifest-%s-' % build_arch
for fn in glob.glob(os.path.join(sstate_manifests, '%s*-native.populate_sysroot' % fileprefix)):
with open(fn, 'r') as f:
@@ -141,10 +148,12 @@ class RecipeHandler(object):
RecipeHandler.recipebinmap[prog] = pn
@staticmethod
- def checkfiles(path, speclist, recursive=False):
+ def checkfiles(path, speclist, recursive=False, excludedirs=None):
results = []
if recursive:
- for root, _, files in os.walk(path):
+ for root, dirs, files in os.walk(path, topdown=True):
+ if excludedirs:
+ dirs[:] = [d for d in dirs if d not in excludedirs]
for fn in files:
for spec in speclist:
if fnmatch.fnmatch(fn, spec):
@@ -222,7 +231,8 @@ class RecipeHandler(object):
if deps:
values['DEPENDS'] = ' '.join(deps)
- def genfunction(self, outlines, funcname, content, python=False, forcespace=False):
+ @staticmethod
+ def genfunction(outlines, funcname, content, python=False, forcespace=False):
if python:
prefix = 'python '
else:
@@ -324,8 +334,13 @@ def determine_from_url(srcuri):
pv = res.group(2).strip().replace('_', '.')
if not pn and not pv:
- srcfile = os.path.basename(parseres.path.rstrip('/'))
- pn, pv = determine_from_filename(srcfile)
+ if parseres.scheme not in ['git', 'gitsm', 'svn', 'hg']:
+ srcfile = os.path.basename(parseres.path.rstrip('/'))
+ pn, pv = determine_from_filename(srcfile)
+ elif parseres.scheme in ['git', 'gitsm']:
+ pn = os.path.basename(parseres.path.rstrip('/')).lower().replace('_', '-')
+ if pn.endswith('.git'):
+ pn = pn[:-4]
logger.debug('Determined from source URL: name = "%s", version = "%s"' % (pn, pv))
return (pn, pv)
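A quick illustration of the SCM-name derivation above: the last path component is lower-cased, underscores become hyphens, and a trailing '.git' is stripped (the URL path below is a hypothetical example):

    # Sketch: derive a recipe name from a git URL path the same way as
    # determine_from_url() above.
    import os

    def pn_from_git_path(path):
        pn = os.path.basename(path.rstrip('/')).lower().replace('_', '-')
        if pn.endswith('.git'):
            pn = pn[:-4]
        return pn

    print(pn_from_git_path('/Foo_Org/My_Project.git'))   # -> my-project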
@@ -335,7 +350,6 @@ def supports_srcrev(uri):
# This is a bit sad, but if you don't have this set there can be some
# odd interactions with the urldata cache which lead to errors
localdata.setVar('SRCREV', '${AUTOREV}')
- bb.data.update_data(localdata)
try:
fetcher = bb.fetch2.Fetch([uri], localdata)
urldata = fetcher.ud
@@ -353,14 +367,33 @@ def reformat_git_uri(uri):
'''Convert any http[s]://....git URI into git://...;protocol=http[s]'''
checkuri = uri.split(';', 1)[0]
if checkuri.endswith('.git') or '/git/' in checkuri or re.match('https?://github.com/[^/]+/[^/]+/?$', checkuri):
- res = re.match('(http|https|ssh)://([^;]+(\.git)?)(;.*)?$', uri)
- if res:
- # Need to switch the URI around so that the git fetcher is used
- return 'git://%s;protocol=%s%s' % (res.group(2), res.group(1), res.group(4) or '')
- elif '@' in checkuri:
- # Catch e.g. git@git.example.com:repo.git
- return 'git://%s;protocol=ssh' % checkuri.replace(':', '/', 1)
- return uri
+ # Appends scheme if the scheme is missing
+ if not '://' in uri:
+ uri = 'git://' + uri
+ scheme, host, path, user, pswd, parms = bb.fetch2.decodeurl(uri)
+ # Detection mechanism: this is required because certain URLs are formatted with ":" rather than "/",
+ # which causes decodeurl to fail to get the right host and path
+ if len(host.split(':')) > 1:
+ splitslash = host.split(':')
+ # Port number should not be split from host
+ if not re.match('^[0-9]+$', splitslash[1]):
+ host = splitslash[0]
+ path = '/' + splitslash[1] + path
+ # Algorithm:
+ # if a user is defined, append protocol=ssh, or if a protocol is already defined, then honor the user-defined protocol
+ # if no user and password are defined, check the scheme type and append protocol= with the scheme type
+ # finally, if the protocol is set or the URL is already well-formed, do nothing and rejoin everything back to normal
+ # Need to repackage the arguments for encodeurl, the format is: (scheme, host, path, user, password, OrderedDict([('key', 'value')]))
+ if user:
+ if not 'protocol' in parms:
+ parms.update({('protocol', 'ssh')})
+ elif (scheme == "http" or scheme == 'https' or scheme == 'ssh') and not ('protocol' in parms):
+ parms.update({('protocol', scheme)})
+ # Always append 'git://'
+ fUrl = bb.fetch2.encodeurl(('git', host, path, user, pswd, parms))
+ return fUrl
+ else:
+ return uri
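The net effect of the rewritten reformat_git_uri() on a few representative inputs, assuming the logic above (all URLs are hypothetical examples):

    # Illustrative input -> output pairs for reformat_git_uri():
    #
    #   https://github.com/user/repo.git
    #       -> git://github.com/user/repo.git;protocol=https
    #   git@git.example.com:repo.git
    #       -> git://git@git.example.com/repo.git;protocol=ssh
    #   git://git.example.com/repo.git      (already well-formed)
    #       -> git://git.example.com/repo.git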
def is_package(url):
'''Check if a URL points to a package'''
@@ -380,16 +413,23 @@ def create_recipe(args):
pkgarch = "${MACHINE_ARCH}"
extravalues = {}
- checksums = (None, None)
+ checksums = {}
tempsrc = ''
source = args.source
srcsubdir = ''
srcrev = '${AUTOREV}'
+ srcbranch = ''
+ scheme = ''
+ storeTagName = ''
+ pv_srcpv = False
if os.path.isfile(source):
source = 'file://%s' % os.path.abspath(source)
if scriptutils.is_src_url(source):
+ # Warn about github archive URLs
+ if re.match('https?://github.com/[^/]+/[^/]+/archive/.+(\.tar\..*|\.zip)$', source):
+ logger.warning('github archive files are not guaranteed to be stable and may be re-generated over time. If the latter occurs, the checksums will likely change and the recipe will fail at do_fetch. It is recommended that you point to an actual commit or tag in the repository instead (using the repository URL in conjunction with the -S/--srcrev option).')
# Fetch a URL
fetchuri = reformat_git_uri(urldefrag(source)[0])
if args.binary:
@@ -400,25 +440,63 @@ def create_recipe(args):
rev_re = re.compile(';rev=([^;]+)')
res = rev_re.search(srcuri)
if res:
+ if args.srcrev:
+ logger.error('rev= parameter and -S/--srcrev option cannot both be specified - use one or the other')
+ sys.exit(1)
+ if args.autorev:
+ logger.error('rev= parameter and -a/--autorev option cannot both be specified - use one or the other')
+ sys.exit(1)
srcrev = res.group(1)
srcuri = rev_re.sub('', srcuri)
- tempsrc = tempfile.mkdtemp(prefix='recipetool-')
- srctree = tempsrc
- if fetchuri.startswith('npm://'):
- # Check if npm is available
- npm = bb.utils.which(tinfoil.config_data.getVar('PATH', True), 'npm')
- if not npm:
- logger.error('npm:// URL requested but npm is not available - you need to either build nodejs-native or install npm using your package manager')
+ elif args.srcrev:
+ srcrev = args.srcrev
+
+ # Check whether users provides any branch info in fetchuri.
+ # If true, we will skip all branch checking process to honor all user's input.
+ scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(fetchuri)
+ srcbranch = params.get('branch')
+ if args.srcbranch:
+ if srcbranch:
+ logger.error('branch= parameter and -B/--srcbranch option cannot both be specified - use one or the other')
sys.exit(1)
- logger.info('Fetching %s...' % srcuri)
+ srcbranch = args.srcbranch
+ nobranch = params.get('nobranch')
+ if nobranch and srcbranch:
+ logger.error('nobranch= cannot be used if you specify a branch')
+ sys.exit(1)
+ tag = params.get('tag')
+ if not srcbranch and not nobranch and srcrev != '${AUTOREV}':
+ # Append nobranch=1 in the following conditions:
+ # 1. User did not set 'branch=' in srcuri, and
+ # 2. User did not set 'nobranch=1' in srcuri, and
+ # 3. Source revision is not '${AUTOREV}'
+ params['nobranch'] = '1'
+ if tag:
+ # Keep a copy of the tag, append nobranch=1, and then remove the tag from the URL.
+ # The bitbake fetcher is unable to fetch when ${AUTOREV} and a tag are set at the same time.
+ storeTagName = params['tag']
+ params['nobranch'] = '1'
+ del params['tag']
+ if scheme == 'npm':
+ params['noverify'] = '1'
+ fetchuri = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
+
+ tmpparent = tinfoil.config_data.getVar('BASE_WORKDIR')
+ bb.utils.mkdirhier(tmpparent)
+ tempsrc = tempfile.mkdtemp(prefix='recipetool-', dir=tmpparent)
+ srctree = os.path.join(tempsrc, 'source')
+
try:
- checksums = scriptutils.fetch_uri(tinfoil.config_data, fetchuri, srctree, srcrev)
- except bb.fetch2.BBFetchException as e:
- logger.error(str(e).rstrip())
+ checksums, ftmpdir = scriptutils.fetch_url(tinfoil, fetchuri, srcrev, srctree, logger, preserve_tmp=args.keep_temp)
+ except scriptutils.FetchUrlFailure as e:
+ logger.error(str(e))
sys.exit(1)
- dirlist = os.listdir(srctree)
- if 'git.indirectionsymlink' in dirlist:
- dirlist.remove('git.indirectionsymlink')
+
+ if ftmpdir and args.keep_temp:
+ logger.info('Fetch temp directory is %s' % ftmpdir)
+
+ dirlist = scriptutils.filter_src_subdirs(srctree)
+ logger.debug('Directory listing (excluding filtered out):\n %s' % '\n '.join(dirlist))
if len(dirlist) == 1:
singleitem = os.path.join(srctree, dirlist[0])
if os.path.isdir(singleitem):
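The branch/nobranch/tag juggling above works by round-tripping the URL through bitbake's fetcher helpers. A minimal sketch of that round trip (the URL is hypothetical; bitbake's lib/ directory must be on sys.path for bb.fetch2 to import):

    # Sketch: decode a SRC_URI-style URL, adjust its parameters and
    # re-encode it, as create_recipe() does with nobranch= and tag=.
    import bb.fetch2

    url = 'git://git.example.com/repo.git;branch=master;tag=v1.0'
    scheme, host, path, user, pswd, params = bb.fetch2.decodeurl(url)
    params['nobranch'] = '1'
    del params['tag']
    print(bb.fetch2.encodeurl((scheme, host, path, user, pswd, params)))
    # -> git://git.example.com/repo.git;branch=master;nobranch=1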
@@ -426,41 +504,85 @@ def create_recipe(args):
srcsubdir = dirlist[0]
srctree = os.path.join(srctree, srcsubdir)
else:
- with open(singleitem, 'r', errors='surrogateescape') as f:
- if '<html' in f.read(100).lower():
- logger.error('Fetching "%s" returned a single HTML page - check the URL is correct and functional' % fetchuri)
- sys.exit(1)
+ check_single_file(dirlist[0], fetchuri)
+ elif len(dirlist) == 0:
+ if '/' in fetchuri:
+ fn = os.path.join(tinfoil.config_data.getVar('DL_DIR'), fetchuri.split('/')[-1])
+ if os.path.isfile(fn):
+ check_single_file(fn, fetchuri)
+ # If we've got to here then there's no source so we might as well give up
+ logger.error('URL %s resulted in an empty source tree' % fetchuri)
+ sys.exit(1)
+
+ # We need this checking mechanism so that the recipe created by recipetool and devtool
+ # can be parsed and built by bitbake.
+ # If no branch name was provided, determine the branch name from the provided SRCREV.
+ if not srcbranch and not nobranch and srcrev and (srcrev != '${AUTOREV}') and scheme in ['git', 'gitsm']:
+ try:
+ cmd = 'git branch -r --contains'
+ check_branch, check_branch_err = bb.process.run('%s %s' % (cmd, srcrev), cwd=srctree)
+ except bb.process.ExecutionError as err:
+ logger.error(str(err))
+ sys.exit(1)
+ get_branch = [x.strip() for x in check_branch.splitlines()]
+ # Remove HEAD reference point and drop remote prefix
+ get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
+ if 'master' in get_branch:
+ # If it is master, we do not need to append 'branch=master' as this is the default.
+ # Even when get_branch contains multiple entries, if 'master' is one of
+ # them, we default to taking from 'master'
+ srcbranch = ''
+ elif len(get_branch) == 1:
+ # If 'master' is not in get_branch and get_branch contains only one entry, store it in 'srcbranch'
+ srcbranch = get_branch[0]
+ else:
+ # If get_branch contains more than one entry, display an error and exit.
+ mbrch = '\n ' + '\n '.join(get_branch)
+ logger.error('Revision %s was found on multiple branches: %s\nPlease provide the correct branch with -B/--srcbranch' % (srcrev, mbrch))
+ sys.exit(1)
+
+ # Since we might have a value in srcbranch, we need to
+ # reconstruct the srcuri to include 'branch' in params.
+ scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(srcuri)
+ if srcbranch:
+ params['branch'] = srcbranch
+
+ if storeTagName and scheme in ['git', 'gitsm']:
+ # Resolve srcrev from the tag and check the validity of the tag
+ cmd = ('git rev-parse --verify %s' % (storeTagName))
+ try:
+ check_tag, check_tag_err = bb.process.run('%s' % cmd, cwd=srctree)
+ srcrev = check_tag.split()[0]
+ except bb.process.ExecutionError as err:
+ logger.error(str(err))
+ logger.error("Possibly wrong tag name is provided")
+ sys.exit(1)
+ # Drop the tag from srcuri as it conflicts with SRCREV during recipe parsing.
+ del params['tag']
+ srcuri = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
+
if os.path.exists(os.path.join(srctree, '.gitmodules')) and srcuri.startswith('git://'):
srcuri = 'gitsm://' + srcuri[6:]
logger.info('Fetching submodules...')
bb.process.run('git submodule update --init --recursive', cwd=srctree)
if is_package(fetchuri):
- tmpfdir = tempfile.mkdtemp(prefix='recipetool-')
- try:
- pkgfile = None
+ localdata = bb.data.createCopy(tinfoil.config_data)
+ pkgfile = bb.fetch2.localpath(fetchuri, localdata)
+ if pkgfile:
+ tmpfdir = tempfile.mkdtemp(prefix='recipetool-')
try:
- fileuri = fetchuri + ';unpack=0'
- scriptutils.fetch_uri(tinfoil.config_data, fileuri, tmpfdir, srcrev)
- for root, _, files in os.walk(tmpfdir):
- for f in files:
- pkgfile = os.path.join(root, f)
- break
- except bb.fetch2.BBFetchException as e:
- logger.warn('Second fetch to get metadata failed: %s' % str(e).rstrip())
-
- if pkgfile:
if pkgfile.endswith(('.deb', '.ipk')):
- stdout, _ = bb.process.run('ar x %s control.tar.gz' % pkgfile, cwd=tmpfdir)
- stdout, _ = bb.process.run('tar xf control.tar.gz ./control', cwd=tmpfdir)
+ stdout, _ = bb.process.run('ar x %s' % pkgfile, cwd=tmpfdir)
+ stdout, _ = bb.process.run('tar xf control.tar.gz', cwd=tmpfdir)
values = convert_debian(tmpfdir)
extravalues.update(values)
elif pkgfile.endswith(('.rpm', '.srpm')):
stdout, _ = bb.process.run('rpm -qp --xml %s > pkginfo.xml' % pkgfile, cwd=tmpfdir)
values = convert_rpm_xml(os.path.join(tmpfdir, 'pkginfo.xml'))
extravalues.update(values)
- finally:
- shutil.rmtree(tmpfdir)
+ finally:
+ shutil.rmtree(tmpfdir)
else:
# Assume we're pointing to an existing source tree
if args.extract_to:
@@ -488,9 +610,9 @@ def create_recipe(args):
if args.src_subdir:
srcsubdir = os.path.join(srcsubdir, args.src_subdir)
- srctree_use = os.path.join(srctree, args.src_subdir)
+ srctree_use = os.path.abspath(os.path.join(srctree, args.src_subdir))
else:
- srctree_use = srctree
+ srctree_use = os.path.abspath(srctree)
if args.outfile and os.path.isdir(args.outfile):
outfile = None
@@ -512,9 +634,10 @@ def create_recipe(args):
# We need a blank line here so that patch_recipe_lines can rewind before the LICENSE comments
lines_before.append('')
- handled = []
- licvalues = handle_license_vars(srctree_use, lines_before, handled, extravalues, tinfoil.config_data)
+ # We'll come back and replace this later in handle_license_vars()
+ lines_before.append('##LICENSE_PLACEHOLDER##')
+ handled = []
classes = []
# FIXME This is kind of a hack, we probably ought to be using bitbake to do this
@@ -550,31 +673,31 @@ def create_recipe(args):
else:
realpv = None
- if srcuri and not realpv or not pn:
- name_pn, name_pv = determine_from_url(srcuri)
- if name_pn and not pn:
- pn = name_pn
- if name_pv and not realpv:
- realpv = name_pv
-
-
if not srcuri:
lines_before.append('# No information for SRC_URI yet (only an external source tree was specified)')
lines_before.append('SRC_URI = "%s"' % srcuri)
- (md5value, sha256value) = checksums
- if md5value:
- lines_before.append('SRC_URI[md5sum] = "%s"' % md5value)
- if sha256value:
- lines_before.append('SRC_URI[sha256sum] = "%s"' % sha256value)
+ for key, value in sorted(checksums.items()):
+ lines_before.append('SRC_URI[%s] = "%s"' % (key, value))
if srcuri and supports_srcrev(srcuri):
lines_before.append('')
lines_before.append('# Modify these as desired')
- lines_before.append('PV = "%s+git${SRCPV}"' % (realpv or '1.0'))
+ # Note: we have code to replace realpv further down if it gets set to some other value
+ scheme, _, _, _, _, _ = bb.fetch2.decodeurl(srcuri)
+ if scheme in ['git', 'gitsm']:
+ srcpvprefix = 'git'
+ elif scheme == 'svn':
+ srcpvprefix = 'svnr'
+ else:
+ srcpvprefix = scheme
+ lines_before.append('PV = "%s+%s${SRCPV}"' % (realpv or '1.0', srcpvprefix))
+ pv_srcpv = True
if not args.autorev and srcrev == '${AUTOREV}':
if os.path.exists(os.path.join(srctree, '.git')):
(stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
- srcrev = stdout.rstrip()
+ srcrev = stdout.rstrip()
lines_before.append('SRCREV = "%s"' % srcrev)
+ if args.provides:
+ lines_before.append('PROVIDES = "%s"' % args.provides)
lines_before.append('')
if srcsubdir and not args.binary:
@@ -591,6 +714,11 @@ def create_recipe(args):
lines_after.append('INSANE_SKIP_${PN} += "already-stripped"')
lines_after.append('')
+ if args.fetch_dev:
+ extravalues['fetchdev'] = True
+ else:
+ extravalues['fetchdev'] = None
+
# Find all plugins that want to register handlers
logger.debug('Loading recipe handlers')
raw_handlers = []
@@ -607,6 +735,7 @@ def create_recipe(args):
handlers.sort(key=lambda item: (item[1], -item[2]), reverse=True)
for handler, priority, _ in handlers:
logger.debug('Handler: %s (priority %d)' % (handler.__class__.__name__, priority))
+ setattr(handler, '_devtool', args.devtool)
handlers = [item[0] for item in handlers]
# Apply the handlers
@@ -641,9 +770,18 @@ def create_recipe(args):
if '_' in pn:
pn = pn.replace('_', '-')
+ if srcuri and not realpv or not pn:
+ name_pn, name_pv = determine_from_url(srcuri)
+ if name_pn and not pn:
+ pn = name_pn
+ if name_pv and not realpv:
+ realpv = name_pv
+
+ licvalues = handle_license_vars(srctree_use, lines_before, handled, extravalues, tinfoil.config_data)
+
if not outfile:
if not pn:
- logger.error('Unable to determine short program name from source tree - please specify name with -N/--name or output file name with -o/--outfile')
+ log_error_cond('Unable to determine short program name from source tree - please specify name with -N/--name or output file name with -o/--outfile', args.devtool)
# devtool looks for this specific exit code, so don't change it
sys.exit(15)
else:
@@ -690,10 +828,11 @@ def create_recipe(args):
skipblank = True
continue
elif line.startswith('SRC_URI = '):
- if realpv:
+ if realpv and not pv_srcpv:
line = line.replace(realpv, '${PV}')
elif line.startswith('PV = '):
if realpv:
+ # Replace the first part of the PV value
line = re.sub('"[^+]*\+', '"%s+' % realpv, line)
lines_before.append(line)
@@ -713,6 +852,15 @@ def create_recipe(args):
if not bbclassextend:
lines_after.append('BBCLASSEXTEND = "native"')
+ postinst = ("postinst", extravalues.pop('postinst', None))
+ postrm = ("postrm", extravalues.pop('postrm', None))
+ preinst = ("preinst", extravalues.pop('preinst', None))
+ prerm = ("prerm", extravalues.pop('prerm', None))
+ funcs = [postinst, postrm, preinst, prerm]
+ for func in funcs:
+ if func[1]:
+ RecipeHandler.genfunction(lines_after, 'pkg_%s_${PN}' % func[0], func[1])
+
outlines = []
outlines.extend(lines_before)
if classes:
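
The pkg_postinst/postrm/preinst/prerm handling added in this hunk turns any maintainer-script content collected in extravalues into pkg_*_${PN} functions via RecipeHandler.genfunction. A rough stand-in for what that produces, assuming genfunction simply wraps the body lines in a named shell function (the helper below is hypothetical, not the OE implementation):

    def genfunction(lines, name, body):
        # Hypothetical stand-in: emit a shell function into the recipe text.
        lines.append('%s() {' % name)
        for line in body:
            lines.append('\t%s' % line)
        lines.append('}')
        lines.append('')

    lines_after = []
    genfunction(lines_after, 'pkg_postinst_${PN}', ['ldconfig'])
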
@@ -723,9 +871,6 @@ def create_recipe(args):
outlines.extend(lines_after)
if extravalues:
- if 'LICENSE' in extravalues and not licvalues:
- # Don't blow away 'CLOSED' value that comments say we set
- del extravalues['LICENSE']
_, outlines = oe.recipeutils.patch_recipe_lines(outlines, extravalues, trailing_newline=False)
if args.extract_to:
@@ -739,7 +884,7 @@ def create_recipe(args):
shutil.move(srctree, args.extract_to)
if tempsrc == srctree:
tempsrc = None
- logger.info('Source extracted to %s' % args.extract_to)
+ log_info_cond('Source extracted to %s' % args.extract_to, args.devtool)
if outfile == '-':
sys.stdout.write('\n'.join(outlines) + '\n')
@@ -752,7 +897,7 @@ def create_recipe(args):
continue
f.write('%s\n' % line)
lastline = line
- logger.info('Recipe %s has been created; further editing may be required to make it fully functional' % outfile)
+ log_info_cond('Recipe %s has been created; further editing may be required to make it fully functional' % outfile, args.devtool)
if tempsrc:
if args.keep_temp:
@@ -762,52 +907,101 @@ def create_recipe(args):
return 0
+def check_single_file(fn, fetchuri):
+ """Determine if a single downloaded file is something we can't handle"""
+ with open(fn, 'r', errors='surrogateescape') as f:
+ if '<html' in f.read(100).lower():
+ logger.error('Fetching "%s" returned a single HTML page - check the URL is correct and functional' % fetchuri)
+ sys.exit(1)
+
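
The HTML check above is deliberately cheap: read the first 100 bytes and look for an opening tag, which is enough to catch an error page served in place of a tarball. The same test without the sys.exit():

    def looks_like_html(fn):
        # Inspect only the first 100 bytes of the downloaded file.
        with open(fn, 'r', errors='surrogateescape') as f:
            return '<html' in f.read(100).lower()
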
+def split_value(value):
+ if isinstance(value, str):
+ return value.split()
+ else:
+ return value
+
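
split_value() just normalises values that may arrive either as a whitespace-separated string or as a list already, for instance:

    split_value('MIT BSD-3-Clause')      # -> ['MIT', 'BSD-3-Clause']
    split_value(['MIT', 'BSD-3-Clause']) # -> returned unchanged
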
def handle_license_vars(srctree, lines_before, handled, extravalues, d):
+ lichandled = [x for x in handled if x[0] == 'license']
+ if lichandled:
+ # Someone else has already handled the license vars; just return their value
+ return lichandled[0][1]
+
licvalues = guess_license(srctree, d)
+ licenses = []
lic_files_chksum = []
lic_unknown = []
+ lines = []
if licvalues:
- licenses = []
for licvalue in licvalues:
if not licvalue[0] in licenses:
licenses.append(licvalue[0])
lic_files_chksum.append('file://%s;md5=%s' % (licvalue[1], licvalue[2]))
if licvalue[0] == 'Unknown':
lic_unknown.append(licvalue[1])
- lines_before.append('# WARNING: the following LICENSE and LIC_FILES_CHKSUM values are best guesses - it is')
- lines_before.append('# your responsibility to verify that the values are complete and correct.')
- if len(licvalues) > 1:
- lines_before.append('#')
- lines_before.append('# NOTE: multiple licenses have been detected; if that is correct you should separate')
- lines_before.append('# these in the LICENSE value using & if the multiple licenses all apply, or | if there')
- lines_before.append('# is a choice between the multiple licenses. If in doubt, check the accompanying')
- lines_before.append('# documentation to determine which situation is applicable.')
if lic_unknown:
- lines_before.append('#')
- lines_before.append('# The following license files were not able to be identified and are')
- lines_before.append('# represented as "Unknown" below, you will need to check them yourself:')
+ lines.append('#')
+ lines.append('# The following license files could not be identified and are')
+ lines.append('# represented as "Unknown" below; you will need to check them yourself:')
for licfile in lic_unknown:
- lines_before.append('# %s' % licfile)
- lines_before.append('#')
- else:
- lines_before.append('# Unable to find any files that looked like license statements. Check the accompanying')
- lines_before.append('# documentation and source headers and set LICENSE and LIC_FILES_CHKSUM accordingly.')
- lines_before.append('#')
- lines_before.append('# NOTE: LICENSE is being set to "CLOSED" to allow you to at least start building - if')
- lines_before.append('# this is not accurate with respect to the licensing of the software being built (it')
- lines_before.append('# will not be in most cases) you must specify the correct value before using this')
- lines_before.append('# recipe for anything other than initial testing/development!')
- licenses = ['CLOSED']
- pkg_license = extravalues.pop('LICENSE', None)
- if pkg_license:
+ lines.append('# %s' % licfile)
+
+ extra_license = split_value(extravalues.pop('LICENSE', []))
+ if '&' in extra_license:
+ extra_license.remove('&')
+ if extra_license:
if licenses == ['Unknown']:
- lines_before.append('# NOTE: The following LICENSE value was determined from the original package metadata')
- licenses = [pkg_license]
+ licenses = extra_license
else:
- lines_before.append('# NOTE: Original package metadata indicates license is: %s' % pkg_license)
- lines_before.append('LICENSE = "%s"' % ' '.join(licenses))
- lines_before.append('LIC_FILES_CHKSUM = "%s"' % ' \\\n '.join(lic_files_chksum))
- lines_before.append('')
+ for item in extra_license:
+ if item not in licenses:
+ licenses.append(item)
+ extra_lic_files_chksum = split_value(extravalues.pop('LIC_FILES_CHKSUM', []))
+ for item in extra_lic_files_chksum:
+ if item not in lic_files_chksum:
+ lic_files_chksum.append(item)
+
+ if lic_files_chksum:
+ # We are going to set the vars, so prepend the standard disclaimer
+ lines.insert(0, '# WARNING: the following LICENSE and LIC_FILES_CHKSUM values are best guesses - it is')
+ lines.insert(1, '# your responsibility to verify that the values are complete and correct.')
+ else:
+ # Without LIC_FILES_CHKSUM we set LICENSE = "CLOSED" to allow the
+ # user to get started easily
+ lines.append('# Unable to find any files that looked like license statements. Check the accompanying')
+ lines.append('# documentation and source headers and set LICENSE and LIC_FILES_CHKSUM accordingly.')
+ lines.append('#')
+ lines.append('# NOTE: LICENSE is being set to "CLOSED" to allow you to at least start building - if')
+ lines.append('# this is not accurate with respect to the licensing of the software being built (it')
+ lines.append('# will not be in most cases) you must specify the correct value before using this')
+ lines.append('# recipe for anything other than initial testing/development!')
+ licenses = ['CLOSED']
+
+ if extra_license and sorted(licenses) != sorted(extra_license):
+ lines.append('# NOTE: Original package / source metadata indicates license is: %s' % ' & '.join(extra_license))
+
+ if len(licenses) > 1:
+ lines.append('#')
+ lines.append('# NOTE: multiple licenses have been detected; they have been separated with &')
+ lines.append('# in the LICENSE value for now since it is a reasonable assumption that all')
+ lines.append('# of the licenses apply. If instead there is a choice between the multiple')
+ lines.append('# licenses then you should change the value to separate the licenses with |')
+ lines.append('# instead of &. If there is any doubt, check the accompanying documentation')
+ lines.append('# to determine which situation is applicable.')
+
+ lines.append('LICENSE = "%s"' % ' & '.join(licenses))
+ lines.append('LIC_FILES_CHKSUM = "%s"' % ' \\\n '.join(lic_files_chksum))
+ lines.append('')
+
+ # Replace the placeholder so we get the values in the right place in the recipe file
+ try:
+ pos = lines_before.index('##LICENSE_PLACEHOLDER##')
+ except ValueError:
+ pos = -1
+ if pos == -1:
+ lines_before.extend(lines)
+ else:
+ lines_before[pos:pos+1] = lines
+
handled.append(('license', licvalues))
return licvalues
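
The ##LICENSE_PLACEHOLDER## mechanism lets an earlier handler reserve the spot where the license block should land; the slice assignment then swaps the single placeholder line for the whole generated block. In isolation (recipe content invented):

    lines_before = ['SUMMARY = "example"', '##LICENSE_PLACEHOLDER##', 'SRC_URI = "..."']
    lines = ['LICENSE = "MIT"', 'LIC_FILES_CHKSUM = "file://COPYING;md5=..."', '']

    pos = lines_before.index('##LICENSE_PLACEHOLDER##')
    lines_before[pos:pos+1] = lines   # replace the one placeholder line with the block
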
@@ -816,7 +1010,7 @@ def get_license_md5sums(d, static_only=False):
md5sums = {}
if not static_only:
# Gather md5sums of license files in common license dir
- commonlicdir = d.getVar('COMMON_LICENSE_DIR', True)
+ commonlicdir = d.getVar('COMMON_LICENSE_DIR')
for fn in os.listdir(commonlicdir):
md5value = bb.utils.md5_file(os.path.join(commonlicdir, fn))
md5sums[md5value] = fn
@@ -859,6 +1053,7 @@ def get_license_md5sums(d, static_only=False):
md5sums['3b83ef96387f14655fc854ddc3c6bd57'] = 'Apache-2.0'
md5sums['385c55653886acac3821999a3ccd17b3'] = 'Artistic-1.0 | GPL-2.0' # some perl modules
md5sums['54c7042be62e169199200bc6477f04d1'] = 'BSD-3-Clause'
+ md5sums['bfe1f75d606912a4111c90743d6c7325'] = 'MPL-1.1'
return md5sums
def crunch_license(licfile):
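
The table above identifies license files by fingerprinting their entire contents, so the lookup reduces to an md5 of the file against a known map. A self-contained sketch using hashlib in place of bb.utils.md5_file (the Apache-2.0 checksum is taken from the table above):

    import hashlib

    def md5_file(fn):
        with open(fn, 'rb') as f:
            return hashlib.md5(f.read()).hexdigest()

    # assuming a LICENSE file exists in the current directory
    md5sums = {'3b83ef96387f14655fc854ddc3c6bd57': 'Apache-2.0'}
    license = md5sums.get(md5_file('LICENSE'), 'Unknown')
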
@@ -904,6 +1099,10 @@ def crunch_license(licfile):
crunched_md5sums['1daebd9491d1e8426900b4fa5a422814'] = 'LGPLv2.1'
# https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3
crunched_md5sums['2ebfb3bb49b9a48a075cc1425e7f4129'] = 'LGPLv3'
+ # https://raw.githubusercontent.com/eclipse/mosquitto/v1.4.14/epl-v10
+ crunched_md5sums['efe2cb9a35826992b9df68224e3c2628'] = 'EPL-1.0'
+ # https://raw.githubusercontent.com/eclipse/mosquitto/v1.4.14/edl-v10
+ crunched_md5sums['0a9c78c0a398d1bbce4a166757d60387'] = 'EDL-1.0'
lictext = []
with open(licfile, 'r', errors='surrogateescape') as f:
for line in f:
@@ -936,7 +1135,7 @@ def guess_license(srctree, d):
md5sums = get_license_md5sums(d)
licenses = []
- licspecs = ['*LICEN[CS]E*', 'COPYING*', '*[Ll]icense*', 'LEGAL*', '[Ll]egal*', '*GPL*', 'README.lic*', 'COPYRIGHT*', '[Cc]opyright*']
+ licspecs = ['*LICEN[CS]E*', 'COPYING*', '*[Ll]icense*', 'LEGAL*', '[Ll]egal*', '*GPL*', 'README.lic*', 'COPYRIGHT*', '[Cc]opyright*', 'e[dp]l-v10']
licfiles = []
for root, dirs, files in os.walk(srctree):
for fn in files:
@@ -986,7 +1185,7 @@ def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=None, pn
return outlicenses
def read_pkgconfig_provides(d):
- pkgdatadir = d.getVar('PKGDATA_DIR', True)
+ pkgdatadir = d.getVar('PKGDATA_DIR')
pkgmap = {}
for fn in glob.glob(os.path.join(pkgdatadir, 'shlibs2', '*.pclist')):
with open(fn, 'r') as f:
@@ -1047,6 +1246,25 @@ def convert_debian(debpath):
varname = value_map.get(key, None)
if varname:
values[varname] = value
+ postinst = os.path.join(debpath, 'postinst')
+ postrm = os.path.join(debpath, 'postrm')
+ preinst = os.path.join(debpath, 'preinst')
+ prerm = os.path.join(debpath, 'prerm')
+ sfiles = [postinst, postrm, preinst, prerm]
+ for sfile in sfiles:
+ if os.path.isfile(sfile):
+ logger.info("Converting %s file to recipe function..." %
+ os.path.basename(sfile).upper())
+ content = []
+ with open(sfile) as f:
+ for line in f:
+ if "#!/" in line:
+ continue
+ line = line.rstrip("\n")
+ if line.strip():
+ content.append(line)
+ if content:
+ values[os.path.basename(sfile)] = content
#if depends:
# values['DEPENDS'] = ' '.join(depends)
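
To make the conversion above concrete: each Debian maintainer script has its shebang and blank lines stripped, and whatever remains is stored under the script's name for the pkg_* function generation shown earlier. The core loop in isolation (path invented):

    content = []
    with open('debian/postinst') as f:
        for line in f:
            if "#!/" in line:        # drop the shebang
                continue
            line = line.rstrip("\n")
            if line.strip():         # drop blank lines
                content.append(line)
    # a postinst containing only '#!/bin/sh' and 'ldconfig'
    # ends up as content == ['ldconfig']
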
@@ -1082,6 +1300,7 @@ def register_commands(subparsers):
description='Creates a new recipe from a source tree')
parser_create.add_argument('source', help='Path or URL to source')
parser_create.add_argument('-o', '--outfile', help='Specify filename for recipe to create')
+ parser_create.add_argument('-p', '--provides', help='Specify an alias for the item provided by the recipe')
parser_create.add_argument('-m', '--machine', help='Make recipe machine-specific as opposed to architecture-specific', action='store_true')
parser_create.add_argument('-x', '--extract-to', metavar='EXTRACTPATH', help='Assuming source is a URL, fetch it and extract it to the directory specified as %(metavar)s')
parser_create.add_argument('-N', '--name', help='Name to use within recipe (PN)')
@@ -1089,7 +1308,13 @@ def register_commands(subparsers):
parser_create.add_argument('-b', '--binary', help='Treat the source tree as something that should be installed verbatim (no compilation, same directory structure)', action='store_true')
parser_create.add_argument('--also-native', help='Also add native variant (i.e. support building recipe for the build host as well as the target machine)', action='store_true')
parser_create.add_argument('--src-subdir', help='Specify subdirectory within source tree to use', metavar='SUBDIR')
- parser_create.add_argument('-a', '--autorev', help='When fetching from a git repository, set SRCREV in the recipe to a floating revision instead of fixed', action="store_true")
+ group = parser_create.add_mutually_exclusive_group()
+ group.add_argument('-a', '--autorev', help='When fetching from a git repository, set SRCREV in the recipe to a floating revision instead of fixed', action="store_true")
+ group.add_argument('-S', '--srcrev', help='Source revision to fetch if fetching from an SCM such as git (default latest)')
+ parser_create.add_argument('-B', '--srcbranch', help='Branch in source repository if fetching from an SCM such as git (default master)')
parser_create.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
+ parser_create.add_argument('--fetch-dev', action="store_true", help='For npm, also fetch devDependencies')
+ parser_create.add_argument('--devtool', action="store_true", help=argparse.SUPPRESS)
+ parser_create.add_argument('--mirrors', action="store_true", help='Enable PREMIRRORS and MIRRORS for source tree fetching (disabled by default).')
parser_create.set_defaults(func=create_recipe)
diff --git a/scripts/lib/recipetool/create_buildsys.py b/scripts/lib/recipetool/create_buildsys.py
index e914e53aab..3cb02766c8 100644
--- a/scripts/lib/recipetool/create_buildsys.py
+++ b/scripts/lib/recipetool/create_buildsys.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import logging
@@ -863,6 +853,10 @@ class SpecFileRecipeHandler(RecipeHandler):
break
if len(foundvalues) == len(valuemap):
break
+ # Drop values containing unexpanded RPM macros
+ for k in list(foundvalues.keys()):
+ if '%' in foundvalues[k]:
+ del foundvalues[k]
if 'PV' in foundvalues:
if not validate_pv(foundvalues['PV']):
del foundvalues['PV']
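
The new check simply discards any spec-file value still containing an unexpanded %{...} macro, since such values are useless without RPM's macro expansion. Equivalent standalone logic with an invented example:

    foundvalues = {'PN': 'foo', 'PV': '%{version}'}
    for k in list(foundvalues.keys()):
        if '%' in foundvalues[k]:
            del foundvalues[k]
    # foundvalues == {'PN': 'foo'}
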
diff --git a/scripts/lib/recipetool/create_buildsys_python.py b/scripts/lib/recipetool/create_buildsys_python.py
index e41d81a317..adfa377956 100644
--- a/scripts/lib/recipetool/create_buildsys_python.py
+++ b/scripts/lib/recipetool/create_buildsys_python.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Mentor Graphics Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import ast
import codecs
@@ -41,11 +31,11 @@ def tinfoil_init(instance):
class PythonRecipeHandler(RecipeHandler):
- base_pkgdeps = ['python-core']
- excluded_pkgdeps = ['python-dbg']
- # os.path is provided by python-core
+ base_pkgdeps = ['python3-core']
+ excluded_pkgdeps = ['python3-dbg']
+ # os.path is provided by python3-core
assume_provided = ['builtins', 'os.path']
- # Assumes that the host python builtin_module_names is sane for target too
+ # Assumes that the host python3 builtin_module_names is sane for target too
assume_provided = assume_provided + list(sys.builtin_module_names)
bbvar_map = {
@@ -164,8 +154,13 @@ class PythonRecipeHandler(RecipeHandler):
if 'buildsystem' in handled:
return False
- if not RecipeHandler.checkfiles(srctree, ['setup.py']):
- return
+ # Check for non-zero size setup.py files
+ setupfiles = RecipeHandler.checkfiles(srctree, ['setup.py'])
+ for fn in setupfiles:
+ if os.path.getsize(fn):
+ break
+ else:
+ return False
# setup.py is always parsed to get at certain required information, such as
# distutils vs setuptools
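
The replacement check relies on Python's for/else: the else branch runs only if the loop never breaks, i.e. only when every setup.py found was empty. The same pattern in isolation (file sizes invented):

    setup_sizes = {'a/setup.py': 0, 'b/setup.py': 120}
    for fn, size in setup_sizes.items():
        if size:
            break          # found a non-empty setup.py, carry on
    else:
        # only reached if no iteration hit the break
        print('all setup.py files are empty')
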
@@ -225,9 +220,9 @@ class PythonRecipeHandler(RecipeHandler):
self.apply_info_replacements(info)
if uses_setuptools:
- classes.append('setuptools')
+ classes.append('setuptools3')
else:
- classes.append('distutils')
+ classes.append('distutils3')
if license_str:
for i, line in enumerate(lines_before):
@@ -292,7 +287,7 @@ class PythonRecipeHandler(RecipeHandler):
for feature, feature_reqs in extras_req.items():
unmapped_deps.difference_update(feature_reqs)
- feature_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
+ feature_req_deps = ('python3-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
lines_after.append('PACKAGECONFIG[{}] = ",,,{}"'.format(feature.lower(), ' '.join(feature_req_deps)))
inst_reqs = set()
@@ -303,7 +298,7 @@ class PythonRecipeHandler(RecipeHandler):
if inst_reqs:
unmapped_deps.difference_update(inst_reqs)
- inst_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
+ inst_req_deps = ('python3-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
lines_after.append('# WARNING: the following rdepends are from setuptools install_requires. These')
lines_after.append('# upstream names may not correspond exactly to bitbake package names.')
lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(inst_req_deps)))
@@ -356,6 +351,8 @@ class PythonRecipeHandler(RecipeHandler):
# Naive mapping of setup() arguments to PKG-INFO field names
for d in [info, non_literals]:
for key, value in list(d.items()):
+ if key is None:
+ continue
new_key = _map(key)
if new_key != key:
del d[key]
@@ -364,7 +361,7 @@ class PythonRecipeHandler(RecipeHandler):
return info, 'setuptools' in imported_modules, non_literals, extensions
def get_setup_args_info(self, setupscript='./setup.py'):
- cmd = ['python', setupscript]
+ cmd = ['python3', setupscript]
info = {}
keys = set(self.bbvar_map.keys())
keys |= set(self.setuparg_list_fields)
@@ -398,7 +395,7 @@ class PythonRecipeHandler(RecipeHandler):
def get_setup_byline(self, fields, setupscript='./setup.py'):
info = {}
- cmd = ['python', setupscript]
+ cmd = ['python3', setupscript]
cmd.extend('--' + self.setuparg_map.get(f, f.lower()) for f in fields)
try:
info_lines = self.run_command(cmd, cwd=os.path.dirname(setupscript)).splitlines()
@@ -512,7 +509,7 @@ class PythonRecipeHandler(RecipeHandler):
except (OSError, subprocess.CalledProcessError):
pass
else:
- for line in dep_output.decode('utf-8').splitlines():
+ for line in dep_output.splitlines():
line = line.rstrip()
dep, filename = line.split('\t', 1)
if filename.endswith('/setup.py'):
@@ -532,11 +529,11 @@ class PythonRecipeHandler(RecipeHandler):
def parse_pkgdata_for_python_packages(self):
suffixes = [t[0] for t in imp.get_suffixes()]
- pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR', True)
+ pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
ldata = tinfoil.config_data.createCopy()
- bb.parse.handle('classes/python-dir.bbclass', ldata, True)
- python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR', True)
+ bb.parse.handle('classes/python3-dir.bbclass', ldata, True)
+ python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR')
dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
python_dirs = [python_sitedir + os.sep,
@@ -591,7 +588,7 @@ class PythonRecipeHandler(RecipeHandler):
if 'stderr' not in popenargs:
popenargs['stderr'] = subprocess.STDOUT
try:
- return subprocess.check_output(cmd, **popenargs)
+ return subprocess.check_output(cmd, **popenargs).decode('utf-8')
except OSError as exc:
logger.error('Unable to run `{}`: {}', ' '.join(cmd), exc)
raise
diff --git a/scripts/lib/recipetool/create_kernel.py b/scripts/lib/recipetool/create_kernel.py
index 7dac59fd03..5740589a68 100644
--- a/scripts/lib/recipetool/create_kernel.py
+++ b/scripts/lib/recipetool/create_kernel.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import logging
@@ -41,7 +31,7 @@ class KernelRecipeHandler(RecipeHandler):
handled.append('buildsystem')
del lines_after[:]
del classes[:]
- template = os.path.join(tinfoil.config_data.getVar('COREBASE', True), 'meta-skeleton', 'recipes-kernel', 'linux', 'linux-yocto-custom.bb')
+ template = os.path.join(tinfoil.config_data.getVar('COREBASE'), 'meta-skeleton', 'recipes-kernel', 'linux', 'linux-yocto-custom.bb')
def handle_var(varname, origvalue, op, newlines):
if varname in ['SRCREV', 'SRCREV_machine']:
while newlines[-1].startswith('#'):
@@ -85,7 +75,7 @@ class KernelRecipeHandler(RecipeHandler):
elif varname == 'COMPATIBLE_MACHINE':
while newlines[-1].startswith('#'):
del newlines[-1]
- machine = tinfoil.config_data.getVar('MACHINE', True)
+ machine = tinfoil.config_data.getVar('MACHINE')
return machine, op, 0, True
return origvalue, op, 0, True
with open(template, 'r') as f:
diff --git a/scripts/lib/recipetool/create_kmod.py b/scripts/lib/recipetool/create_kmod.py
index 7cf188db21..85b5c48e53 100644
--- a/scripts/lib/recipetool/create_kmod.py
+++ b/scripts/lib/recipetool/create_kmod.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import logging
@@ -40,7 +30,7 @@ class KernelModuleRecipeHandler(RecipeHandler):
makefiles = []
- files = RecipeHandler.checkfiles(srctree, ['*.c', '*.h'], recursive=True)
+ files = RecipeHandler.checkfiles(srctree, ['*.c', '*.h'], recursive=True, excludedirs=['contrib', 'test', 'examples'])
if files:
for cfile in files:
# Look in same dir or parent for Makefile
@@ -141,7 +131,7 @@ class KernelModuleRecipeHandler(RecipeHandler):
warnmsg = 'Unable to find means of passing kernel path into install makefile - if kernel path is hardcoded you will need to patch the makefile'
if warnmsg:
warnmsg += '. Note that the variable KERNEL_SRC will be passed in as the kernel source path.'
- logger.warn(warnmsg)
+ logger.warning(warnmsg)
lines_after.append('# %s' % warnmsg)
return True
diff --git a/scripts/lib/recipetool/create_npm.py b/scripts/lib/recipetool/create_npm.py
index e794614978..39429ebad3 100644
--- a/scripts/lib/recipetool/create_npm.py
+++ b/scripts/lib/recipetool/create_npm.py
@@ -2,20 +2,11 @@
#
# Copyright (C) 2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
+import sys
import logging
import subprocess
import tempfile
@@ -36,6 +27,27 @@ def tinfoil_init(instance):
class NpmRecipeHandler(RecipeHandler):
lockdownpath = None
+ def _ensure_npm(self, fixed_setup=False):
+ if not tinfoil.recipes_parsed:
+ tinfoil.parse_recipes()
+ try:
+ rd = tinfoil.parse_recipe('nodejs-native')
+ except bb.providers.NoProvider:
+ if fixed_setup:
+ msg = 'nodejs-native is required for npm but is not available within this SDK'
+ else:
+ msg = 'nodejs-native is required for npm but is not available - you will likely need to add a layer that provides nodejs'
+ logger.error(msg)
+ return None
+ bindir = rd.getVar('STAGING_BINDIR_NATIVE')
+ npmpath = os.path.join(bindir, 'npm')
+ if not os.path.exists(npmpath):
+ tinfoil.build_targets('nodejs-native', 'addto_recipe_sysroot')
+ if not os.path.exists(npmpath):
+ logger.error('npm required to process specified source, but nodejs-native did not seem to populate it')
+ return None
+ return bindir
+
def _handle_license(self, data):
'''
Handle the license value from an npm package.json file
@@ -45,14 +57,31 @@ class NpmRecipeHandler(RecipeHandler):
license = data['license']
if isinstance(license, dict):
license = license.get('type', None)
+ if license:
+ if 'OR' in license:
+ license = license.replace('OR', '|')
+ license = license.replace('AND', '&')
+ license = license.replace(' ', '_')
+ if license[0] != '(':
+ license = '(' + license + ')'
+ else:
+ license = license.replace('AND', '&')
+ if license[0] == '(':
+ license = license[1:]
+ if license[-1] == ')':
+ license = license[:-1]
+ license = license.replace('MIT/X11', 'MIT')
+ license = license.replace('Public Domain', 'PD')
+ license = license.replace('SEE LICENSE IN EULA',
+ 'SEE-LICENSE-IN-EULA')
return license
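
Two hand-traced examples of the normalisation above (derived from the code itself, not from npm documentation):

    # 'OR' branch: operators become |/&, spaces become underscores,
    # and the expression is parenthesised if it was not already:
    #   'MIT OR Apache-2.0'  ->  '(MIT_|_Apache-2.0)'
    # plain branch: surrounding parentheses are stripped and known
    # aliases rewritten:
    #   '(MIT)'    ->  'MIT'
    #   'MIT/X11'  ->  'MIT'
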
- def _shrinkwrap(self, srctree, localfilesdir, extravalues, lines_before):
+ def _shrinkwrap(self, srctree, localfilesdir, extravalues, lines_before, d):
try:
- runenv = dict(os.environ, PATH=tinfoil.config_data.getVar('PATH', True))
+ runenv = dict(os.environ, PATH=d.getVar('PATH'))
bb.process.run('npm shrinkwrap', cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
except bb.process.ExecutionError as e:
- logger.warn('npm shrinkwrap failed:\n%s' % e.stdout)
+ logger.warning('npm shrinkwrap failed:\n%s' % e.stdout)
return
tmpfile = os.path.join(localfilesdir, 'npm-shrinkwrap.json')
@@ -61,20 +90,20 @@ class NpmRecipeHandler(RecipeHandler):
extravalues['extrafiles']['npm-shrinkwrap.json'] = tmpfile
lines_before.append('NPM_SHRINKWRAP := "${THISDIR}/${PN}/npm-shrinkwrap.json"')
- def _lockdown(self, srctree, localfilesdir, extravalues, lines_before):
- runenv = dict(os.environ, PATH=tinfoil.config_data.getVar('PATH', True))
+ def _lockdown(self, srctree, localfilesdir, extravalues, lines_before, d):
+ runenv = dict(os.environ, PATH=d.getVar('PATH'))
if not NpmRecipeHandler.lockdownpath:
NpmRecipeHandler.lockdownpath = tempfile.mkdtemp('recipetool-npm-lockdown')
bb.process.run('npm install lockdown --prefix %s' % NpmRecipeHandler.lockdownpath,
cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
relockbin = os.path.join(NpmRecipeHandler.lockdownpath, 'node_modules', 'lockdown', 'relock.js')
if not os.path.exists(relockbin):
- logger.warn('Could not find relock.js within lockdown directory; skipping lockdown')
+ logger.warning('Could not find relock.js within lockdown directory; skipping lockdown')
return
try:
bb.process.run('node %s' % relockbin, cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
except bb.process.ExecutionError as e:
- logger.warn('lockdown-relock failed:\n%s' % e.stdout)
+ logger.warning('lockdown-relock failed:\n%s' % e.stdout)
return
tmpfile = os.path.join(localfilesdir, 'lockdown.json')
@@ -83,7 +112,7 @@ class NpmRecipeHandler(RecipeHandler):
extravalues['extrafiles']['lockdown.json'] = tmpfile
lines_before.append('NPM_LOCKDOWN := "${THISDIR}/${PN}/lockdown.json"')
- def _handle_dependencies(self, d, deps, lines_before, srctree):
+ def _handle_dependencies(self, d, deps, optdeps, devdeps, lines_before, srctree):
import scriptutils
# If this isn't a single module we need to get the dependencies
# and add them to SRC_URI
@@ -91,15 +120,28 @@ class NpmRecipeHandler(RecipeHandler):
if varname == 'SRC_URI':
if not origvalue.startswith('npm://'):
src_uri = origvalue.split()
- changed = False
- for dep, depdata in deps.items():
- version = self.get_node_version(dep, depdata, d)
+ deplist = {}
+ for dep, depver in optdeps.items():
+ depdata = self.get_npm_data(dep, depver, d)
+ if self.check_npm_optional_dependency(depdata):
+ deplist[dep] = depdata
+ for dep, depver in devdeps.items():
+ depdata = self.get_npm_data(dep, depver, d)
+ if self.check_npm_optional_dependency(depdata):
+ deplist[dep] = depdata
+ for dep, depver in deps.items():
+ depdata = self.get_npm_data(dep, depver, d)
+ deplist[dep] = depdata
+
+ extra_urls = []
+ for dep, depdata in deplist.items():
+ version = depdata.get('version', None)
if version:
url = 'npm://registry.npmjs.org;name=%s;version=%s;subdir=node_modules/%s' % (dep, version, dep)
- scriptutils.fetch_uri(d, url, srctree)
- src_uri.append(url)
- changed = True
- if changed:
+ extra_urls.append(url)
+ if extra_urls:
+ scriptutils.fetch_url(tinfoil, ' '.join(extra_urls), None, srctree, logger)
+ src_uri.extend(extra_urls)
return src_uri, None, -1, True
return origvalue, None, 0, True
updated, newlines = bb.utils.edit_metadata(lines_before, ['SRC_URI'], varfunc)
@@ -112,40 +154,9 @@ class NpmRecipeHandler(RecipeHandler):
lines_before.append(line)
return updated
- def _replace_license_vars(self, srctree, lines_before, handled, extravalues, d):
- for item in handled:
- if isinstance(item, tuple):
- if item[0] == 'license':
- del item
- break
-
- calledvars = []
- def varfunc(varname, origvalue, op, newlines):
- if varname in ['LICENSE', 'LIC_FILES_CHKSUM']:
- for i, e in enumerate(reversed(newlines)):
- if not e.startswith('#'):
- stop = i
- while stop > 0:
- newlines.pop()
- stop -= 1
- break
- calledvars.append(varname)
- if len(calledvars) > 1:
- # The second time around, put the new license text in
- insertpos = len(newlines)
- handle_license_vars(srctree, newlines, handled, extravalues, d)
- return None, None, 0, True
- return origvalue, None, 0, True
- updated, newlines = bb.utils.edit_metadata(lines_before, ['LICENSE', 'LIC_FILES_CHKSUM'], varfunc)
- if updated:
- del lines_before[:]
- lines_before.extend(newlines)
- else:
- raise Exception('Did not find license variables')
-
def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
import bb.utils
- import oe
+ import oe.package
from collections import OrderedDict
if 'buildsystem' in handled:
@@ -157,6 +168,12 @@ class NpmRecipeHandler(RecipeHandler):
files = RecipeHandler.checkfiles(srctree, ['package.json'])
if files:
+ d = bb.data.createCopy(tinfoil.config_data)
+ npm_bindir = self._ensure_npm()
+ if not npm_bindir:
+ sys.exit(14)
+ d.prependVar('PATH', '%s:' % npm_bindir)
+
data = read_package_json(files[0])
if 'name' in data and 'version' in data:
extravalues['PN'] = data['name']
@@ -168,26 +185,27 @@ class NpmRecipeHandler(RecipeHandler):
if 'homepage' in data:
extravalues['HOMEPAGE'] = data['homepage']
- deps = data.get('dependencies', {})
- updated = self._handle_dependencies(tinfoil.config_data, deps, lines_before, srctree)
- if updated:
- # We need to redo the license stuff
- self._replace_license_vars(srctree, lines_before, handled, extravalues, tinfoil.config_data)
+ fetchdev = extravalues['fetchdev'] or None
+ deps, optdeps, devdeps = self.get_npm_package_dependencies(data, fetchdev)
+ self._handle_dependencies(d, deps, optdeps, devdeps, lines_before, srctree)
# Shrinkwrap
localfilesdir = tempfile.mkdtemp(prefix='recipetool-npm')
- self._shrinkwrap(srctree, localfilesdir, extravalues, lines_before)
+ self._shrinkwrap(srctree, localfilesdir, extravalues, lines_before, d)
# Lockdown
- self._lockdown(srctree, localfilesdir, extravalues, lines_before)
+ self._lockdown(srctree, localfilesdir, extravalues, lines_before, d)
# Split each npm module out to its own package
npmpackages = oe.package.npm_split_package_dirs(srctree)
+ licvalues = None
for item in handled:
if isinstance(item, tuple):
if item[0] == 'license':
licvalues = item[1]
break
+ if not licvalues:
+ licvalues = handle_license_vars(srctree, lines_before, handled, extravalues, d)
if licvalues:
# Augment the license list with information we have in the packages
licenses = {}
@@ -205,14 +223,10 @@ class NpmRecipeHandler(RecipeHandler):
packages = OrderedDict((x,y[0]) for x,y in npmpackages.items())
packages['${PN}'] = ''
pkglicenses = split_pkg_licenses(licvalues, packages, lines_after, licenses)
- all_licenses = list(set([item for pkglicense in pkglicenses.values() for item in pkglicense]))
- # Go back and update the LICENSE value since we have a bit more
- # information than when that was written out (and we know all apply
- # vs. there being a choice, so we can join them with &)
- for i, line in enumerate(lines_before):
- if line.startswith('LICENSE = '):
- lines_before[i] = 'LICENSE = "%s"' % ' & '.join(all_licenses)
- break
+ all_licenses = list(set([item.replace('_', ' ') for pkglicense in pkglicenses.values() for item in pkglicense]))
+ if '&' in all_licenses:
+ all_licenses.remove('&')
+ extravalues['LICENSE'] = ' & '.join(all_licenses)
# Need to move S setting after inherit npm
for i, line in enumerate(lines_before):
@@ -249,17 +263,59 @@ class NpmRecipeHandler(RecipeHandler):
# FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
# (split out from _getdependencies())
- def get_node_version(self, pkg, version, d):
+ def get_npm_data(self, pkg, version, d):
import bb.fetch2
pkgfullname = pkg
if version != '*' and not '/' in version:
pkgfullname += "@'%s'" % version
logger.debug(2, "Calling getdeps on %s" % pkg)
- runenv = dict(os.environ, PATH=d.getVar('PATH', True))
+ runenv = dict(os.environ, PATH=d.getVar('PATH'))
fetchcmd = "npm view %s --json" % pkgfullname
output, _ = bb.process.run(fetchcmd, stderr=subprocess.STDOUT, env=runenv, shell=True)
data = self._parse_view(output)
- return data.get('version', None)
+ return data
+
+ # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
+ # (split out from _getdependencies())
+ def get_npm_package_dependencies(self, pdata, fetchdev):
+ dependencies = pdata.get('dependencies', {})
+ optionalDependencies = pdata.get('optionalDependencies', {})
+ dependencies.update(optionalDependencies)
+ if fetchdev:
+ devDependencies = pdata.get('devDependencies', {})
+ dependencies.update(devDependencies)
+ else:
+ devDependencies = {}
+ depsfound = {}
+ optdepsfound = {}
+ devdepsfound = {}
+ for dep in dependencies:
+ if dep in optionalDependencies:
+ optdepsfound[dep] = dependencies[dep]
+ elif dep in devDependencies:
+ devdepsfound[dep] = dependencies[dep]
+ else:
+ depsfound[dep] = dependencies[dep]
+ return depsfound, optdepsfound, devdepsfound
+
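
Given a package.json such as the fragment below, the new helper splits the dependencies into the three dicts consumed by _handle_dependencies (devDependencies are included only when --fetch-dev was passed); here handler stands for an NpmRecipeHandler instance:

    pdata = {
        'dependencies': {'express': '4.x'},
        'optionalDependencies': {'fsevents': '1.x'},
        'devDependencies': {'mocha': '5.x'},
    }
    deps, optdeps, devdeps = handler.get_npm_package_dependencies(pdata, True)
    # deps    == {'express': '4.x'}
    # optdeps == {'fsevents': '1.x'}
    # devdeps == {'mocha': '5.x'}
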
+ # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
+ # (split out from _getdependencies())
+ def check_npm_optional_dependency(self, pdata):
+ pkg_os = pdata.get('os', None)
+ if pkg_os:
+ if not isinstance(pkg_os, list):
+ pkg_os = [pkg_os]
+ blacklist = False
+ for item in pkg_os:
+ if item.startswith('!'):
+ blacklist = True
+ break
+ if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os:
+ pkg = pdata.get('name', 'Unnamed package')
+ logger.debug(2, "Skipping %s since it's incompatible with Linux" % pkg)
+ return False
+ return True
+
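
A quick hand-traced summary of the os-field logic above:

    # pkg_os == ['darwin']  -> no '!' entries and 'linux' absent -> skipped
    # pkg_os == ['!win32']  -> blacklist form, '!linux' absent   -> kept
    # pkg_os == ['!linux']  -> explicitly excludes Linux         -> skipped
    # no 'os' field at all  -> kept
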
def register_recipe_handlers(handlers):
handlers.append((NpmRecipeHandler(), 60))
diff --git a/scripts/lib/recipetool/edit.py b/scripts/lib/recipetool/edit.py
new file mode 100644
index 0000000000..94bdf7bca4
--- /dev/null
+++ b/scripts/lib/recipetool/edit.py
@@ -0,0 +1,44 @@
+# Recipe creation tool - edit plugin
+#
+# This sub-command edits the recipe and appends for the specified target
+#
+# Example: recipetool edit busybox
+#
+# Copyright (C) 2018 Mentor Graphics Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import errno
+import logging
+import os
+import re
+import subprocess
+import sys
+import scriptutils
+
+
+logger = logging.getLogger('recipetool')
+tinfoil = None
+
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+def edit(args):
+ import oe.recipeutils
+
+ recipe_path = tinfoil.get_recipe_file(args.target)
+ appends = tinfoil.get_file_appends(recipe_path)
+
+ return scriptutils.run_editor([recipe_path] + appends, logger)
+
+
+def register_commands(subparsers):
+ parser = subparsers.add_parser('edit',
+ help='Edit the recipe and appends for the specified target. This obeys $VISUAL if set, otherwise $EDITOR, otherwise vi.')
+ parser.add_argument('target', help='Target recipe/provide to edit')
+ parser.set_defaults(func=edit, parserecipes=True)
diff --git a/scripts/lib/recipetool/newappend.py b/scripts/lib/recipetool/newappend.py
index fbdd7bcef5..08e2474dc4 100644
--- a/scripts/lib/recipetool/newappend.py
+++ b/scripts/lib/recipetool/newappend.py
@@ -7,18 +7,8 @@
#
# Copyright (C) 2015 Christopher Larson <kergoth@gmail.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import argparse
import errno
@@ -39,18 +29,6 @@ def tinfoil_init(instance):
tinfoil = instance
-def _get_recipe_file(cooker, pn):
- import oe.recipeutils
- recipefile = oe.recipeutils.pn_to_recipe(cooker, pn)
- if not recipefile:
- skipreasons = oe.recipeutils.get_unavailable_reasons(cooker, pn)
- if skipreasons:
- logger.error('\n'.join(skipreasons))
- else:
- logger.error("Unable to find any recipe file matching %s" % pn)
- return recipefile
-
-
def layer(layerpath):
if not os.path.exists(os.path.join(layerpath, 'conf', 'layer.conf')):
raise argparse.ArgumentTypeError('{0!r} must be a path to a valid layer'.format(layerpath))
@@ -60,7 +38,7 @@ def layer(layerpath):
def newappend(args):
import oe.recipeutils
- recipe_path = _get_recipe_file(tinfoil.cooker, args.target)
+ recipe_path = tinfoil.get_recipe_file(args.target)
rd = tinfoil.config_data.createCopy()
rd.setVar('FILE', recipe_path)
@@ -70,11 +48,11 @@ def newappend(args):
return 1
if not path_ok:
- logger.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.', os.path.join(args.destlayer, 'conf', 'layer.conf'), os.path.dirname(append_path))
+ logger.warning('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.', os.path.join(args.destlayer, 'conf', 'layer.conf'), os.path.dirname(append_path))
- layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS', True).split()]
+ layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
if not os.path.abspath(args.destlayer) in layerdirs:
- logger.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
+ logger.warning('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
if not os.path.exists(append_path):
bb.utils.mkdirhier(os.path.dirname(append_path))
@@ -86,7 +64,7 @@ def newappend(args):
return 1
if args.edit:
- return scriptutils.run_editor([append_path, recipe_path])
+ return scriptutils.run_editor([append_path, recipe_path], logger)
else:
print(append_path)
diff --git a/scripts/lib/recipetool/setvar.py b/scripts/lib/recipetool/setvar.py
index 85701c06a9..f8e2ee75fb 100644
--- a/scripts/lib/recipetool/setvar.py
+++ b/scripts/lib/recipetool/setvar.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -51,7 +41,7 @@ def setvar(args):
if args.recipe_only:
patches = [oe.recipeutils.patch_recipe_file(args.recipefile, varvalues, patch=args.patch)]
else:
- rd = oe.recipeutils.parse_recipe(tinfoil.cooker, args.recipefile, None)
+ rd = tinfoil.parse_recipe_file(args.recipefile, False)
if not rd:
return 1
patches = oe.recipeutils.patch_recipe(rd, args.recipefile, varvalues, patch=args.patch)
diff --git a/scripts/lib/wic/utils/__init__.py b/scripts/lib/resulttool/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/scripts/lib/wic/utils/__init__.py
+++ b/scripts/lib/resulttool/__init__.py
diff --git a/scripts/lib/resulttool/log.py b/scripts/lib/resulttool/log.py
new file mode 100644
index 0000000000..f1bfd99500
--- /dev/null
+++ b/scripts/lib/resulttool/log.py
@@ -0,0 +1,97 @@
+# resulttool - Show logs
+#
+# Copyright (c) 2019 Garmin International
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+import os
+import resulttool.resultutils as resultutils
+
+def show_ptest(result, ptest, logger):
+ logdata = resultutils.ptestresult_get_log(result, ptest)
+ if logdata is not None:
+ print(logdata)
+ return 0
+
+ print("ptest '%s' log not found" % ptest)
+ return 1
+
+def show_reproducible(result, reproducible, logger):
+ try:
+ print(result['reproducible'][reproducible]['diffoscope.text'])
+ return 0
+
+ except KeyError:
+ print("reproducible '%s' not found" % reproducible)
+ return 1
+
+def log(args, logger):
+ results = resultutils.load_resultsdata(args.source)
+
+ ptest_count = sum(1 for _, _, _, r in resultutils.test_run_results(results) if 'ptestresult.sections' in r)
+ if ptest_count > 1 and not args.prepend_run:
+ print("%i ptest sections found. '--prepend-run' is required" % ptest_count)
+ return 1
+
+ for _, run_name, _, r in resultutils.test_run_results(results):
+ if args.dump_ptest and 'ptestresult.sections' in r:
+ for name, ptest in r['ptestresult.sections'].items():
+ logdata = resultutils.ptestresult_get_log(r, name)
+ if logdata is not None:
+ dest_dir = args.dump_ptest
+ if args.prepend_run:
+ dest_dir = os.path.join(dest_dir, run_name)
+
+ os.makedirs(dest_dir, exist_ok=True)
+ dest = os.path.join(dest_dir, '%s.log' % name)
+ print(dest)
+ with open(dest, 'w') as f:
+ f.write(logdata)
+
+ if args.raw_ptest:
+ rawlog = resultutils.ptestresult_get_rawlogs(r)
+ if rawlog is not None:
+ print(rawlog)
+ else:
+ print('Raw ptest logs not found')
+ return 1
+
+ if args.raw_reproducible:
+ if 'reproducible.rawlogs' in r:
+ print(r['reproducible.rawlogs']['log'])
+ else:
+ print('Raw reproducible logs not found')
+ return 1
+
+ for ptest in args.ptest:
+ if show_ptest(r, ptest, logger):
+ return 1
+
+ for reproducible in args.reproducible:
+ if show_reproducible(r, reproducible, logger):
+ return 1
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser = subparsers.add_parser('log', help='show logs',
+ description='show the logs from test results',
+ group='analysis')
+ parser.set_defaults(func=log)
+ parser.add_argument('source',
+ help='the results file/directory/URL to import')
+ parser.add_argument('--ptest', action='append', default=[],
+ help='show logs for a ptest')
+ parser.add_argument('--dump-ptest', metavar='DIR',
+ help='Dump all ptest log files to the specified directory.')
+ parser.add_argument('--reproducible', action='append', default=[],
+ help='show logs for a reproducible test')
+ parser.add_argument('--prepend-run', action='store_true',
+ help='''Dump ptest results to a subdirectory named after the test run when using --dump-ptest.
+ Required if more than one test run is present in the result file''')
+ parser.add_argument('--raw', action='store_true',
+ help='show raw (ptest) logs. Deprecated. Alias for "--raw-ptest"', dest='raw_ptest')
+ parser.add_argument('--raw-ptest', action='store_true',
+ help='show raw ptest log')
+ parser.add_argument('--raw-reproducible', action='store_true',
+ help='show raw reproducible build logs')
+
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
new file mode 100755
index 0000000000..ecb27c5933
--- /dev/null
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -0,0 +1,235 @@
+# test case management tool - manual execution from testopia test cases
+#
+# Copyright (c) 2018, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import json
+import os
+import sys
+import datetime
+import re
+import copy
+from oeqa.core.runner import OETestResultJSONHelper
+
+
+def load_json_file(f):
+ with open(f, "r") as filedata:
+ return json.load(filedata)
+
+def write_json_file(f, json_data):
+ os.makedirs(os.path.dirname(f), exist_ok=True)
+ with open(f, 'w') as filedata:
+ filedata.write(json.dumps(json_data, sort_keys=True, indent=4))
+
+class ManualTestRunner(object):
+
+ def _get_test_module(self, case_file):
+ return os.path.basename(case_file).split('.')[0]
+
+ def _get_input(self, config):
+ while True:
+ output = input('{} = '.format(config))
+ if re.match('^[a-z0-9-.]+$', output):
+ break
+ print('Only lowercase alphanumeric characters, hyphens and dots are allowed. Please try again.')
+ return output
+
+ def _get_available_config_options(self, config_options, test_module, target_config):
+ avail_config_options = None
+ if test_module in config_options:
+ avail_config_options = config_options[test_module].get(target_config)
+ return avail_config_options
+
+ def _choose_config_option(self, options):
+ while True:
+ output = input('Option index number = ')
+ if output in options:
+ break
+ print('Only an integer index from the available configuration options above is allowed. Please try again.')
+ return options[output]
+
+ def _get_config(self, config_options, test_module):
+ from oeqa.utils.metadata import get_layers
+ from oeqa.utils.commands import get_bb_var
+ from resulttool.resultutils import store_map
+
+ layers = get_layers(get_bb_var('BBLAYERS'))
+ configurations = {}
+ configurations['LAYERS'] = layers
+ configurations['STARTTIME'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ configurations['TEST_TYPE'] = 'manual'
+ configurations['TEST_MODULE'] = test_module
+
+ extra_config = set(store_map['manual']) - set(configurations)
+ for config in sorted(extra_config):
+ avail_config_options = self._get_available_config_options(config_options, test_module, config)
+ if avail_config_options:
+ print('---------------------------------------------')
+ print('These are the available options for configuration #%s:' % config)
+ print('---------------------------------------------')
+ for option, _ in sorted(avail_config_options.items(), key=lambda x: int(x[0])):
+ print('%s: %s' % (option, avail_config_options[option]))
+ print('Please select a configuration option by entering its integer index number.')
+ value_conf = self._choose_config_option(avail_config_options)
+ print('---------------------------------------------\n')
+ else:
+ print('---------------------------------------------')
+ print('This is configuration #%s. Please provide a configuration value (use "None" if not applicable).' % config)
+ print('---------------------------------------------')
+ value_conf = self._get_input('Configuration Value')
+ print('---------------------------------------------\n')
+ configurations[config] = value_conf
+ return configurations
+
+ def _execute_test_steps(self, case):
+ test_result = {}
+ print('------------------------------------------------------------------------')
+ print('Executing test case: %s' % case['test']['@alias'])
+ print('------------------------------------------------------------------------')
+ print('You have a total of %s test steps to execute.' % len(case['test']['execution']))
+ print('------------------------------------------------------------------------\n')
+ for step, _ in sorted(case['test']['execution'].items(), key=lambda x: int(x[0])):
+ print('Step %s: %s' % (step, case['test']['execution'][step]['action']))
+ expected_output = case['test']['execution'][step]['expected_results']
+ if expected_output:
+ print('Expected output: %s' % expected_output)
+ while True:
+ done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
+ result_types = {'p':'PASSED',
+ 'f':'FAILED',
+ 'b':'BLOCKED',
+ 's':'SKIPPED'}
+ if done in result_types:
+ res = result_types[done]
+ if res == 'FAILED':
+ log_input = input('\nPlease enter the error and a description of the log (e.g. log:211 Error Bitbake)\n')
+ test_result.update({case['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
+ else:
+ test_result.update({case['test']['@alias']: {'status': '%s' % res}})
+ break
+ print('Invalid input!')
+ return test_result
+
+ def _get_write_dir(self):
+ return os.environ['BUILDDIR'] + '/tmp/log/manual/'
+
+ def run_test(self, case_file, config_options_file, testcase_config_file):
+ test_module = self._get_test_module(case_file)
+ cases = load_json_file(case_file)
+ config_options = {}
+ if config_options_file:
+ config_options = load_json_file(config_options_file)
+ configurations = self._get_config(config_options, test_module)
+ result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
+ test_results = {}
+ if testcase_config_file:
+ test_case_config = load_json_file(testcase_config_file)
+ test_case_to_execute = test_case_config['testcases']
+ for case in copy.deepcopy(cases):
+ if case['test']['@alias'] not in test_case_to_execute:
+ cases.remove(case)
+
+ print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
+ for c in cases:
+ test_result = self._execute_test_steps(c)
+ test_results.update(test_result)
+ return configurations, result_id, self._get_write_dir(), test_results
+
+ def _get_true_false_input(self, input_message):
+ yes_list = ['Y', 'YES']
+ no_list = ['N', 'NO']
+ while True:
+ more_config_option = input(input_message).upper()
+ if more_config_option in yes_list or more_config_option in no_list:
+ break
+ print('Invalid input!')
+ if more_config_option in no_list:
+ return False
+ return True
+
+ def make_config_option_file(self, logger, case_file, config_options_file):
+ config_options = {}
+ if config_options_file:
+ config_options = load_json_file(config_options_file)
+ new_test_module = self._get_test_module(case_file)
+ print('Creating configuration options file for test module: %s' % new_test_module)
+ new_config_options = {}
+
+ while True:
+ config_name = input('\nPlease provide test configuration to create:\n').upper()
+ new_config_options[config_name] = {}
+ while True:
+ config_value = self._get_input('Configuration possible option value')
+ config_option_index = len(new_config_options[config_name]) + 1
+ new_config_options[config_name][config_option_index] = config_value
+ more_config_option = self._get_true_false_input('\nAre there more configuration option values to add: (Y)es/(N)o\n')
+ if not more_config_option:
+ break
+ more_config = self._get_true_false_input('\nAre there more configurations to create: (Y)es/(N)o\n')
+ if not more_config:
+ break
+
+ if new_config_options:
+ config_options[new_test_module] = new_config_options
+ if not config_options_file:
+ config_options_file = os.path.join(self._get_write_dir(), 'manual_config_options.json')
+ write_json_file(config_options_file, config_options)
+ logger.info('Configuration option file created at %s' % config_options_file)
+
+ def make_testcase_config_file(self, logger, case_file, testcase_config_file):
+ if testcase_config_file:
+ if os.path.exists(testcase_config_file):
+ print('\nTest configuration file with name %s already exists. Please provide a unique file name' % (testcase_config_file))
+ return 0
+
+ if not testcase_config_file:
+ testcase_config_file = os.path.join(self._get_write_dir(), "testconfig_new.json")
+
+ testcase_config = {}
+ cases = load_json_file(case_file)
+ new_test_module = self._get_test_module(case_file)
+ new_testcase_config = {}
+ new_testcase_config['testcases'] = []
+
+ print('\nAdd testcases for this configuration file:')
+ for case in cases:
+ print('\n' + case['test']['@alias'])
+ add_tc_config = self._get_true_false_input('\nDo you want to add this test case to the test configuration: (Y)es/(N)o\n')
+ if add_tc_config:
+ new_testcase_config['testcases'].append(case['test']['@alias'])
+ write_json_file(testcase_config_file, new_testcase_config)
+ logger.info('Testcase Configuration file created at %s' % testcase_config_file)
+
+def manualexecution(args, logger):
+ testrunner = ManualTestRunner()
+ if args.make_config_options_file:
+ testrunner.make_config_option_file(logger, args.file, args.config_options_file)
+ return 0
+ if args.make_testcase_config_file:
+ testrunner.make_testcase_config_file(logger, args.file, args.testcase_config_file)
+ return 0
+ configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
+ resultjsonhelper = OETestResultJSONHelper()
+ resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('manualexecution', help='helper script for populating results during manual test execution.',
+ description='helper script for populating results during manual test execution. You can find manual test case JSON files in meta/lib/oeqa/manual/',
+ group='manualexecution')
+ parser_build.set_defaults(func=manualexecution)
+ parser_build.add_argument('file', help='specify the path to the manual test case JSON file. Note: please use \"\" to encapsulate the file path.')
+ parser_build.add_argument('-c', '--config-options-file', default='',
+ help='the config options file to import and use for available configuration option selection, or the file to create when used with -m')
+ parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
+ help='make the configuration options file based on provided inputs')
+ parser_build.add_argument('-t', '--testcase-config-file', default='',
+ help='the testcase configuration file used to run a selected set of test cases, or the file to create when used with -d')
+ parser_build.add_argument('-d', '--make-testcase-config-file', action='store_true',
+ help='make the testcase configuration file to run a set of test cases based on user selection') \ No newline at end of file
diff --git a/scripts/lib/resulttool/merge.py b/scripts/lib/resulttool/merge.py
new file mode 100644
index 0000000000..18b4825a18
--- /dev/null
+++ b/scripts/lib/resulttool/merge.py
@@ -0,0 +1,46 @@
+# resulttool - merge multiple testresults.json files into a file or directory
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import json
+import resulttool.resultutils as resultutils
+
+def merge(args, logger):
+ configvars = {}
+ if not args.not_add_testseries:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
+ if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
+ results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, configvars=configvars)
+ resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map, configvars=configvars)
+ resultutils.save_resultsdata(results, args.target_results)
+ else:
+ results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, configvars=configvars)
+ if os.path.exists(args.target_results):
+ resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map, configvars=configvars)
+ resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
+
+    logger.info('Merged results to %s' % args.target_results)
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('merge', help='merge test result files/directories/URLs',
+ description='merge the results from multiple files/directories/URLs into the target file or directory',
+ group='setup')
+ parser_build.set_defaults(func=merge)
+ parser_build.add_argument('base_results',
+ help='the results file/directory/URL to import')
+ parser_build.add_argument('target_results',
+ help='the target file or directory to merge the base_results with')
+ parser_build.add_argument('-t', '--not-add-testseries', action='store_true',
+ help='do not add testseries configuration to results')
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
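As the two branches above show, merging either appends the base results into a target directory/URL tree or flattens everything into a single target file. A minimal sketch of the same operation using the resultutils API introduced by this series directly (paths are illustrative):

    import resulttool.resultutils as resultutils

    results = {}
    resultutils.append_resultsdata(results, "base/testresults.json")
    resultutils.append_resultsdata(results, "extra/testresults.json")
    resultutils.save_resultsdata(results, "merged-dir")  # writes one testresults.json per bucket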
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
new file mode 100644
index 0000000000..9f952951b3
--- /dev/null
+++ b/scripts/lib/resulttool/regression.py
@@ -0,0 +1,186 @@
+# resulttool - regression analysis
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import resulttool.resultutils as resultutils
+import json
+
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+def compare_result(logger, base_name, target_name, base_result, target_result):
+ base_result = base_result.get('result')
+ target_result = target_result.get('result')
+ result = {}
+ if base_result and target_result:
+ for k in base_result:
+ base_testcase = base_result[k]
+ base_status = base_testcase.get('status')
+ if base_status:
+ target_testcase = target_result.get(k, {})
+ target_status = target_testcase.get('status')
+ if base_status != target_status:
+ result[k] = {'base': base_status, 'target': target_status}
+ else:
+                logger.error('Failed to retrieve base test case status: %s' % k)
+ if result:
+ resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
+ for k in sorted(result):
+ resultstring += ' %s: %s -> %s\n' % (k, result[k]['base'], result[k]['target'])
+ else:
+ resultstring = "Match: %s\n %s" % (base_name, target_name)
+ return result, resultstring
+
+def get_results(logger, source):
+ return resultutils.load_resultsdata(source, configmap=resultutils.regression_map)
+
+def regression(args, logger):
+ base_results = get_results(logger, args.base_result)
+ target_results = get_results(logger, args.target_result)
+
+ regression_common(args, logger, base_results, target_results)
+
+def regression_common(args, logger, base_results, target_results):
+ if args.base_result_id:
+ base_results = resultutils.filter_resultsdata(base_results, args.base_result_id)
+ if args.target_result_id:
+ target_results = resultutils.filter_resultsdata(target_results, args.target_result_id)
+
+ matches = []
+ regressions = []
+ notfound = []
+
+ for a in base_results:
+ if a in target_results:
+ base = list(base_results[a].keys())
+ target = list(target_results[a].keys())
+ # We may have multiple base/targets which are for different configurations. Start by
+ # removing any pairs which match
+ for c in base.copy():
+ for b in target.copy():
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if not res:
+ matches.append(resstr)
+ base.remove(c)
+ target.remove(b)
+ break
+ # Should only now see regressions, we may not be able to match multiple pairs directly
+ for c in base:
+ for b in target:
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if res:
+ regressions.append(resstr)
+ else:
+ notfound.append("%s not found in target" % a)
+ print("\n".join(sorted(matches)))
+ print("\n".join(sorted(regressions)))
+ print("\n".join(sorted(notfound)))
+
+ return 0
+
+def regression_git(args, logger):
+ base_results = {}
+ target_results = {}
+
+ tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+ repo = GitRepo(args.repo)
+
+ revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch)
+
+ if args.branch2:
+ revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2)
+ if not len(revs2):
+ logger.error("No revisions found to compare against")
+ return 1
+ if not len(revs):
+ logger.error("No revision to report on found")
+ return 1
+ else:
+ if len(revs) < 2:
+ logger.error("Only %d tester revisions found, unable to generate report" % len(revs))
+ return 1
+
+ # Pick revisions
+ if args.commit:
+ if args.commit_number:
+ logger.warning("Ignoring --commit-number as --commit was specified")
+ index1 = gitarchive.rev_find(revs, 'commit', args.commit)
+ elif args.commit_number:
+ index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
+ else:
+ index1 = len(revs) - 1
+
+ if args.branch2:
+ revs2.append(revs[index1])
+ index1 = len(revs2) - 1
+ revs = revs2
+
+ if args.commit2:
+ if args.commit_number2:
+ logger.warning("Ignoring --commit-number2 as --commit2 was specified")
+ index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
+ elif args.commit_number2:
+ index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
+ else:
+ if index1 > 0:
+ index2 = index1 - 1
+            # Find the closest matching commit number for comparison
+            # In future we could check the commit is a common ancestor and
+            # continue back if not, but this is good enough for now
+ while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
+ index2 = index2 - 1
+ else:
+ logger.error("Unable to determine the other commit, use "
+ "--commit2 or --commit-number2 to specify it")
+ return 1
+
+ logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2]))
+
+ base_results = resultutils.git_get_result(repo, revs[index1][2])
+ target_results = resultutils.git_get_result(repo, revs[index2][2])
+
+ regression_common(args, logger, base_results, target_results)
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+
+ parser_build = subparsers.add_parser('regression', help='regression file/directory analysis',
+ description='regression analysis comparing the base set of results to the target results',
+ group='analysis')
+ parser_build.set_defaults(func=regression)
+ parser_build.add_argument('base_result',
+ help='base result file/directory/URL for the comparison')
+ parser_build.add_argument('target_result',
+ help='target result file/directory/URL to compare with')
+ parser_build.add_argument('-b', '--base-result-id', default='',
+ help='(optional) filter the base results to this result ID')
+ parser_build.add_argument('-t', '--target-result-id', default='',
+ help='(optional) filter the target results to this result ID')
+
+ parser_build = subparsers.add_parser('regression-git', help='regression git analysis',
+ description='regression analysis comparing base result set to target '
+ 'result set',
+ group='analysis')
+ parser_build.set_defaults(func=regression_git)
+ parser_build.add_argument('repo',
+ help='the git repository containing the data')
+    parser_build.add_argument('-b', '--base-result-id', default='',
+                              help='(optional) filter the base results to this result ID; by default, regression pairs '
+                                   'are selected based on configurations')
+    parser_build.add_argument('-t', '--target-result-id', default='',
+                              help='(optional) filter the target results to this result ID; by default, regression pairs '
+                                   'are selected based on configurations')
+
+ parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+    parser_build.add_argument('--branch2', help="Branch to find comparison revisions in")
+ parser_build.add_argument('--commit', help="Revision to search for")
+ parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
+ parser_build.add_argument('--commit2', help="Revision to compare with")
+ parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
+
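The pairing loops above rely on the contract that compare_result() returns an empty dict when two runs agree and a populated one when any shared test case changed status. A toy illustration of that contract (test name and result IDs are made up):

    import logging
    from resulttool.regression import compare_result

    base   = {'result': {'t1': {'status': 'PASSED'}}}
    target = {'result': {'t1': {'status': 'FAILED'}}}
    res, resstr = compare_result(logging.getLogger(), 'base-id', 'target-id', base, target)
    # res == {'t1': {'base': 'PASSED', 'target': 'FAILED'}} -> this pair is a regression;
    # with equal statuses res would be {} and the pair would be counted as a match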
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
new file mode 100644
index 0000000000..692dd7a851
--- /dev/null
+++ b/scripts/lib/resulttool/report.py
@@ -0,0 +1,290 @@
+# test result tool - report text based test results
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import glob
+import json
+import resulttool.resultutils as resultutils
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+
+class ResultsTextReport(object):
+ def __init__(self):
+ self.ptests = {}
+ self.ltptests = {}
+ self.ltpposixtests = {}
+ self.result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
+ 'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
+ 'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}
+
+
+ def handle_ptest_result(self, k, status, result, machine):
+ if machine not in self.ptests:
+ self.ptests[machine] = {}
+
+ if k == 'ptestresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ptestresult.sections']:
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {
+ 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
+ 'failed_testcases': [], "testcases": set(),
+ }
+ if 'duration' in result['ptestresult.sections'][suite]:
+ self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+ if 'timeout' in result['ptestresult.sections'][suite]:
+ self.ptests[machine][suite]['duration'] += " T"
+ return True
+
+ # process test result
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return True
+
+        # Handle suite names that themselves contain a dot, e.g. 'glib-2.0'
+ if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ptestresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {
+ 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
+ 'failed_testcases': [], "testcases": set(),
+ }
+
+ # do not process duplicate results
+ if test in self.ptests[machine][suite]["testcases"]:
+ print("Warning duplicate ptest result '{}.{}' for {}".format(suite, test, machine))
+ return False
+
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ptests[machine][suite][tk] += 1
+ self.ptests[machine][suite]["testcases"].add(test)
+ return True
+
+ def handle_ltptest_result(self, k, status, result, machine):
+ if machine not in self.ltptests:
+ self.ltptests[machine] = {}
+
+ if k == 'ltpresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ltpresult.sections']:
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if 'duration' in result['ltpresult.sections'][suite]:
+ self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
+ if 'timeout' in result['ltpresult.sections'][suite]:
+ self.ltptests[machine][suite]['duration'] += " T"
+ return
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return
+        # Handle suite names that themselves contain a dot, e.g. 'glib-2.0'
+ if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ print("split2: %s %s %s" % (suite, suite1, test))
+ if suite + "." + suite1 in result['ltpresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ltptests[machine][suite][tk] += 1
+
+ def handle_ltpposixtest_result(self, k, status, result, machine):
+ if machine not in self.ltpposixtests:
+ self.ltpposixtests[machine] = {}
+
+ if k == 'ltpposixresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ltpposixresult.sections']:
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if 'duration' in result['ltpposixresult.sections'][suite]:
+ self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
+ return
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return
+        # Handle suite names that themselves contain a dot, e.g. 'glib-2.0'
+ if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ltpposixresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ltpposixtests[machine][suite][tk] += 1
+
+ def get_aggregated_test_result(self, logger, testresult, machine):
+ test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
+ result = testresult.get('result', [])
+ for k in result:
+ test_status = result[k].get('status', [])
+ if k.startswith("ptestresult."):
+ if not self.handle_ptest_result(k, test_status, result, machine):
+ continue
+ elif k.startswith("ltpresult."):
+ self.handle_ltptest_result(k, test_status, result, machine)
+ elif k.startswith("ltpposixresult."):
+ self.handle_ltpposixtest_result(k, test_status, result, machine)
+
+ # process result if it was not skipped by a handler
+ for tk in self.result_types:
+ if test_status in self.result_types[tk]:
+ test_count_report[tk] += 1
+ if test_status in self.result_types['failed']:
+ test_count_report['failed_testcases'].append(k)
+ return test_count_report
+
+ def print_test_report(self, template_file_name, test_count_reports):
+ from jinja2 import Environment, FileSystemLoader
+ script_path = os.path.dirname(os.path.realpath(__file__))
+ file_loader = FileSystemLoader(script_path + '/template')
+ env = Environment(loader=file_loader, trim_blocks=True)
+ template = env.get_template(template_file_name)
+ havefailed = False
+ reportvalues = []
+ machines = []
+ cols = ['passed', 'failed', 'skipped']
+ maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 ,'ltptest': 0, 'ltpposixtest': 0}
+ for line in test_count_reports:
+ total_tested = line['passed'] + line['failed'] + line['skipped']
+ vals = {}
+ vals['result_id'] = line['result_id']
+ vals['testseries'] = line['testseries']
+ vals['sort'] = line['testseries'] + "_" + line['result_id']
+ vals['failed_testcases'] = line['failed_testcases']
+            for k in cols:
+                # guard against division by zero when a result set has no counted tests
+                vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f') if total_tested else 0)
+ for k in maxlen:
+ if k in vals and len(vals[k]) > maxlen[k]:
+ maxlen[k] = len(vals[k])
+ reportvalues.append(vals)
+ if line['failed_testcases']:
+ havefailed = True
+ if line['machine'] not in machines:
+ machines.append(line['machine'])
+ reporttotalvalues = {}
+ for k in cols:
+ reporttotalvalues[k] = '%s' % sum([line[k] for line in test_count_reports])
+ reporttotalvalues['count'] = '%s' % len(test_count_reports)
+ for (machine, report) in self.ptests.items():
+ for ptest in self.ptests[machine]:
+ if len(ptest) > maxlen['ptest']:
+ maxlen['ptest'] = len(ptest)
+ for (machine, report) in self.ltptests.items():
+ for ltptest in self.ltptests[machine]:
+ if len(ltptest) > maxlen['ltptest']:
+ maxlen['ltptest'] = len(ltptest)
+ for (machine, report) in self.ltpposixtests.items():
+ for ltpposixtest in self.ltpposixtests[machine]:
+ if len(ltpposixtest) > maxlen['ltpposixtest']:
+ maxlen['ltpposixtest'] = len(ltpposixtest)
+ output = template.render(reportvalues=reportvalues,
+ reporttotalvalues=reporttotalvalues,
+ havefailed=havefailed,
+ machines=machines,
+ ptests=self.ptests,
+ ltptests=self.ltptests,
+ ltpposixtests=self.ltpposixtests,
+ maxlen=maxlen)
+ print(output)
+
+ def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test):
+ test_count_reports = []
+ configmap = resultutils.store_map
+ if use_regression_map:
+ configmap = resultutils.regression_map
+ if commit:
+ if tag:
+ logger.warning("Ignoring --tag as --commit was specified")
+ tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+ repo = GitRepo(source_dir)
+ revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
+ rev_index = gitarchive.rev_find(revs, 'commit', commit)
+ testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
+ elif tag:
+ repo = GitRepo(source_dir)
+ testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
+ else:
+ testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
+ if raw_test:
+ raw_results = {}
+ for testsuite in testresults:
+ result = testresults[testsuite].get(raw_test, {})
+ if result:
+ raw_results[testsuite] = result
+ if raw_results:
+ print(json.dumps(raw_results, sort_keys=True, indent=4))
+ else:
+ print('Could not find raw test result for %s' % raw_test)
+ return 0
+ for testsuite in testresults:
+ for resultid in testresults[testsuite]:
+ skip = False
+ result = testresults[testsuite][resultid]
+ machine = result['configuration']['MACHINE']
+
+                # Check to see if there are already results for these kinds of tests for the machine
+ for key in result['result'].keys():
+ testtype = str(key).split('.')[0]
+ if ((machine in self.ltptests and testtype == "ltpiresult" and self.ltptests[machine]) or
+ (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
+ print("Already have test results for %s on %s, skipping %s" %(str(key).split('.')[0], machine, resultid))
+ skip = True
+ break
+ if skip:
+ break
+
+ test_count_report = self.get_aggregated_test_result(logger, result, machine)
+ test_count_report['machine'] = machine
+ test_count_report['testseries'] = result['configuration']['TESTSERIES']
+ test_count_report['result_id'] = resultid
+ test_count_reports.append(test_count_report)
+ self.print_test_report('test_report_full_text.txt', test_count_reports)
+
+def report(args, logger):
+ report = ResultsTextReport()
+ report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag, args.use_regression_map,
+ args.raw_test_only)
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('report', help='summarise test results',
+ description='print a text-based summary of the test results',
+ group='analysis')
+ parser_build.set_defaults(func=report)
+    parser_build.add_argument('source_dir',
+                              help='source file/directory/URL that contains the test result files to summarise')
+ parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+ parser_build.add_argument('--commit', help="Revision to report")
+    parser_build.add_argument('-t', '--tag', default='',
+                              help='treat source_dir as a git repository and report on the specified tag')
+    parser_build.add_argument('-m', '--use_regression_map', action='store_true',
+                              help='use the "regression_map" instead of the default "store_map" for the report')
+    parser_build.add_argument('-r', '--raw_test_only', default='',
+                              help='output only the raw test result for the given test result ID')
+
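All three handlers above parse result keys of the form "<type>.<suite>.<test>", with a second split to cope with suite names that themselves contain a dot, and per-suite metadata under the reserved "<type>.sections" key. A minimal example of the data shape consumed (suite, test name and duration are illustrative):

    result = {
        "ptestresult.glibc.tst-example": {"status": "PASS"},
        "ptestresult.sections": {"glibc": {"duration": "42"}},
    }
    prefix, suite, test = "ptestresult.glibc.tst-example".split(".", 2)
    # -> ('ptestresult', 'glibc', 'tst-example')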
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
new file mode 100644
index 0000000000..f0ae8ec1c5
--- /dev/null
+++ b/scripts/lib/resulttool/resultutils.py
@@ -0,0 +1,221 @@
+# resulttool - common library/utility functions
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import base64
+import zlib
+import json
+import scriptpath
+import copy
+import urllib.request
+import posixpath
+scriptpath.add_oe_lib_path()
+
+flatten_map = {
+ "oeselftest": [],
+ "runtime": [],
+ "sdk": [],
+ "sdkext": [],
+ "manual": []
+}
+regression_map = {
+ "oeselftest": ['TEST_TYPE', 'MACHINE'],
+ "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
+ "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+ "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+ "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
+}
+store_map = {
+ "oeselftest": ['TEST_TYPE'],
+ "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
+ "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+ "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+ "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
+}
+
+def is_url(p):
+ """
+ Helper for determining if the given path is a URL
+ """
+ return p.startswith('http://') or p.startswith('https://')
+
+extra_configvars = {'TESTSERIES': ''}
+
+#
+# Load the json file and append the results data into the provided results dict
+#
+def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
+ if type(f) is str:
+ if is_url(f):
+ with urllib.request.urlopen(f) as response:
+ data = json.loads(response.read().decode('utf-8'))
+ url = urllib.parse.urlparse(f)
+ testseries = posixpath.basename(posixpath.dirname(url.path))
+ else:
+ with open(f, "r") as filedata:
+ data = json.load(filedata)
+ testseries = os.path.basename(os.path.dirname(f))
+ else:
+ data = f
+ for res in data:
+ if "configuration" not in data[res] or "result" not in data[res]:
+ raise ValueError("Test results data without configuration or result section?")
+ for config in configvars:
+ if config == "TESTSERIES" and "TESTSERIES" not in data[res]["configuration"]:
+ data[res]["configuration"]["TESTSERIES"] = testseries
+ continue
+ if config not in data[res]["configuration"]:
+ data[res]["configuration"][config] = configvars[config]
+ testtype = data[res]["configuration"].get("TEST_TYPE")
+ if testtype not in configmap:
+ raise ValueError("Unknown test type %s" % testtype)
+ testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
+ if testpath not in results:
+ results[testpath] = {}
+ results[testpath][res] = data[res]
+
+#
+# Walk a directory and find/load results data
+# or load directly from a file
+#
+def load_resultsdata(source, configmap=store_map, configvars=extra_configvars):
+ results = {}
+ if is_url(source) or os.path.isfile(source):
+ append_resultsdata(results, source, configmap, configvars)
+ return results
+ for root, dirs, files in os.walk(source):
+ for name in files:
+ f = os.path.join(root, name)
+ if name == "testresults.json":
+ append_resultsdata(results, f, configmap, configvars)
+ return results
+
+def filter_resultsdata(results, resultid):
+ newresults = {}
+ for r in results:
+ for i in results[r]:
+            if i == resultid:
+ newresults[r] = {}
+ newresults[r][i] = results[r][i]
+ return newresults
+
+def strip_ptestresults(results):
+ newresults = copy.deepcopy(results)
+ for res in newresults:
+ if 'result' not in newresults[res]:
+ continue
+ if 'ptestresult.rawlogs' in newresults[res]['result']:
+ del newresults[res]['result']['ptestresult.rawlogs']
+ if 'ptestresult.sections' in newresults[res]['result']:
+ for i in newresults[res]['result']['ptestresult.sections']:
+ if 'log' in newresults[res]['result']['ptestresult.sections'][i]:
+ del newresults[res]['result']['ptestresult.sections'][i]['log']
+ return newresults
+
+def decode_log(logdata):
+ if isinstance(logdata, str):
+ return logdata
+ elif isinstance(logdata, dict):
+ if "compressed" in logdata:
+ data = logdata.get("compressed")
+ data = base64.b64decode(data.encode("utf-8"))
+ data = zlib.decompress(data)
+ try:
+ return data.decode("utf-8")
+ except UnicodeDecodeError:
+ return data
+ return None
+
+def ptestresult_get_log(results, section):
+ if 'ptestresult.sections' not in results:
+ return None
+ if section not in results['ptestresult.sections']:
+ return None
+
+ ptest = results['ptestresult.sections'][section]
+ if 'log' not in ptest:
+ return None
+ return decode_log(ptest['log'])
+
+def ptestresult_get_rawlogs(results):
+ if 'ptestresult.rawlogs' not in results:
+ return None
+ if 'log' not in results['ptestresult.rawlogs']:
+ return None
+ return decode_log(results['ptestresult.rawlogs']['log'])
+
+def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
+ for res in results:
+ if res:
+ dst = destdir + "/" + res + "/" + fn
+ else:
+ dst = destdir + "/" + fn
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ resultsout = results[res]
+ if not ptestjson:
+ resultsout = strip_ptestresults(results[res])
+ with open(dst, 'w') as f:
+ f.write(json.dumps(resultsout, sort_keys=True, indent=4))
+ for res2 in results[res]:
+ if ptestlogs and 'result' in results[res][res2]:
+ seriesresults = results[res][res2]['result']
+ rawlogs = ptestresult_get_rawlogs(seriesresults)
+ if rawlogs is not None:
+ with open(dst.replace(fn, "ptest-raw.log"), "w+") as f:
+ f.write(rawlogs)
+ if 'ptestresult.sections' in seriesresults:
+ for i in seriesresults['ptestresult.sections']:
+ sectionlog = ptestresult_get_log(seriesresults, i)
+ if sectionlog is not None:
+ with open(dst.replace(fn, "ptest-%s.log" % i), "w+") as f:
+ f.write(sectionlog)
+
+def git_get_result(repo, tags, configmap=store_map):
+ git_objs = []
+ for tag in tags:
+ files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
+ git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])
+
+ def parse_json_stream(data):
+ """Parse multiple concatenated JSON objects"""
+ objs = []
+ json_d = ""
+ for line in data.splitlines():
+ if line == '}{':
+ json_d += '}'
+ objs.append(json.loads(json_d))
+ json_d = '{'
+ else:
+ json_d += line
+ objs.append(json.loads(json_d))
+ return objs
+
+ # Optimize by reading all data with one git command
+ results = {}
+ for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
+ append_resultsdata(results, obj, configmap=configmap)
+
+ return results
+
+def test_run_results(results):
+ """
+ Convenient generator function that iterates over all test runs that have a
+ result section.
+
+ Generates a tuple of:
+ (result json file path, test run name, test run (dict), test run "results" (dict))
+ for each test run that has a "result" section
+ """
+ for path in results:
+ for run_name, test_run in results[path].items():
+            if 'result' not in test_run:
+ continue
+ yield path, run_name, test_run, test_run['result']
+
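The configmap tables above control where a result is bucketed: append_resultsdata() joins the listed configuration values into a path. A sketch with illustrative configuration values:

    from resulttool.resultutils import store_map

    config = {'TEST_TYPE': 'runtime', 'DISTRO': 'poky',
              'MACHINE': 'qemux86', 'IMAGE_BASENAME': 'core-image-minimal'}
    testpath = "/".join(config[i] for i in store_map[config['TEST_TYPE']])
    # -> 'runtime/poky/qemux86/core-image-minimal'
    # with flatten_map the list is empty, so every result lands in the '' bucket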
diff --git a/scripts/lib/resulttool/store.py b/scripts/lib/resulttool/store.py
new file mode 100644
index 0000000000..e0951f0a8f
--- /dev/null
+++ b/scripts/lib/resulttool/store.py
@@ -0,0 +1,104 @@
+# resulttool - store test results
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import tempfile
+import os
+import subprocess
+import json
+import shutil
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+scriptpath.add_oe_lib_path()
+import resulttool.resultutils as resultutils
+import oeqa.utils.gitarchive as gitarchive
+
+
+def store(args, logger):
+ tempdir = tempfile.mkdtemp(prefix='testresults.')
+ try:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
+ if args.extra_test_env:
+ configvars['EXTRA_TEST_ENV'] = args.extra_test_env
+ results = {}
+ logger.info('Reading files from %s' % args.source)
+ if resultutils.is_url(args.source) or os.path.isfile(args.source):
+ resultutils.append_resultsdata(results, args.source, configvars=configvars)
+ else:
+ for root, dirs, files in os.walk(args.source):
+ for name in files:
+ f = os.path.join(root, name)
+ if name == "testresults.json":
+ resultutils.append_resultsdata(results, f, configvars=configvars)
+ elif args.all:
+ dst = f.replace(args.source, tempdir + "/")
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ shutil.copyfile(f, dst)
+
+ revisions = {}
+
+ if not results and not args.all:
+ if args.allow_empty:
+ logger.info("No results found to store")
+ return 0
+ logger.error("No results found to store")
+ return 1
+
+ # Find the branch/commit/commit_count and ensure they all match
+ for suite in results:
+ for result in results[suite]:
+ config = results[suite][result]['configuration']['LAYERS']['meta']
+ revision = (config['commit'], config['branch'], str(config['commit_count']))
+ if revision not in revisions:
+ revisions[revision] = {}
+ if suite not in revisions[revision]:
+ revisions[revision][suite] = {}
+ revisions[revision][suite][result] = results[suite][result]
+
+ logger.info("Found %d revisions to store" % len(revisions))
+
+ for r in revisions:
+ results = revisions[r]
+ keywords = {'commit': r[0], 'branch': r[1], "commit_count": r[2]}
+ subprocess.check_call(["find", tempdir, "!", "-path", "./.git/*", "-delete"])
+ resultutils.save_resultsdata(results, tempdir, ptestlogs=True)
+
+ logger.info('Storing test result into git repository %s' % args.git_dir)
+
+ gitarchive.gitarchive(tempdir, args.git_dir, False, False,
+ "Results of {branch}:{commit}", "branch: {branch}\ncommit: {commit}", "{branch}",
+ False, "{branch}/{commit_count}-g{commit}/{tag_number}",
+ 'Test run #{tag_number} of {branch}:{commit}', '',
+ [], [], False, keywords, logger)
+
+ finally:
+ subprocess.check_call(["rm", "-rf", tempdir])
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('store', help='store test results into a git repository',
+ description='takes a results file or directory of results files and stores '
+ 'them into the destination git repository, splitting out the results '
+ 'files as configured',
+ group='setup')
+ parser_build.set_defaults(func=store)
+    parser_build.add_argument('source',
+                              help='source file/directory/URL that contains the test result files to be stored')
+ parser_build.add_argument('git_dir',
+ help='the location of the git repository to store the results in')
+ parser_build.add_argument('-a', '--all', action='store_true',
+ help='include all files, not just testresults.json files')
+ parser_build.add_argument('-e', '--allow-empty', action='store_true',
+ help='don\'t error if no results to store are found')
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
+ parser_build.add_argument('-t', '--extra-test-env', default='',
+ help='add extra test environment data to each result file configuration')
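The tag template passed to gitarchive above is expanded once per stored revision from the keywords collected out of the results; with illustrative values:

    keywords = {'commit': 'deadbee', 'branch': 'master', 'commit_count': '12345'}
    tag = "{branch}/{commit_count}-g{commit}/{tag_number}".format(tag_number=0, **keywords)
    # -> 'master/12345-gdeadbee/0' (the tag_number suffix is allocated by gitarchive)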
diff --git a/scripts/lib/resulttool/template/test_report_full_text.txt b/scripts/lib/resulttool/template/test_report_full_text.txt
new file mode 100644
index 0000000000..2efba2ef6f
--- /dev/null
+++ b/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -0,0 +1,79 @@
+==============================================================================================================
+Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Test Series'.ljust(maxlen['testseries']) }} | {{ 'ID'.ljust(maxlen['result_id']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
+{{ report.testseries.ljust(maxlen['testseries']) }} | {{ report.result_id.ljust(maxlen['result_id']) }} | {{ (report.passed|string).ljust(maxlen['passed']) }} | {{ (report.failed|string).ljust(maxlen['failed']) }} | {{ (report.skipped|string).ljust(maxlen['skipped']) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+{{ 'Total'.ljust(maxlen['testseries']) }} | {{ reporttotalvalues['count'].ljust(maxlen['result_id']) }} | {{ reporttotalvalues['passed'].ljust(maxlen['passed']) }} | {{ reporttotalvalues['failed'].ljust(maxlen['failed']) }} | {{ reporttotalvalues['skipped'].ljust(maxlen['skipped']) }}
+--------------------------------------------------------------------------------------------------------------
+
+{% for machine in machines %}
+{% if ptests[machine] %}
+==============================================================================================================
+{{ machine }} PTest Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ptest in ptests[machine] |sort %}
+{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[machine][ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[machine][ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[machine][ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[machine][ptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
+{% for machine in machines %}
+{% if ltptests[machine] %}
+==============================================================================================================
+{{ machine }} Ltp Test Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltptest in ltptests[machine] |sort %}
+{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[machine][ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[machine][ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[machine][ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[machine][ltptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
+{% for machine in machines %}
+{% if ltpposixtests[machine] %}
+==============================================================================================================
+{{ machine }} Ltp Posix Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltpposixtest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltpposixtest in ltpposixtests[machine] |sort %}
+{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[machine][ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[machine][ltpposixtest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
+
+==============================================================================================================
+Failed test cases (sorted by testseries, ID)
+==============================================================================================================
+{% if havefailed %}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
+{% if report.failed_testcases %}
+testseries | result_id : {{ report.testseries }} | {{ report.result_id }}
+{% for testcase in report.failed_testcases %}
+ {{ testcase }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+{% else %}
+There were no test failures
+{% endif %}
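The template keeps its pipe-separated columns aligned by padding every cell with str.ljust() against the maxlen widths precomputed in print_test_report(). The equivalent plain-Python operation (widths and values are illustrative):

    maxlen = {'testseries': 12, 'result_id': 20}
    row = '%s | %s' % ('oe-selftest'.ljust(maxlen['testseries']),
                       'some-result-id'.ljust(maxlen['result_id']))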
diff --git a/scripts/lib/scriptpath.py b/scripts/lib/scriptpath.py
index d00317e18d..f32326db3a 100644
--- a/scripts/lib/scriptpath.py
+++ b/scripts/lib/scriptpath.py
@@ -3,18 +3,8 @@
# Copyright (C) 2012-2014 Intel Corporation
# Copyright (C) 2011 Mentor Graphics Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/scripts/lib/scriptutils.py b/scripts/lib/scriptutils.py
index 5ccc027968..f92255d8dc 100644
--- a/scripts/lib/scriptutils.py
+++ b/scripts/lib/scriptutils.py
@@ -2,29 +2,65 @@
#
# Copyright (C) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-import sys
-import os
-import logging
-import glob
import argparse
+import glob
+import logging
+import os
+import random
+import shlex
+import shutil
+import string
import subprocess
+import sys
+import tempfile
+import threading
+import importlib
+from importlib import machinery
+
+class KeepAliveStreamHandler(logging.StreamHandler):
+ def __init__(self, keepalive=True, **kwargs):
+ super().__init__(**kwargs)
+ if keepalive is True:
+ keepalive = 5000 # default timeout
+ self._timeout = threading.Condition()
+ self._stop = False
+
+        # background thread waits on the condition; if the wait times out
+        # without a notification, emit a keepalive message
+        def thread():
+            while not self._stop:
+                with self._timeout:
+                    if not self._timeout.wait(keepalive):
+                        self.emit(logging.LogRecord("keepalive", logging.INFO,
+                                                    None, None, "Keepalive message", None, None))
-def logger_create(name):
+        self._thread = threading.Thread(target = thread, daemon = True)
+        self._thread.start()
+
+ def close(self):
+ # mark the thread to stop and notify it
+ self._stop = True
+ with self._timeout:
+ self._timeout.notify()
+ # wait for it to join
+ self._thread.join()
+ super().close()
+
+ def emit(self, record):
+ super().emit(record)
+ # trigger timer reset
+ with self._timeout:
+ self._timeout.notify()
+
+def logger_create(name, stream=None, keepalive=None):
logger = logging.getLogger(name)
- loggerhandler = logging.StreamHandler()
+ if keepalive is not None:
+ loggerhandler = KeepAliveStreamHandler(stream=stream, keepalive=keepalive)
+ else:
+ loggerhandler = logging.StreamHandler(stream=stream)
loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
logger.addHandler(loggerhandler)
logger.setLevel(logging.INFO)
@@ -32,35 +68,36 @@ def logger_create(name):
def logger_setup_color(logger, color='auto'):
from bb.msg import BBLogFormatter
- console = logging.StreamHandler(sys.stdout)
- formatter = BBLogFormatter("%(levelname)s: %(message)s")
- console.setFormatter(formatter)
- logger.handlers = [console]
- if color == 'always' or (color=='auto' and console.stream.isatty()):
- formatter.enable_color()
+
+ for handler in logger.handlers:
+ if (isinstance(handler, logging.StreamHandler) and
+ isinstance(handler.formatter, BBLogFormatter)):
+ if color == 'always' or (color == 'auto' and handler.stream.isatty()):
+ handler.formatter.enable_color()
def load_plugins(logger, plugins, pluginpath):
- import imp
def load_plugin(name):
logger.debug('Loading plugin %s' % name)
- fp, pathname, description = imp.find_module(name, [pluginpath])
- try:
- return imp.load_module(name, fp, pathname, description)
- finally:
- if fp:
- fp.close()
+        spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath])
+ if spec:
+ return spec.loader.load_module()
+
+ def plugin_name(filename):
+ return os.path.splitext(os.path.basename(filename))[0]
+ known_plugins = [plugin_name(p.__name__) for p in plugins]
logger.debug('Loading plugins from %s...' % pluginpath)
for fn in glob.glob(os.path.join(pluginpath, '*.py')):
- name = os.path.splitext(os.path.basename(fn))[0]
- if name != '__init__':
+ name = plugin_name(fn)
+ if name != '__init__' and name not in known_plugins:
plugin = load_plugin(name)
if hasattr(plugin, 'plugin_init'):
plugin.plugin_init(plugins)
plugins.append(plugin)
+
def git_convert_standalone_clone(repodir):
"""If specified directory is a git repository, ensure it's a standalone clone"""
import bb.process
@@ -72,49 +109,150 @@ def git_convert_standalone_clone(repodir):
bb.process.run('git repack -a', cwd=repodir)
os.remove(alternatesfile)
-def fetch_uri(d, uri, destdir, srcrev=None):
- """Fetch a URI to a local directory"""
- import bb.data
- bb.utils.mkdirhier(destdir)
- localdata = bb.data.createCopy(d)
- localdata.setVar('BB_STRICT_CHECKSUM', '')
- localdata.setVar('SRCREV', srcrev)
- ret = (None, None)
- olddir = os.getcwd()
+def _get_temp_recipe_dir(d):
+ # This is a little bit hacky but we need to find a place where we can put
+ # the recipe so that bitbake can find it. We're going to delete it at the
+ # end so it doesn't really matter where we put it.
+ bbfiles = d.getVar('BBFILES').split()
+ fetchrecipedir = None
+ for pth in bbfiles:
+ if pth.endswith('.bb'):
+ pthdir = os.path.dirname(pth)
+ if os.access(os.path.dirname(os.path.dirname(pthdir)), os.W_OK):
+ fetchrecipedir = pthdir.replace('*', 'recipetool')
+ if pthdir.endswith('workspace/recipes/*'):
+ # Prefer the workspace
+ break
+ return fetchrecipedir
+
+class FetchUrlFailure(Exception):
+ def __init__(self, url):
+ self.url = url
+ def __str__(self):
+ return "Failed to fetch URL %s" % self.url
+
+def fetch_url(tinfoil, srcuri, srcrev, destdir, logger, preserve_tmp=False, mirrors=False):
+ """
+ Fetch the specified URL using normal do_fetch and do_unpack tasks, i.e.
+ any dependencies that need to be satisfied in order to support the fetch
+ operation will be taken care of
+ """
+
+ import bb
+
+ checksums = {}
+ fetchrecipepn = None
+
+ # We need to put our temp directory under ${BASE_WORKDIR} otherwise
+ # we may have problems with the recipe-specific sysroot population
+ tmpparent = tinfoil.config_data.getVar('BASE_WORKDIR')
+ bb.utils.mkdirhier(tmpparent)
+ tmpdir = tempfile.mkdtemp(prefix='recipetool-', dir=tmpparent)
try:
- fetcher = bb.fetch2.Fetch([uri], localdata)
- for u in fetcher.ud:
- ud = fetcher.ud[u]
- ud.ignore_checksums = True
- fetcher.download()
- for u in fetcher.ud:
- ud = fetcher.ud[u]
- if ud.localpath.rstrip(os.sep) == localdata.getVar('DL_DIR', True).rstrip(os.sep):
- raise Exception('Local path is download directory - please check that the URI "%s" is correct' % uri)
- fetcher.unpack(destdir)
- for u in fetcher.ud:
- ud = fetcher.ud[u]
- if ud.method.recommends_checksum(ud):
- md5value = bb.utils.md5_file(ud.localpath)
- sha256value = bb.utils.sha256_file(ud.localpath)
- ret = (md5value, sha256value)
+ tmpworkdir = os.path.join(tmpdir, 'work')
+ logger.debug('fetch_url: temp dir is %s' % tmpdir)
+
+ fetchrecipedir = _get_temp_recipe_dir(tinfoil.config_data)
+ if not fetchrecipedir:
+            logger.error('Searched BBFILES but unable to find a writable place to put a temporary recipe')
+ sys.exit(1)
+ fetchrecipe = None
+ bb.utils.mkdirhier(fetchrecipedir)
+ try:
+ # Generate a dummy recipe so we can follow more or less normal paths
+ # for do_fetch and do_unpack
+ # I'd use tempfile functions here but underscores can be produced by that and those
+ # aren't allowed in recipe file names except to separate the version
+ rndstring = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
+ fetchrecipe = os.path.join(fetchrecipedir, 'tmp-recipetool-%s.bb' % rndstring)
+ fetchrecipepn = os.path.splitext(os.path.basename(fetchrecipe))[0]
+ logger.debug('Generating initial recipe %s for fetching' % fetchrecipe)
+ with open(fetchrecipe, 'w') as f:
+ # We don't want to have to specify LIC_FILES_CHKSUM
+ f.write('LICENSE = "CLOSED"\n')
+ # We don't need the cross-compiler
+ f.write('INHIBIT_DEFAULT_DEPS = "1"\n')
+ # We don't have the checksums yet so we can't require them
+ f.write('BB_STRICT_CHECKSUM = "ignore"\n')
+ f.write('SRC_URI = "%s"\n' % srcuri)
+ f.write('SRCREV = "%s"\n' % srcrev)
+ f.write('WORKDIR = "%s"\n' % tmpworkdir)
+ # Set S out of the way so it doesn't get created under the workdir
+ f.write('S = "%s"\n' % os.path.join(tmpdir, 'emptysrc'))
+ if not mirrors:
+ # We do not need PREMIRRORS since we are almost certainly
+ # fetching new source rather than something that has already
+ # been fetched. Hence, we disable them by default.
+ # However, we provide an option for users to enable it.
+ f.write('PREMIRRORS = ""\n')
+ f.write('MIRRORS = ""\n')
+
+ logger.info('Fetching %s...' % srcuri)
+
+ # FIXME this is too noisy at the moment
+
+ # Parse recipes so our new recipe gets picked up
+ tinfoil.parse_recipes()
+
+ def eventhandler(event):
+ if isinstance(event, bb.fetch2.MissingChecksumEvent):
+ checksums.update(event.checksums)
+ return True
+ return False
+
+ # Run the fetch + unpack tasks
+ res = tinfoil.build_targets(fetchrecipepn,
+ 'do_unpack',
+ handle_events=True,
+ extra_events=['bb.fetch2.MissingChecksumEvent'],
+ event_callback=eventhandler)
+ if not res:
+ raise FetchUrlFailure(srcuri)
+
+ # Remove unneeded directories
+ rd = tinfoil.parse_recipe(fetchrecipepn)
+ if rd:
+ pathvars = ['T', 'RECIPE_SYSROOT', 'RECIPE_SYSROOT_NATIVE']
+ for pathvar in pathvars:
+ path = rd.getVar(pathvar)
+ shutil.rmtree(path)
+ finally:
+ if fetchrecipe:
+ try:
+ os.remove(fetchrecipe)
+ except FileNotFoundError:
+ pass
+ try:
+ os.rmdir(fetchrecipedir)
+ except OSError as e:
+ import errno
+ if e.errno != errno.ENOTEMPTY:
+ raise
+
+ bb.utils.mkdirhier(destdir)
+ for fn in os.listdir(tmpworkdir):
+ shutil.move(os.path.join(tmpworkdir, fn), destdir)
+
finally:
- os.chdir(olddir)
- return ret
+ if not preserve_tmp:
+ shutil.rmtree(tmpdir)
+ tmpdir = None
-def run_editor(fn):
+ return checksums, tmpdir
+
+
+def run_editor(fn, logger=None):
if isinstance(fn, str):
- params = '"%s"' % fn
+ files = [fn]
else:
- params = ''
- for fnitem in fn:
- params += ' "%s"' % fnitem
+ files = fn
editor = os.getenv('VISUAL', os.getenv('EDITOR', 'vi'))
try:
- return subprocess.check_call('%s %s' % (editor, params), shell=True)
- except OSError as exc:
- logger.error("Execution of editor '%s' failed: %s", editor, exc)
+ return subprocess.check_call(shlex.split(editor) + files)
+ except subprocess.CalledProcessError as exc:
+ logger.error("Execution of '%s' failed: %s" % (editor, exc))
return 1
def is_src_url(param):
@@ -129,3 +267,13 @@ def is_src_url(param):
elif param.startswith('git@') or ('@' in param and param.endswith('.git')):
return True
return False
+
+def filter_src_subdirs(pth):
+ """
+ Filter out subdirectories of initial unpacked source trees that we do not care about.
+ Used by devtool and recipetool.
+ """
+ dirlist = os.listdir(pth)
+ filterout = ['git.indirectionsymlink', 'source-date-epoch']
+ dirlist = [x for x in dirlist if x not in filterout]
+ return dirlist
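A short sketch of the new keepalive support in use: if nothing is logged for the given number of seconds (the threading.Condition.wait() timeout), the background thread emits an INFO "Keepalive message" until the handler is closed. The stream and interval here are illustrative:

    import sys
    from scriptutils import logger_create  # assumes scripts/lib is on sys.path

    logger = logger_create('example', stream=sys.stdout, keepalive=30)
    logger.info('long-running step started')
    # during a quiet stretch a keepalive record is logged every 30 seconds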
diff --git a/scripts/lib/wic/__init__.py b/scripts/lib/wic/__init__.py
index 63c1d9c846..85567934ae 100644
--- a/scripts/lib/wic/__init__.py
+++ b/scripts/lib/wic/__init__.py
@@ -1,4 +1,10 @@
-import os, sys
+#!/usr/bin/env python3
+#
+# Copyright (c) 2007 Red Hat, Inc.
+# Copyright (c) 2011 Intel, Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
-cur_path = os.path.dirname(__file__) or '.'
-sys.path.insert(0, cur_path + '/3rdparty')
+class WicError(Exception):
+ pass
diff --git a/scripts/lib/wic/__version__.py b/scripts/lib/wic/__version__.py
deleted file mode 100644
index 5452a46712..0000000000
--- a/scripts/lib/wic/__version__.py
+++ /dev/null
@@ -1 +0,0 @@
-VERSION = "2.00"
diff --git a/scripts/lib/wic/canned-wks/common.wks.inc b/scripts/lib/wic/canned-wks/common.wks.inc
index 5cf2fd1f3e..89880b417b 100644
--- a/scripts/lib/wic/canned-wks/common.wks.inc
+++ b/scripts/lib/wic/canned-wks/common.wks.inc
@@ -1,3 +1,3 @@
# This file is included into 3 canned wks files from this directory
part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
-part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
+part / --source rootfs --use-uuid --fstype=ext4 --label platform --align 1024
diff --git a/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg b/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg
index a16bd6ac61..c58e74a853 100644
--- a/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg
+++ b/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg
@@ -1,11 +1,27 @@
# This is an example configuration file for syslinux.
-PROMPT 0
-TIMEOUT 10
-
+TIMEOUT 50
ALLOWOPTIONS 1
SERIAL 0 115200
+PROMPT 0
+
+UI vesamenu.c32
+menu title Select boot options
+menu tabmsg Press [Tab] to edit, [Return] to select
+
+DEFAULT Graphics console boot
+
+LABEL Graphics console boot
+KERNEL /vmlinuz
+APPEND label=boot rootwait
+
+LABEL Serial console boot
+KERNEL /vmlinuz
+APPEND label=boot rootwait console=ttyS0,115200
+
+LABEL Graphics console install
+KERNEL /vmlinuz
+APPEND label=install rootwait
-DEFAULT boot
-LABEL boot
+LABEL Serial console install
KERNEL /vmlinuz
-APPEND label=boot root=/dev/sda2 rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0
+APPEND label=install rootwait console=ttyS0,115200
diff --git a/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in b/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
new file mode 100644
index 0000000000..7300e65e32
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
@@ -0,0 +1,3 @@
+bootloader --ptable gpt
+part /boot --source rootfs --rootfs-dir=${IMAGE_ROOTFS}/boot --fstype=vfat --label boot --active --align 1024 --use-uuid --overhead-factor 1.0
+part / --source rootfs --fstype=ext4 --label root --align 1024 --exclude-path boot/
diff --git a/scripts/lib/wic/canned-wks/mkefidisk.wks b/scripts/lib/wic/canned-wks/mkefidisk.wks
index 696e94e3d7..9f534fe184 100644
--- a/scripts/lib/wic/canned-wks/mkefidisk.wks
+++ b/scripts/lib/wic/canned-wks/mkefidisk.wks
@@ -4,8 +4,8 @@
part /boot --source bootimg-efi --sourceparams="loader=grub-efi" --ondisk sda --label msdos --active --align 1024
-part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
+part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
part swap --ondisk sda --size 44 --label swap1 --fstype=swap
-bootloader --timeout=10 --append="rootwait rootfstype=ext4 console=ttyPCH0,115200 console=tty0 vmalloc=256MB snd-hda-intel.enable_msi=0"
+bootloader --ptable gpt --timeout=5 --append="rootfstype=ext4 console=ttyS0,115200 console=tty0"
diff --git a/scripts/lib/wic/canned-wks/mkgummidisk.wks b/scripts/lib/wic/canned-wks/mkgummidisk.wks
deleted file mode 100644
index 66a22f60bd..0000000000
--- a/scripts/lib/wic/canned-wks/mkgummidisk.wks
+++ /dev/null
@@ -1,11 +0,0 @@
-# short-description: Create an EFI disk image
-# long-description: Creates a partitioned EFI disk image that the user
-# can directly dd to boot media.
-
-part /boot --source bootimg-efi --sourceparams="loader=gummiboot" --ondisk sda --label msdos --active --align 1024
-
-part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
-
-part swap --ondisk sda --size 44 --label swap1 --fstype=swap
-
-bootloader --timeout=10 --append="rootwait rootfstype=ext4 console=ttyPCH0,115200 console=tty0 vmalloc=256MB snd-hda-intel.enable_msi=0"
diff --git a/scripts/lib/wic/canned-wks/mkhybridiso.wks b/scripts/lib/wic/canned-wks/mkhybridiso.wks
index 9d34e9b477..48c5ac4791 100644
--- a/scripts/lib/wic/canned-wks/mkhybridiso.wks
+++ b/scripts/lib/wic/canned-wks/mkhybridiso.wks
@@ -2,6 +2,6 @@
# long-description: Creates an EFI and legacy bootable hybrid ISO image
# which can be used on optical media as well as USB media.
-part /boot --source isoimage-isohybrid --sourceparams="loader=grub-efi,image_name=HYBRID_ISO_IMG" --ondisk cd --label HYBRIDISO --fstype=ext4
+part /boot --source isoimage-isohybrid --sourceparams="loader=grub-efi,image_name=HYBRID_ISO_IMG" --ondisk cd --label HYBRIDISO
bootloader --timeout=15 --append=""
diff --git a/scripts/lib/wic/canned-wks/mksystemd-bootdisk.wks b/scripts/lib/wic/canned-wks/mksystemd-bootdisk.wks
deleted file mode 100644
index df706e8aaa..0000000000
--- a/scripts/lib/wic/canned-wks/mksystemd-bootdisk.wks
+++ /dev/null
@@ -1,11 +0,0 @@
-# short-description: Create an EFI disk image
-# long-description: Creates a partitioned EFI disk image that the user
-# can directly dd to boot media.
-
-part /boot --source bootimg-efi --sourceparams="loader=systemd-boot" --ondisk sda --label msdos --active --align 1024
-
-part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
-
-part swap --ondisk sda --size 44 --label swap1 --fstype=swap
-
-bootloader --timeout=10 --append="rootwait rootfstype=ext4 console=ttyPCH0,115200 console=tty0 vmalloc=256MB snd-hda-intel.enable_msi=0"
diff --git a/scripts/lib/wic/canned-wks/qemuriscv.wks b/scripts/lib/wic/canned-wks/qemuriscv.wks
new file mode 100644
index 0000000000..12c68b7069
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/qemuriscv.wks
@@ -0,0 +1,3 @@
+# short-description: Create qcow2 image for RISC-V QEMU machines
+
+part / --source rootfs --fstype=ext4 --label root --align 4096 --size 5G
diff --git a/scripts/lib/wic/canned-wks/qemux86-directdisk.wks b/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
index a6518a0f45..c8d9f121b5 100644
--- a/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
+++ b/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
@@ -4,5 +4,5 @@
include common.wks.inc
-bootloader --timeout=0 --append="vga=0 uvesafb.mode_option=640x480-32 root=/dev/vda2 rw mem=256M ip=192.168.7.2::192.168.7.1:255.255.255.0 oprofile.timer=1 rootfstype=ext4 "
+bootloader --timeout=0 --append="vga=0 rw oprofile.timer=1 rootfstype=ext4 "
diff --git a/scripts/lib/wic/canned-wks/sdimage-bootpart.wks b/scripts/lib/wic/canned-wks/sdimage-bootpart.wks
index 7ffd632f4a..63bc4dab6a 100644
--- a/scripts/lib/wic/canned-wks/sdimage-bootpart.wks
+++ b/scripts/lib/wic/canned-wks/sdimage-bootpart.wks
@@ -2,5 +2,5 @@
# long-description: Creates a partitioned SD card image. Boot files
# are located in the first vfat partition.
-part /boot --source bootimg-partition --ondisk mmcblk --fstype=vfat --label boot --active --align 4 --size 16
-part / --source rootfs --ondisk mmcblk --fstype=ext4 --label root --align 4
+part /boot --source bootimg-partition --ondisk mmcblk0 --fstype=vfat --label boot --active --align 4 --size 16
+part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --label root --align 4
diff --git a/scripts/lib/wic/canned-wks/systemd-bootdisk.wks b/scripts/lib/wic/canned-wks/systemd-bootdisk.wks
new file mode 100644
index 0000000000..95d7b97a60
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/systemd-bootdisk.wks
@@ -0,0 +1,11 @@
+# short-description: Create an EFI disk image with systemd-boot
+# long-description: Creates a partitioned EFI disk image that the user
+# can directly dd to boot media. The selected bootloader is systemd-boot.
+
+part /boot --source bootimg-efi --sourceparams="loader=systemd-boot" --ondisk sda --label msdos --active --align 1024 --use-uuid
+
+part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
+
+part swap --ondisk sda --size 44 --label swap1 --fstype=swap --use-uuid
+
+bootloader --ptable gpt --timeout=5 --append="rootwait rootfstype=ext4 console=ttyS0,115200 console=tty0"
diff --git a/scripts/lib/wic/conf.py b/scripts/lib/wic/conf.py
deleted file mode 100644
index 070ec3096b..0000000000
--- a/scripts/lib/wic/conf.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os
-
-from wic.ksparser import KickStart, KickStartError
-from wic import msger
-from wic.utils import misc
-
-
-def get_siteconf():
- wic_path = os.path.dirname(__file__)
- eos = wic_path.find('scripts') + len('scripts')
- scripts_path = wic_path[:eos]
-
- return scripts_path + "/lib/image/config/wic.conf"
-
-class ConfigMgr(object):
- DEFAULTS = {
- 'common': {
- "distro_name": "Default Distribution",
- "plugin_dir": "/usr/lib/wic/plugins"}, # TODO use prefix also?
- 'create': {
- "tmpdir": '/var/tmp/wic',
- "outdir": './wic-output',
- "release": None,
- "logfile": None,
- "name_prefix": None,
- "name_suffix": None}
- }
-
- # make the manager class as singleton
- _instance = None
- def __new__(cls, *args, **kwargs):
- if not cls._instance:
- cls._instance = super(ConfigMgr, cls).__new__(cls, *args, **kwargs)
-
- return cls._instance
-
- def __init__(self, ksconf=None, siteconf=None):
- # reset config options
- self.reset()
-
- if not siteconf:
- siteconf = get_siteconf()
-
- # initial options from siteconf
- self._siteconf = siteconf
-
- if ksconf:
- self._ksconf = ksconf
-
- def reset(self):
- self.__ksconf = None
- self.__siteconf = None
- self.create = {}
-
- # initialize the values with defaults
- for sec, vals in self.DEFAULTS.items():
- setattr(self, sec, vals)
-
- def __set_ksconf(self, ksconf):
- if not os.path.isfile(ksconf):
- msger.error('Cannot find ks file: %s' % ksconf)
-
- self.__ksconf = ksconf
- self._parse_kickstart(ksconf)
- def __get_ksconf(self):
- return self.__ksconf
- _ksconf = property(__get_ksconf, __set_ksconf)
-
- def _parse_kickstart(self, ksconf=None):
- if not ksconf:
- return
-
- try:
- ksobj = KickStart(ksconf)
- except KickStartError as err:
- msger.error(str(err))
-
- self.create['ks'] = ksobj
- self.create['name'] = os.path.splitext(os.path.basename(ksconf))[0]
-
- self.create['name'] = misc.build_name(ksconf,
- self.create['release'],
- self.create['name_prefix'],
- self.create['name_suffix'])
-
-configmgr = ConfigMgr()
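
The ConfigMgr removed above held wic's global state as a process-wide
singleton created in __new__. A minimal sketch of that pattern, standard
library only; the class name is illustrative and not part of wic:

    class Singleton:
        _instance = None

        def __new__(cls):
            # Allocate on first use; every later call returns the same object.
            if cls._instance is None:
                cls._instance = super().__new__(cls)
            return cls._instance

    assert Singleton() is Singleton()

The rework below drops this shared manager in favor of passing parsed
options to the imager plugin explicitly.
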
diff --git a/scripts/lib/wic/config/wic.conf b/scripts/lib/wic/config/wic.conf
deleted file mode 100644
index a51bcb55eb..0000000000
--- a/scripts/lib/wic/config/wic.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-[common]
-; general settings
-distro_name = OpenEmbedded
-
-[create]
-; settings for create subcommand
diff --git a/scripts/lib/wic/creator.py b/scripts/lib/wic/creator.py
deleted file mode 100644
index 8f7d1503f5..0000000000
--- a/scripts/lib/wic/creator.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os, sys
-from optparse import OptionParser, SUPPRESS_HELP
-
-from wic import msger
-from wic.utils import errors
-from wic.conf import configmgr
-from wic.plugin import pluginmgr
-
-
-class Creator():
- """${name}: create an image
-
- Usage:
- ${name} SUBCOMMAND <ksfile> [OPTS]
-
- ${command_list}
- ${option_list}
- """
-
- name = 'wic create(cr)'
-
- def __init__(self, *args, **kwargs):
- self._subcmds = {}
-
- # get cmds from pluginmgr
- # mix-in do_subcmd interface
- for subcmd, klass in pluginmgr.get_plugins('imager').items():
- if not hasattr(klass, 'do_create'):
- msger.warning("Unsupported subcmd: %s" % subcmd)
- continue
-
- func = getattr(klass, 'do_create')
- self._subcmds[subcmd] = func
-
- def get_optparser(self):
- optparser = OptionParser()
- optparser.add_option('-d', '--debug', action='store_true',
- dest='debug',
- help=SUPPRESS_HELP)
- optparser.add_option('-v', '--verbose', action='store_true',
- dest='verbose',
- help=SUPPRESS_HELP)
- optparser.add_option('', '--logfile', type='string', dest='logfile',
- default=None,
- help='Path of logfile')
- optparser.add_option('-c', '--config', type='string', dest='config',
- default=None,
- help='Specify config file for wic')
- optparser.add_option('-o', '--outdir', type='string', action='store',
- dest='outdir', default=None,
- help='Output directory')
- optparser.add_option('', '--tmpfs', action='store_true', dest='enabletmpfs',
- help='Setup tmpdir as tmpfs to accelerate, experimental'
- ' feature, use it if you have more than 4G memory')
- optparser.add_option('', '--bmap', action='store_true', help='generate .bmap')
- return optparser
-
- def postoptparse(self, options):
- abspath = lambda pth: os.path.abspath(os.path.expanduser(pth))
-
- if options.verbose:
- msger.set_loglevel('verbose')
- if options.debug:
- msger.set_loglevel('debug')
-
- if options.logfile:
- logfile_abs_path = abspath(options.logfile)
- if os.path.isdir(logfile_abs_path):
- raise errors.Usage("logfile's path %s should be file"
- % options.logfile)
- if not os.path.exists(os.path.dirname(logfile_abs_path)):
- os.makedirs(os.path.dirname(logfile_abs_path))
- msger.set_interactive(False)
- msger.set_logfile(logfile_abs_path)
- configmgr.create['logfile'] = options.logfile
-
- if options.config:
- configmgr.reset()
- configmgr._siteconf = options.config
-
- if options.outdir is not None:
- configmgr.create['outdir'] = abspath(options.outdir)
-
- cdir = 'outdir'
- if os.path.exists(configmgr.create[cdir]) \
- and not os.path.isdir(configmgr.create[cdir]):
- msger.error('Invalid directory specified: %s' \
- % configmgr.create[cdir])
-
- if options.enabletmpfs:
- configmgr.create['enabletmpfs'] = options.enabletmpfs
-
- def main(self, argv=None):
- if argv is None:
- argv = sys.argv
- else:
- argv = argv[:] # don't modify caller's list
-
- pname = argv[0]
- if pname not in self._subcmds:
- msger.error('Unknown plugin: %s' % pname)
-
- optparser = self.get_optparser()
- options, args = optparser.parse_args(argv)
-
- self.postoptparse(options)
-
- return self._subcmds[pname](options, *args[1:])
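
The Creator class removed above was wic's last optparse-based entry point;
the reworked command line uses argparse. As a rough sketch only (the actual
wic options are defined elsewhere in this series), the removed optparse
block maps onto argparse like this:

    import argparse

    # Hypothetical argparse counterparts of the removed optparse options.
    parser = argparse.ArgumentParser(prog='wic create')
    parser.add_argument('-d', '--debug', action='store_true',
                        help=argparse.SUPPRESS)
    parser.add_argument('-v', '--verbose', action='store_true',
                        help=argparse.SUPPRESS)
    parser.add_argument('--logfile', help='Path of logfile')
    parser.add_argument('-o', '--outdir', help='Output directory')
    parser.add_argument('--bmap', action='store_true', help='generate .bmap')
    options = parser.parse_args(['-o', './wic-output', '--bmap'])
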
diff --git a/scripts/lib/wic/engine.py b/scripts/lib/wic/engine.py
index 5b104631ca..24797511e5 100644
--- a/scripts/lib/wic/engine.py
+++ b/scripts/lib/wic/engine.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
@@ -28,14 +14,22 @@
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
+import logging
import os
-import sys
+import tempfile
+import json
+import subprocess
+import re
+
+from collections import namedtuple, OrderedDict
+from distutils.spawn import find_executable
-from wic import msger, creator
-from wic.utils import misc
-from wic.plugin import pluginmgr
-from wic.utils.oe import misc
+from wic import WicError
+from wic.filemap import sparse_copy
+from wic.pluginbase import PluginMgr
+from wic.misc import get_bitbake_var, exec_cmd
+logger = logging.getLogger('wic')
def verify_build_env():
"""
@@ -44,23 +38,25 @@ def verify_build_env():
Returns True if it is, false otherwise
"""
if not os.environ.get("BUILDDIR"):
- print("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
- sys.exit(1)
+ raise WicError("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
return True
CANNED_IMAGE_DIR = "lib/wic/canned-wks" # relative to scripts
SCRIPTS_CANNED_IMAGE_DIR = "scripts/" + CANNED_IMAGE_DIR
+WIC_DIR = "wic"
def build_canned_image_list(path):
- layers_path = misc.get_bitbake_var("BBLAYERS")
+ layers_path = get_bitbake_var("BBLAYERS")
canned_wks_layer_dirs = []
if layers_path is not None:
for layer_path in layers_path.split():
- cpath = os.path.join(layer_path, SCRIPTS_CANNED_IMAGE_DIR)
- canned_wks_layer_dirs.append(cpath)
+ for wks_path in (WIC_DIR, SCRIPTS_CANNED_IMAGE_DIR):
+ cpath = os.path.join(layer_path, wks_path)
+ if os.path.isdir(cpath):
+ canned_wks_layer_dirs.append(cpath)
cpath = os.path.join(path, CANNED_IMAGE_DIR)
canned_wks_layer_dirs.append(cpath)
@@ -80,7 +76,8 @@ def find_canned_image(scripts_path, wks_file):
for fname in files:
if fname.endswith("~") or fname.endswith("#"):
continue
- if fname.endswith(".wks") and wks_file + ".wks" == fname:
+ if ((fname.endswith(".wks") and wks_file + ".wks" == fname) or \
+ (fname.endswith(".wks.in") and wks_file + ".wks.in" == fname)):
fullpath = os.path.join(canned_wks_dir, fname)
return fullpath
return None
@@ -97,7 +94,7 @@ def list_canned_images(scripts_path):
for fname in files:
if fname.endswith("~") or fname.endswith("#"):
continue
- if fname.endswith(".wks"):
+ if fname.endswith(".wks") or fname.endswith(".wks.in"):
fullpath = os.path.join(canned_wks_dir, fname)
with open(fullpath) as wks:
for line in wks:
@@ -106,7 +103,7 @@ def list_canned_images(scripts_path):
if idx != -1:
desc = line[idx + len("short-description:"):].strip()
break
- basename = os.path.splitext(fname)[0]
+ basename = fname.split('.')[0]
print(" %s\t\t%s" % (basename.ljust(30), desc))
@@ -137,26 +134,24 @@ def list_source_plugins():
"""
List the available source plugins i.e. plugins available for --source.
"""
- plugins = pluginmgr.get_source_plugins()
+ plugins = PluginMgr.get_plugins('source')
for plugin in plugins:
print(" %s" % plugin)
def wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
- native_sysroot, scripts_path, image_output_dir,
- compressor, bmap, debug):
- """Create image
+ native_sysroot, options):
+ """
+ Create image
wks_file - user-defined OE kickstart file
rootfs_dir - absolute path to the build's /rootfs dir
bootimg_dir - absolute path to the build's boot artifacts directory
kernel_dir - absolute path to the build's kernel directory
native_sysroot - absolute path to the build's native sysroots dir
- scripts_path - absolute path to /scripts dir
image_output_dir - dirname to create for image
- compressor - compressor utility to compress the image
- bmap - enable generation of .bmap
+ options - wic command line options (debug, bmap, etc)
Normally, the values for the build artifacts values are determined
by 'wic -e' from the output of the 'bitbake -e' command given an
@@ -179,47 +174,451 @@ def wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
try:
oe_builddir = os.environ["BUILDDIR"]
except KeyError:
- print("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
- sys.exit(1)
+ raise WicError("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
- if debug:
- msger.set_loglevel('debug')
+ if not os.path.exists(options.outdir):
+ os.makedirs(options.outdir)
- crobj = creator.Creator()
+ pname = options.imager
+ plugin_class = PluginMgr.get_plugins('imager').get(pname)
+ if not plugin_class:
+ raise WicError('Unknown plugin: %s' % pname)
- cmdline = ["direct", native_sysroot, kernel_dir, bootimg_dir, rootfs_dir,
- wks_file, image_output_dir, oe_builddir, compressor or ""]
- if bmap:
- cmdline.append('--bmap')
+ plugin = plugin_class(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
+ native_sysroot, oe_builddir, options)
- crobj.main(cmdline)
+ plugin.do_create()
- print("\nThe image(s) were created using OE kickstart file:\n %s" % wks_file)
+ logger.info("The image(s) were created using OE kickstart file:\n %s", wks_file)
def wic_list(args, scripts_path):
"""
Print the list of images or source plugins.
"""
- if len(args) < 1:
+ if args.list_type is None:
return False
- if args == ["images"]:
+ if args.list_type == "images":
+
list_canned_images(scripts_path)
return True
- elif args == ["source-plugins"]:
+ elif args.list_type == "source-plugins":
list_source_plugins()
return True
- elif len(args) == 2 and args[1] == "help":
- wks_file = args[0]
+ elif len(args.help_for) == 1 and args.help_for[0] == 'help':
+ wks_file = args.list_type
fullpath = find_canned_image(scripts_path, wks_file)
if not fullpath:
- print("No image named %s found, exiting. "\
- "(Use 'wic list images' to list available images, or "\
- "specify a fully-qualified OE kickstart (.wks) "\
- "filename)\n" % wks_file)
- sys.exit(1)
+ raise WicError("No image named %s found, exiting. "
+ "(Use 'wic list images' to list available images, "
+ "or specify a fully-qualified OE kickstart (.wks) "
+ "filename)" % wks_file)
+
list_canned_image_help(scripts_path, fullpath)
return True
return False
+
+
+class Disk:
+ def __init__(self, imagepath, native_sysroot, fstypes=('fat', 'ext')):
+ self.imagepath = imagepath
+ self.native_sysroot = native_sysroot
+ self.fstypes = fstypes
+ self._partitions = None
+ self._partimages = {}
+ self._lsector_size = None
+ self._psector_size = None
+ self._ptable_format = None
+
+ # find parted
+ # read paths from $PATH environment variable
+ # if it fails, use hardcoded paths
+ pathlist = "/bin:/usr/bin:/usr/sbin:/sbin/"
+ try:
+ self.paths = os.environ['PATH'] + ":" + pathlist
+ except KeyError:
+ self.paths = pathlist
+
+ if native_sysroot:
+ for path in pathlist.split(':'):
+ self.paths = "%s%s:%s" % (native_sysroot, path, self.paths)
+
+ self.parted = find_executable("parted", self.paths)
+ if not self.parted:
+ raise WicError("Can't find executable parted")
+
+ self.partitions = self.get_partitions()
+
+ def __del__(self):
+ for path in self._partimages.values():
+ os.unlink(path)
+
+ def get_partitions(self):
+ if self._partitions is None:
+ self._partitions = OrderedDict()
+ out = exec_cmd("%s -sm %s unit B print" % (self.parted, self.imagepath))
+ parttype = namedtuple("Part", "pnum start end size fstype")
+ splitted = out.splitlines()
+ # skip over possible errors in exec_cmd output
+ try:
+            idx = splitted.index("BYT;")
+ except ValueError:
+ raise WicError("Error getting partition information from %s" % (self.parted))
+ lsector_size, psector_size, self._ptable_format = splitted[idx + 1].split(":")[3:6]
+ self._lsector_size = int(lsector_size)
+ self._psector_size = int(psector_size)
+ for line in splitted[idx + 2:]:
+ pnum, start, end, size, fstype = line.split(':')[:5]
+ partition = parttype(int(pnum), int(start[:-1]), int(end[:-1]),
+ int(size[:-1]), fstype)
+ self._partitions[pnum] = partition
+
+ return self._partitions
+
+ def __getattr__(self, name):
+ """Get path to the executable in a lazy way."""
+ if name in ("mdir", "mcopy", "mdel", "mdeltree", "sfdisk", "e2fsck",
+ "resize2fs", "mkswap", "mkdosfs", "debugfs"):
+ aname = "_%s" % name
+ if aname not in self.__dict__:
+ setattr(self, aname, find_executable(name, self.paths))
+ if aname not in self.__dict__ or self.__dict__[aname] is None:
+ raise WicError("Can't find executable '{}'".format(name))
+ return self.__dict__[aname]
+ return self.__dict__[name]
+
+ def _get_part_image(self, pnum):
+ if pnum not in self.partitions:
+            raise WicError("Partition %s is not in the image" % pnum)
+ part = self.partitions[pnum]
+ # check if fstype is supported
+ for fstype in self.fstypes:
+ if part.fstype.startswith(fstype):
+ break
+ else:
+ raise WicError("Not supported fstype: {}".format(part.fstype))
+ if pnum not in self._partimages:
+ tmpf = tempfile.NamedTemporaryFile(prefix="wic-part")
+ dst_fname = tmpf.name
+ tmpf.close()
+ sparse_copy(self.imagepath, dst_fname, skip=part.start, length=part.size)
+ self._partimages[pnum] = dst_fname
+
+ return self._partimages[pnum]
+
+ def _put_part_image(self, pnum):
+ """Put partition image into partitioned image."""
+ sparse_copy(self._partimages[pnum], self.imagepath,
+ seek=self.partitions[pnum].start)
+
+ def dir(self, pnum, path):
+ if self.partitions[pnum].fstype.startswith('ext'):
+ return exec_cmd("{} {} -R 'ls -l {}'".format(self.debugfs,
+ self._get_part_image(pnum),
+ path), as_shell=True)
+ else: # fat
+ return exec_cmd("{} -i {} ::{}".format(self.mdir,
+ self._get_part_image(pnum),
+ path))
+
+ def copy(self, src, dest):
+ """Copy partition image into wic image."""
+ pnum = dest.part if isinstance(src, str) else src.part
+
+ if self.partitions[pnum].fstype.startswith('ext'):
+ if isinstance(src, str):
+ cmd = "printf 'cd {}\nwrite {} {}\n' | {} -w {}".\
+ format(os.path.dirname(dest.path), src, os.path.basename(src),
+ self.debugfs, self._get_part_image(pnum))
+ else: # copy from wic
+ # run both dump and rdump to support both files and directory
+ cmd = "printf 'cd {}\ndump /{} {}\nrdump /{} {}\n' | {} {}".\
+ format(os.path.dirname(src.path), src.path,
+ dest, src.path, dest, self.debugfs,
+ self._get_part_image(pnum))
+ else: # fat
+ if isinstance(src, str):
+ cmd = "{} -i {} -snop {} ::{}".format(self.mcopy,
+ self._get_part_image(pnum),
+ src, dest.path)
+ else:
+ cmd = "{} -i {} -snop ::{} {}".format(self.mcopy,
+ self._get_part_image(pnum),
+ src.path, dest)
+
+ exec_cmd(cmd, as_shell=True)
+ self._put_part_image(pnum)
+
+ def remove_ext(self, pnum, path, recursive):
+ """
+ Remove files/dirs and their contents from the partition.
+ This only applies to ext* partition.
+ """
+        abs_path = re.sub(r'\/\/+', '/', path)
+ cmd = "{} {} -wR 'rm \"{}\"'".format(self.debugfs,
+ self._get_part_image(pnum),
+ abs_path)
+        out = exec_cmd(cmd, as_shell=True)
+ for line in out.splitlines():
+ if line.startswith("rm:"):
+ if "file is a directory" in line:
+ if recursive:
+ # loop through content and delete them one by one if
+                        # flagged with -r
+ subdirs = iter(self.dir(pnum, abs_path).splitlines())
+ next(subdirs)
+ for subdir in subdirs:
+ dir = subdir.split(':')[1].split(" ", 1)[1]
+ if not dir == "." and not dir == "..":
+ self.remove_ext(pnum, "%s/%s" % (abs_path, dir), recursive)
+
+ rmdir_out = exec_cmd("{} {} -wR 'rmdir \"{}\"'".format(self.debugfs,
+ self._get_part_image(pnum),
+ abs_path.rstrip('/'))
+ , as_shell=True)
+
+ for rmdir_line in rmdir_out.splitlines():
+ if "directory not empty" in rmdir_line:
+ raise WicError("Could not complete operation: \n%s \n"
+ "use -r to remove non-empty directory" % rmdir_line)
+ if rmdir_line.startswith("rmdir:"):
+ raise WicError("Could not complete operation: \n%s "
+ "\n%s" % (str(line), rmdir_line))
+
+ else:
+ raise WicError("Could not complete operation: \n%s "
+ "\nUnable to remove %s" % (str(line), abs_path))
+
+ def remove(self, pnum, path, recursive):
+ """Remove files/dirs from the partition."""
+ partimg = self._get_part_image(pnum)
+ if self.partitions[pnum].fstype.startswith('ext'):
+ self.remove_ext(pnum, path, recursive)
+
+ else: # fat
+ cmd = "{} -i {} ::{}".format(self.mdel, partimg, path)
+ try:
+ exec_cmd(cmd)
+ except WicError as err:
+ if "not found" in str(err) or "non empty" in str(err):
+ # mdel outputs 'File ... not found' or 'directory .. non empty"
+ # try to use mdeltree as path could be a directory
+ cmd = "{} -i {} ::{}".format(self.mdeltree,
+ partimg, path)
+ exec_cmd(cmd)
+ else:
+ raise err
+ self._put_part_image(pnum)
+
+ def write(self, target, expand):
+ """Write disk image to the media or file."""
+ def write_sfdisk_script(outf, parts):
+ for key, val in parts['partitiontable'].items():
+ if key in ("partitions", "device", "firstlba", "lastlba"):
+ continue
+ if key == "id":
+ key = "label-id"
+ outf.write("{}: {}\n".format(key, val))
+ outf.write("\n")
+ for part in parts['partitiontable']['partitions']:
+ line = ''
+ for name in ('attrs', 'name', 'size', 'type', 'uuid'):
+ if name == 'size' and part['type'] == 'f':
+ # don't write size for extended partition
+ continue
+ val = part.get(name)
+ if val:
+ line += '{}={}, '.format(name, val)
+ if line:
+ line = line[:-2] # strip ', '
+ if part.get('bootable'):
+ line += ' ,bootable'
+ outf.write("{}\n".format(line))
+ outf.flush()
+
+ def read_ptable(path):
+ out = exec_cmd("{} -dJ {}".format(self.sfdisk, path))
+ return json.loads(out)
+
+ def write_ptable(parts, target):
+ with tempfile.NamedTemporaryFile(prefix="wic-sfdisk-", mode='w') as outf:
+ write_sfdisk_script(outf, parts)
+ cmd = "{} --no-reread {} < {} ".format(self.sfdisk, target, outf.name)
+ exec_cmd(cmd, as_shell=True)
+
+ if expand is None:
+ sparse_copy(self.imagepath, target)
+ else:
+ # copy first sectors that may contain bootloader
+ sparse_copy(self.imagepath, target, length=2048 * self._lsector_size)
+
+ # copy source partition table to the target
+ parts = read_ptable(self.imagepath)
+ write_ptable(parts, target)
+
+ # get size of unpartitioned space
+ free = None
+ for line in exec_cmd("{} -F {}".format(self.sfdisk, target)).splitlines():
+ if line.startswith("Unpartitioned space ") and line.endswith("sectors"):
+ free = int(line.split()[-2])
+ # Align free space to a 2048 sector boundary. YOCTO #12840.
+ free = free - (free % 2048)
+ if free is None:
+ raise WicError("Can't get size of unpartitioned space")
+
+ # calculate expanded partitions sizes
+ sizes = {}
+ num_auto_resize = 0
+ for num, part in enumerate(parts['partitiontable']['partitions'], 1):
+ if num in expand:
+ if expand[num] != 0: # don't resize partition if size is set to 0
+ sectors = expand[num] // self._lsector_size
+ free -= sectors - part['size']
+ part['size'] = sectors
+ sizes[num] = sectors
+ elif part['type'] != 'f':
+ sizes[num] = -1
+ num_auto_resize += 1
+
+ for num, part in enumerate(parts['partitiontable']['partitions'], 1):
+ if sizes.get(num) == -1:
+ part['size'] += free // num_auto_resize
+
+ # write resized partition table to the target
+ write_ptable(parts, target)
+
+ # read resized partition table
+ parts = read_ptable(target)
+
+ # copy partitions content
+ for num, part in enumerate(parts['partitiontable']['partitions'], 1):
+ pnum = str(num)
+ fstype = self.partitions[pnum].fstype
+
+ # copy unchanged partition
+ if part['size'] == self.partitions[pnum].size // self._lsector_size:
+ logger.info("copying unchanged partition {}".format(pnum))
+ sparse_copy(self._get_part_image(pnum), target, seek=part['start'] * self._lsector_size)
+ continue
+
+ # resize or re-create partitions
+ if fstype.startswith('ext') or fstype.startswith('fat') or \
+ fstype.startswith('linux-swap'):
+
+ partfname = None
+ with tempfile.NamedTemporaryFile(prefix="wic-part{}-".format(pnum)) as partf:
+ partfname = partf.name
+
+ if fstype.startswith('ext'):
+ logger.info("resizing ext partition {}".format(pnum))
+ partimg = self._get_part_image(pnum)
+ sparse_copy(partimg, partfname)
+ exec_cmd("{} -pf {}".format(self.e2fsck, partfname))
+ exec_cmd("{} {} {}s".format(\
+ self.resize2fs, partfname, part['size']))
+ elif fstype.startswith('fat'):
+ logger.info("copying content of the fat partition {}".format(pnum))
+ with tempfile.TemporaryDirectory(prefix='wic-fatdir-') as tmpdir:
+ # copy content to the temporary directory
+ cmd = "{} -snompi {} :: {}".format(self.mcopy,
+ self._get_part_image(pnum),
+ tmpdir)
+ exec_cmd(cmd)
+ # create new msdos partition
+ label = part.get("name")
+ label_str = "-n {}".format(label) if label else ''
+
+ cmd = "{} {} -C {} {}".format(self.mkdosfs, label_str, partfname,
+ part['size'])
+ exec_cmd(cmd)
+ # copy content from the temporary directory to the new partition
+ cmd = "{} -snompi {} {}/* ::".format(self.mcopy, partfname, tmpdir)
+ exec_cmd(cmd, as_shell=True)
+ elif fstype.startswith('linux-swap'):
+ logger.info("creating swap partition {}".format(pnum))
+ label = part.get("name")
+ label_str = "-L {}".format(label) if label else ''
+ uuid = part.get("uuid")
+ uuid_str = "-U {}".format(uuid) if uuid else ''
+ with open(partfname, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), part['size'] * self._lsector_size)
+ exec_cmd("{} {} {} {}".format(self.mkswap, label_str, uuid_str, partfname))
+ sparse_copy(partfname, target, seek=part['start'] * self._lsector_size)
+ os.unlink(partfname)
+ elif part['type'] != 'f':
+ logger.warning("skipping partition {}: unsupported fstype {}".format(pnum, fstype))
+
+def wic_ls(args, native_sysroot):
+ """List contents of partitioned image or vfat partition."""
+ disk = Disk(args.path.image, native_sysroot)
+ if not args.path.part:
+ if disk.partitions:
+ print('Num Start End Size Fstype')
+ for part in disk.partitions.values():
+ print("{:2d} {:12d} {:12d} {:12d} {}".format(\
+ part.pnum, part.start, part.end,
+ part.size, part.fstype))
+ else:
+ path = args.path.path or '/'
+ print(disk.dir(args.path.part, path))
+
+def wic_cp(args, native_sysroot):
+ """
+ Copy file or directory to/from the vfat/ext partition of
+ partitioned image.
+ """
+ if isinstance(args.dest, str):
+ disk = Disk(args.src.image, native_sysroot)
+ else:
+ disk = Disk(args.dest.image, native_sysroot)
+ disk.copy(args.src, args.dest)
+
+
+def wic_rm(args, native_sysroot):
+ """
+ Remove files or directories from the vfat partition of
+ partitioned image.
+ """
+ disk = Disk(args.path.image, native_sysroot)
+ disk.remove(args.path.part, args.path.path, args.recursive_delete)
+
+def wic_write(args, native_sysroot):
+ """
+ Write image to a target device.
+ """
+ disk = Disk(args.image, native_sysroot, ('fat', 'ext', 'linux-swap'))
+ disk.write(args.target, args.expand)
+
+def find_canned(scripts_path, file_name):
+ """
+ Find a file either by its path or by name in the canned files dir.
+
+ Return None if not found
+ """
+ if os.path.exists(file_name):
+ return file_name
+
+ layers_canned_wks_dir = build_canned_image_list(scripts_path)
+ for canned_wks_dir in layers_canned_wks_dir:
+ for root, dirs, files in os.walk(canned_wks_dir):
+ for fname in files:
+ if fname == file_name:
+ fullpath = os.path.join(canned_wks_dir, fname)
+ return fullpath
+
+def get_custom_config(boot_file):
+ """
+ Get the custom configuration to be used for the bootloader.
+
+ Return None if the file can't be found.
+ """
+ # Get the scripts path of poky
+ scripts_path = os.path.abspath("%s/../.." % os.path.dirname(__file__))
+
+ cfg_file = find_canned(scripts_path, boot_file)
+ if cfg_file:
+ with open(cfg_file, "r") as f:
+ config = f.read()
+ return config
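
Disk.get_partitions() in the new engine code above scrapes the
machine-readable output of "parted -sm <image> unit B print": a "BYT;"
marker, one colon-separated header line, then one line per partition with
byte values suffixed by "B". A self-contained sketch of that parsing, with
illustrative sample output:

    # Illustrative "parted -sm" output; path and sizes are made up.
    sample_lines = [
        "BYT;",
        "/tmp/image.wic:52428800B:file:512:512:msdos:Virtual disk:;",
        "1:1048576B:25165823B:24117248B:fat16::boot, lba;",
    ]

    idx = sample_lines.index("BYT;")
    # Fields 3-5 of the header: logical/physical sector size, table type.
    lsector, psector, ptable = sample_lines[idx + 1].split(":")[3:6]
    print("sector size:", int(lsector), "ptable:", ptable)
    for line in sample_lines[idx + 2:]:
        pnum, start, end, size, fstype = line.split(":")[:5]
        # Strip the trailing "B" before converting to bytes.
        print(pnum, int(start[:-1]), int(end[:-1]), int(size[:-1]), fstype)
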
diff --git a/scripts/lib/wic/filemap.py b/scripts/lib/wic/filemap.py
index f3240ba8d8..a3919fbcad 100644
--- a/scripts/lib/wic/filemap.py
+++ b/scripts/lib/wic/filemap.py
@@ -1,13 +1,8 @@
+#
# Copyright (c) 2012 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License, version 2,
-# as published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
"""
This module implements a way to get file block mapping. Two methods
@@ -22,6 +17,8 @@ and returns an instance of the class.
# * Too many instance attributes (R0902)
# pylint: disable=R0902
+import errno
+import fcntl
import os
import struct
import array
@@ -34,14 +30,20 @@ def get_block_size(file_obj):
Returns block size for file object 'file_obj'. Errors are indicated by the
'IOError' exception.
"""
-
- from fcntl import ioctl
- import struct
-
# Get the block size of the host file-system for the image file by calling
# the FIGETBSZ ioctl (number 2).
- binary_data = ioctl(file_obj, 2, struct.pack('I', 0))
- return struct.unpack('I', binary_data)[0]
+ try:
+ binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
+ except OSError:
+ raise IOError("Unable to determine block size")
+ bsize = struct.unpack('I', binary_data)[0]
+ if not bsize:
+ stat = os.fstat(file_obj.fileno())
+ if hasattr(stat, 'st_blksize'):
+ bsize = stat.st_blksize
+ else:
+ raise IOError("Unable to determine block size")
+ return bsize
class ErrorNotSupp(Exception):
"""
@@ -185,9 +188,9 @@ def _lseek(file_obj, offset, whence):
except OSError as err:
# The 'lseek' system call returns the ENXIO if there is no data or
# hole starting from the specified offset.
- if err.errno == os.errno.ENXIO:
+ if err.errno == errno.ENXIO:
return -1
- elif err.errno == os.errno.EINVAL:
+ elif err.errno == errno.EINVAL:
raise ErrorNotSupp("the kernel or file-system does not support "
"\"SEEK_HOLE\" and \"SEEK_DATA\"")
else:
@@ -228,7 +231,7 @@ class FilemapSeek(_FilemapBase):
try:
tmp_obj = tempfile.TemporaryFile("w+", dir=directory)
except IOError as err:
- raise ErrorNotSupp("cannot create a temporary in \"%s\": %s"
+ raise ErrorNotSupp("cannot create a temporary in \"%s\": %s" \
% (directory, err))
try:
@@ -390,12 +393,12 @@ class FilemapFiemap(_FilemapBase):
except IOError as err:
# Note, the FIEMAP ioctl is supported by the Linux kernel starting
# from version 2.6.28 (year 2008).
- if err.errno == os.errno.EOPNOTSUPP:
+ if err.errno == errno.EOPNOTSUPP:
errstr = "FilemapFiemap: the FIEMAP ioctl is not supported " \
"by the file-system"
self._log.debug(errstr)
raise ErrorNotSupp(errstr)
- if err.errno == os.errno.ENOTTY:
+ if err.errno == errno.ENOTTY:
errstr = "FilemapFiemap: the FIEMAP ioctl is not supported " \
"by the kernel"
self._log.debug(errstr)
@@ -530,23 +533,51 @@ def filemap(image, log=None):
except ErrorNotSupp:
return FilemapSeek(image, log)
-def sparse_copy(src_fname, dst_fname, offset=0, skip=0):
- """Efficiently copy sparse file to or into another file."""
- fmap = filemap(src_fname)
+def sparse_copy(src_fname, dst_fname, skip=0, seek=0,
+ length=0, api=None):
+ """
+ Efficiently copy sparse file to or into another file.
+
+ src_fname: path to source file
+ dst_fname: path to destination file
+    skip: skip N bytes at the start of src
+ seek: seek N bytes from the start of dst
+ length: read N bytes from src and write them to dst
+ api: FilemapFiemap or FilemapSeek object
+ """
+ if not api:
+ api = filemap
+ fmap = api(src_fname)
try:
dst_file = open(dst_fname, 'r+b')
except IOError:
dst_file = open(dst_fname, 'wb')
+ if length:
+ dst_size = length + seek
+ else:
+ dst_size = os.path.getsize(src_fname) + seek - skip
+ dst_file.truncate(dst_size)
+ written = 0
for first, last in fmap.get_mapped_ranges(0, fmap.blocks_cnt):
start = first * fmap.block_size
end = (last + 1) * fmap.block_size
+ if skip >= end:
+ continue
+
if start < skip < end:
start = skip
fmap._f_image.seek(start, os.SEEK_SET)
- dst_file.seek(offset + start, os.SEEK_SET)
+
+ written += start - skip - written
+ if length and written >= length:
+ dst_file.seek(seek + length, os.SEEK_SET)
+ dst_file.close()
+ return
+
+ dst_file.seek(seek + start - skip, os.SEEK_SET)
chunk_size = 1024 * 1024
to_read = end - start
@@ -555,7 +586,14 @@ def sparse_copy(src_fname, dst_fname, offset=0, skip=0):
while read < to_read:
if read + chunk_size > to_read:
chunk_size = to_read - read
- chunk = fmap._f_image.read(chunk_size)
+ size = chunk_size
+ if length and written + size > length:
+ size = length - written
+ chunk = fmap._f_image.read(size)
dst_file.write(chunk)
- read += chunk_size
+ read += size
+ written += size
+ if written == length:
+ dst_file.close()
+ return
dst_file.close()
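
The extended sparse_copy() above (skip/seek/length instead of a single
offset) is what lets the engine lift a single partition out of a disk image
and write it back, as Disk._get_part_image()/_put_part_image() do. A usage
sketch with illustrative file names and offsets:

    from wic.filemap import sparse_copy

    # Partition offset and length in bytes, e.g. as reported by
    # Disk.get_partitions(); the values here are made up.
    start, size = 1048576, 24117248

    # Extract partition 1 into a standalone file...
    sparse_copy('image.wic', 'part1.img', skip=start, length=size)
    # ...modify part1.img with mtools/debugfs...
    # ...then write it back at the same offset.
    sparse_copy('part1.img', 'image.wic', seek=start)
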
diff --git a/scripts/lib/wic/help.py b/scripts/lib/wic/help.py
index e5347ec4b7..29c4e436d8 100644
--- a/scripts/lib/wic/help.py
+++ b/scripts/lib/wic/help.py
@@ -1,21 +1,6 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module implements some basic help invocation functions along
@@ -28,10 +13,12 @@
import subprocess
import logging
-from wic.plugin import pluginmgr, PLUGIN_TYPES
+from wic.pluginbase import PluginMgr, PLUGIN_TYPES
+
+logger = logging.getLogger('wic')
def subcommand_error(args):
- logging.info("invalid subcommand %s" % args[0])
+ logger.info("invalid subcommand %s", args[0])
def display_help(subcommand, subcommands):
@@ -54,7 +41,7 @@ def wic_help(args, usage_str, subcommands):
"""
Subcommand help dispatcher.
"""
- if len(args) == 1 or not display_help(args[1], subcommands):
+    if args.help_topic is None or not display_help(args.help_topic, subcommands):
print(usage_str)
@@ -66,7 +53,7 @@ def get_wic_plugins_help():
result = wic_plugins_help
for plugin_type in PLUGIN_TYPES:
result += '\n\n%s PLUGINS\n\n' % plugin_type.upper()
- for name, plugin in pluginmgr.get_plugins(plugin_type).items():
+ for name, plugin in PluginMgr.get_plugins(plugin_type).items():
result += "\n %s plugin:\n" % name
if plugin.__doc__:
result += plugin.__doc__
@@ -80,19 +67,20 @@ def invoke_subcommand(args, parser, main_command_usage, subcommands):
Dispatch to subcommand handler borrowed from combo-layer.
Should use argparse, but has to work in 2.6.
"""
- if not args:
- logging.error("No subcommand specified, exiting")
+ if not args.command:
+ logger.error("No subcommand specified, exiting")
parser.print_help()
return 1
- elif args[0] == "help":
+ elif args.command == "help":
wic_help(args, main_command_usage, subcommands)
- elif args[0] not in subcommands:
- logging.error("Unsupported subcommand %s, exiting\n" % (args[0]))
+ elif args.command not in subcommands:
+ logger.error("Unsupported subcommand %s, exiting\n", args.command)
parser.print_help()
return 1
else:
- usage = subcommands.get(args[0], subcommand_error)[1]
- subcommands.get(args[0], subcommand_error)[0](args[1:], usage)
+ subcmd = subcommands.get(args.command, subcommand_error)
+ usage = subcmd[1]
+ subcmd[0](args, usage)
##
@@ -128,10 +116,10 @@ wic_create_usage = """
Create a new OpenEmbedded image
usage: wic create <wks file or image name> [-o <DIRNAME> | --outdir <DIRNAME>]
- [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>]
[-e | --image-name] [-s, --skip-build-check] [-D, --debug]
[-r, --rootfs-dir] [-b, --bootimg-dir]
[-k, --kernel-dir] [-n, --native-sysroot] [-f, --build-rootfs]
+ [-c, --compress-with] [-m, --bmap]
This command creates an OpenEmbedded image based on the 'OE kickstart
commands' found in the <wks file>.
@@ -152,7 +140,7 @@ SYNOPSIS
[-e | --image-name] [-s, --skip-build-check] [-D, --debug]
[-r, --rootfs-dir] [-b, --bootimg-dir]
[-k, --kernel-dir] [-n, --native-sysroot] [-f, --build-rootfs]
- [-c, --compress-with] [-m, --bmap]
+ [-c, --compress-with] [-m, --bmap] [--no-fstab-update]
DESCRIPTION
This command creates an OpenEmbedded image based on the 'OE
@@ -224,6 +212,11 @@ DESCRIPTION
The -m option is used to produce .bmap file for the image. This file
can be used to flash image using bmaptool utility.
+
+    The --no-fstab-update option leaves the fstab file unchanged. With this
+    option the final fstab file is the same as the one in the rootfs; wic
+    does not update it, e.g. by adding new mount points. The user can
+    control the fstab content through the base-files recipe.
"""
wic_list_usage = """
@@ -281,6 +274,243 @@ DESCRIPTION
details.
"""
+wic_ls_usage = """
+
+ List content of a partitioned image
+
+ usage: wic ls <image>[:<partition>[<path>]] [--native-sysroot <path>]
+
+ This command outputs either list of image partitions or directory contents
+ of vfat and ext* partitions.
+
+ See 'wic help ls' for more detailed instructions.
+
+"""
+
+wic_ls_help = """
+
+NAME
+ wic ls - List contents of partitioned image or partition
+
+SYNOPSIS
+ wic ls <image>
+ wic ls <image>:<vfat or ext* partition>
+ wic ls <image>:<vfat or ext* partition><path>
+ wic ls <image>:<vfat or ext* partition><path> --native-sysroot <path>
+
+DESCRIPTION
+ This command lists either partitions of the image or directory contents
+ of vfat or ext* partitions.
+
+    In its first form, the command lists the partitions of the image.
+ For example:
+ $ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic
+ Num Start End Size Fstype
+ 1 1048576 24438783 23390208 fat16
+ 2 25165824 50315263 25149440 ext4
+
+    The second and third forms list the directory contents of a partition:
+ $ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
+ Volume in drive : is boot
+ Volume Serial Number is 2DF2-5F02
+ Directory for ::/
+
+ efi <DIR> 2017-05-11 10:54
+ startup nsh 26 2017-05-11 10:54
+ vmlinuz 6922288 2017-05-11 10:54
+ 3 files 6 922 314 bytes
+ 15 818 752 bytes free
+
+
+ $ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/EFI/boot/
+ Volume in drive : is boot
+ Volume Serial Number is 2DF2-5F02
+ Directory for ::/EFI/boot
+
+ . <DIR> 2017-05-11 10:54
+ .. <DIR> 2017-05-11 10:54
+ grub cfg 679 2017-05-11 10:54
+ bootx64 efi 571392 2017-05-11 10:54
+ 4 files 572 071 bytes
+ 15 818 752 bytes free
+
+ The -n option is used to specify the path to the native sysroot
+    containing the tools (parted and mtools) to use.
+
+"""
+
+wic_cp_usage = """
+
+ Copy files and directories to/from the vfat or ext* partition
+
+ usage: wic cp <src> <dest> [--native-sysroot <path>]
+
+ source/destination image in format <image>:<partition>[<path>]
+
+ This command copies files or directories either
+    - from the local filesystem to vfat or ext* partitions of a partitioned image
+    - from vfat or ext* partitions of a partitioned image to the local filesystem
+
+ See 'wic help cp' for more detailed instructions.
+
+"""
+
+wic_cp_help = """
+
+NAME
+ wic cp - copy files and directories to/from the vfat or ext* partitions
+
+SYNOPSIS
+ wic cp <src> <dest>:<partition>
+ wic cp <src>:<partition> <dest>
+ wic cp <src> <dest-image>:<partition><path>
+ wic cp <src> <dest-image>:<partition><path> --native-sysroot <path>
+
+DESCRIPTION
+ This command copies files or directories either
+    - from the local filesystem to vfat or ext* partitions of a partitioned image
+    - from vfat or ext* partitions of a partitioned image to the local filesystem
+
+    The first form copies a file or directory to the root directory of
+ the partition:
+ $ wic cp test.wks tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
+ $ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
+ Volume in drive : is boot
+ Volume Serial Number is DB4C-FD4C
+ Directory for ::/
+
+ efi <DIR> 2017-05-24 18:15
+ loader <DIR> 2017-05-24 18:15
+ startup nsh 26 2017-05-24 18:15
+ vmlinuz 6926384 2017-05-24 18:15
+ test wks 628 2017-05-24 21:22
+ 5 files 6 927 038 bytes
+ 15 677 440 bytes free
+
+    The second form copies a file or directory to the specified directory
+ on the partition:
+ $ wic cp test tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/efi/
+ $ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/efi/
+ Volume in drive : is boot
+ Volume Serial Number is DB4C-FD4C
+ Directory for ::/efi
+
+ . <DIR> 2017-05-24 18:15
+ .. <DIR> 2017-05-24 18:15
+ boot <DIR> 2017-05-24 18:15
+ test <DIR> 2017-05-24 21:27
+ 4 files 0 bytes
+ 15 675 392 bytes free
+
+    The third form copies a file or directory from the specified directory
+    on the partition to the local filesystem:
+ $ wic cp tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/vmlinuz test
+
+ The -n option is used to specify the path to the native sysroot
+    containing the tools (parted and mtools) to use.
+"""
+
+wic_rm_usage = """
+
+ Remove files or directories from the vfat or ext* partitions
+
+ usage: wic rm <image>:<partition><path> [--native-sysroot <path>]
+
+ This command removes files or directories from the vfat or ext* partitions of
+ the partitioned image.
+
+ See 'wic help rm' for more detailed instructions.
+
+"""
+
+wic_rm_help = """
+
+NAME
+ wic rm - remove files or directories from the vfat or ext* partitions
+
+SYNOPSIS
+ wic rm <src> <image>:<partition><path>
+ wic rm <src> <image>:<partition><path> --native-sysroot <path>
+ wic rm -r <image>:<partition><path>
+
+DESCRIPTION
+ This command removes files or directories from the vfat or ext* partition of the
+ partitioned image:
+
+ $ wic ls ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
+ Volume in drive : is boot
+ Volume Serial Number is 11D0-DE21
+ Directory for ::/
+
+ libcom32 c32 186500 2017-06-02 15:15
+ libutil c32 24148 2017-06-02 15:15
+ syslinux cfg 209 2017-06-02 15:15
+ vesamenu c32 27104 2017-06-02 15:15
+ vmlinuz 6926384 2017-06-02 15:15
+ 5 files 7 164 345 bytes
+ 16 582 656 bytes free
+
+ $ wic rm ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/libutil.c32
+
+ $ wic ls ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
+ Volume in drive : is boot
+ Volume Serial Number is 11D0-DE21
+ Directory for ::/
+
+ libcom32 c32 186500 2017-06-02 15:15
+ syslinux cfg 209 2017-06-02 15:15
+ vesamenu c32 27104 2017-06-02 15:15
+ vmlinuz 6926384 2017-06-02 15:15
+ 4 files 7 140 197 bytes
+ 16 607 232 bytes free
+
+ The -n option is used to specify the path to the native sysroot
+    containing the tools (parted and mtools) to use.
+
+ The -r option is used to remove directories and their contents
+    recursively; this only applies to ext* partitions.
+"""
+
+wic_write_usage = """
+
+ Write image to a device
+
+ usage: wic write <image> <target device> [--expand [rules]] [--native-sysroot <path>]
+
+    This command writes a partitioned image to a target device (USB stick, SD card, etc.).
+
+ See 'wic help write' for more detailed instructions.
+
+"""
+
+wic_write_help = """
+
+NAME
+ wic write - write an image to a device
+
+SYNOPSIS
+ wic write <image> <target>
+ wic write <image> <target> --expand auto
+ wic write <image> <target> --expand 1:100M,2:300M
+ wic write <image> <target> --native-sysroot <path>
+
+DESCRIPTION
+    This command writes an image to a target device (USB stick, SD card, etc.):
+
+ $ wic write ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic /dev/sdb
+
+ The --expand option is used to resize image partitions.
+ --expand auto expands partitions to occupy all free space available on the target device.
+ It's also possible to specify expansion rules in a format
+ <partition>:<size>[,<partition>:<size>...] for one or more partitions.
+ Specifying size 0 will keep partition unmodified.
+    Note: resizing the boot partition can result in a non-bootable image for non-EFI
+    images. It is recommended to use size 0 for the boot partition to keep it bootable.
+
+ The --native-sysroot option is used to specify the path to the native sysroot
+    containing the tools (parted, resize2fs) to use.
+"""
+
wic_plugins_help = """
NAME
@@ -344,6 +574,10 @@ DESCRIPTION
partition. In other words, it 'prepares' the final partition
image which will be incorporated into the disk image.
+ do_post_partition()
+        Called after the partition is created. It is useful for adding
+        post-creation operations, e.g. signing the partition.
+
do_configure_partition()
Called before do_prepare_partition(), typically used to
create custom configuration files for a partition, for
@@ -371,12 +605,7 @@ DESCRIPTION
This scheme is extensible - adding more hooks is a simple matter
of adding more plugin methods to SourcePlugin and derived classes.
- The code that then needs to call the plugin methods uses
- plugin.get_source_plugin_methods() to find the method(s) needed by
- the call; this is done by filling up a dict with keys containing
- the method names of interest - on success, these will be filled in
- with the actual methods. Please see the implementation for
- examples and details.
+ Please see the implementation for details.
"""
wic_overview_help = """
@@ -635,8 +864,11 @@ DESCRIPTION
Partitions with a <mountpoint> specified will be automatically mounted.
This is achieved by wic adding entries to the fstab during image
generation. In order for a valid fstab to be generated one of the
- --ondrive, --ondisk or --use-uuid partition options must be used for
- each partition that specifies a mountpoint.
+ --ondrive, --ondisk, --use-uuid or --use-label partition options must
+ be used for each partition that specifies a mountpoint. Note that with
+ --use-{uuid,label} and non-root <mountpoint>, including swap, the mount
+ program must understand the PARTUUID or LABEL syntax. This currently
+ excludes the busybox versions of these applications.
The following are supported 'part' options:
@@ -646,6 +878,12 @@ DESCRIPTION
not specified, the size is in MB.
You do not need this option if you use --source.
+ --fixed-size: Exact partition size. Value format is the same
+ as for --size option. This option cannot be
+ specified along with --size. If partition data
+                  is larger than --fixed-size, an error will be
+                  raised when assembling the disk image.
+
--source: This option is a wic-specific option that names the
source of the data that will populate the
partition. The most common value for this option
@@ -684,6 +922,8 @@ DESCRIPTION
apply to partitions created using '--source rootfs' (see
--source above). Valid values are:
+ vfat
+ msdos
ext2
ext3
ext4
@@ -703,6 +943,14 @@ DESCRIPTION
label is already in use by another filesystem,
a new label is created for the partition.
+    --use-label: This option is specific to wic. It makes wic use the
+                 label in /etc/fstab to specify a partition. If
+                 --use-label and --use-uuid are used at the same time,
+                 the uuid is preferred because it is less likely to
+                 cause name conflicts. This parameter is not supported
+                 on the root partition since it requires an initramfs to
+                 parse this value and we do not currently support that.
+
--active: Marks the partition as active.
--align (in KBytes): This option is specific to wic and says
@@ -715,17 +963,27 @@ DESCRIPTION
partition table. It may be useful for
bootloaders.
+ --exclude-path: This option is specific to wic. It excludes the given
+ relative path from the resulting image. If the path
+ ends with a slash, only the content of the directory
+ is omitted, not the directory itself. This option only
+ has an effect with the rootfs source plugin.
+
--extra-space: This option is specific to wic. It adds extra
space after the space filled by the content
of the partition. The final size can go
beyond the size specified by --size.
- By default, 10MB.
+ By default, 10MB. This option cannot be used
+ with --fixed-size option.
--overhead-factor: This option is specific to wic. The
size of the partition is multiplied by
this factor. It has to be greater than or
- equal to 1.
- The default value is 1.3.
+ equal to 1. The default value is 1.3.
+ This option cannot be used with --fixed-size
+ option.
+
+    --part-name: This option is specific to wic. It specifies a name for GPT partitions.
--part-type: This option is specific to wic. It specifies partition
type GUID for GPT partitions.
@@ -741,10 +999,21 @@ DESCRIPTION
in bootloader configuration before running wic. In this case .wks file can
be generated or modified to set a preconfigured partition UUID using this option.
+ --fsuuid: This option is specific to wic. It specifies filesystem UUID.
+ It's useful if preconfigured filesystem UUID is added to kernel command line
+ in bootloader configuration before running wic. In this case .wks file can
+ be generated or modified to set preconfigured filesystem UUID using this option.
+
--system-id: This option is specific to wic. It specifies partition system id. It's useful
for the harware that requires non-default partition system ids. The parameter
in one byte long hex number either with 0x prefix or without it.
+ --mkfs-extraopts: This option specifies extra options to pass to mkfs utility.
+                      Note that wic uses default options for some filesystems, for example
+ '-S 512' for mkfs.fat or '-F -i 8192' for mkfs.ext. Those options will
+ not take effect when --mkfs-extraopts is used. This should be taken into
+ account when using --mkfs-extraopts.
+
* bootloader
This command allows the user to specify various bootloader
@@ -782,3 +1051,67 @@ DESCRIPTION
.wks files.
"""
+
+wic_help_help = """
+NAME
+ wic help - display a help topic
+
+DESCRIPTION
+ Specify a help topic to display it. Topics are shown above.
+"""
+
+
+wic_help = """
+Creates a customized OpenEmbedded image.
+
+Usage: wic [--version]
+ wic help [COMMAND or TOPIC]
+ wic COMMAND [ARGS]
+
+ usage 1: Returns the current version of Wic
+ usage 2: Returns detailed help for a COMMAND or TOPIC
+ usage 3: Executes COMMAND
+
+
+COMMAND:
+
+ list - List available canned images and source plugins
+ ls - List contents of partitioned image or partition
+ rm - Remove files or directories from the vfat or ext* partitions
+ help - Show help for a wic COMMAND or TOPIC
+ write - Write an image to a device
+ cp - Copy files and directories to the vfat or ext* partitions
+ create - Create a new OpenEmbedded image
+
+
+TOPIC:
+ overview - Presents an overall overview of Wic
+ plugins - Presents an overview and API for Wic plugins
+    kickstart - Presents a Wic kickstart file reference
+
+
+Examples:
+
+ $ wic --version
+
+ Returns the current version of Wic
+
+
+ $ wic help cp
+
+ Returns the SYNOPSIS and DESCRIPTION for the Wic "cp" command.
+
+
+ $ wic list images
+
+ Returns the list of canned images (i.e. *.wks files located in
+    the /scripts/lib/wic/canned-wks directory).
+
+
+ $ wic create mkefidisk -e core-image-minimal
+
+ Creates an EFI disk image from artifacts used in a previous
+ core-image-minimal build in standard BitBake locations
+ (e.g. Cooked Mode).
+
+"""
diff --git a/scripts/lib/wic/imager/baseimager.py b/scripts/lib/wic/imager/baseimager.py
deleted file mode 100644
index 1a52dd8b4d..0000000000
--- a/scripts/lib/wic/imager/baseimager.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2007 Red Hat Inc.
-# Copyright (c) 2009, 2010, 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os
-import tempfile
-import shutil
-
-from wic import msger
-from wic.utils.errors import CreatorError
-from wic.utils import runner
-
-class BaseImageCreator():
- """Base class for image creation.
-
- BaseImageCreator is the simplest creator class available; it will
- create a system image according to the supplied kickstart file.
-
- e.g.
-
- import wic.imgcreate as imgcreate
- ks = imgcreate.read_kickstart("foo.ks")
- imgcreate.ImageCreator(ks, "foo").create()
- """
-
- def __del__(self):
- self.cleanup()
-
- def __init__(self, createopts=None):
- """Initialize an ImageCreator instance.
-
- ks -- a pykickstart.KickstartParser instance; this instance will be
- used to drive the install by e.g. providing the list of packages
- to be installed, the system configuration and %post scripts
-
- name -- a name for the image; used for e.g. image filenames or
- filesystem labels
- """
-
- self.__builddir = None
-
- self.ks = None
- self.name = "target"
- self.tmpdir = "/var/tmp/wic"
- self.workdir = "/var/tmp/wic/build"
-
- # setup tmpfs tmpdir when enabletmpfs is True
- self.enabletmpfs = False
-
- if createopts:
- # Mapping table for variables that have different names.
- optmap = {"outdir" : "destdir",
- }
-
- # update setting from createopts
- for key in createopts:
- if key in optmap:
- option = optmap[key]
- else:
- option = key
- setattr(self, option, createopts[key])
-
- self.destdir = os.path.abspath(os.path.expanduser(self.destdir))
-
- self._dep_checks = ["ls", "bash", "cp", "echo"]
-
- # Output image file names
- self.outimage = []
-
- # No ks provided when called by convertor, so skip the dependency check
- if self.ks:
- # If we have btrfs partition we need to check necessary tools
- for part in self.ks.partitions:
- if part.fstype and part.fstype == "btrfs":
- self._dep_checks.append("mkfs.btrfs")
- break
-
- # make sure the specified tmpdir and cachedir exist
- if not os.path.exists(self.tmpdir):
- os.makedirs(self.tmpdir)
-
-
- #
- # Hooks for subclasses
- #
- def _create(self):
- """Create partitions for the disk image(s)
-
- This is the hook where subclasses may create the partitions
- that will be assembled into disk image(s).
-
- There is no default implementation.
- """
- pass
-
- def _cleanup(self):
- """Undo anything performed in _create().
-
- This is the hook where subclasses must undo anything which was
- done in _create().
-
- There is no default implementation.
-
- """
- pass
-
- #
- # Actual implementation
- #
- def __ensure_builddir(self):
- if not self.__builddir is None:
- return
-
- try:
- self.workdir = os.path.join(self.tmpdir, "build")
- if not os.path.exists(self.workdir):
- os.makedirs(self.workdir)
- self.__builddir = tempfile.mkdtemp(dir=self.workdir,
- prefix="imgcreate-")
- except OSError as err:
- raise CreatorError("Failed create build directory in %s: %s" %
- (self.tmpdir, err))
-
- def __setup_tmpdir(self):
- if not self.enabletmpfs:
- return
-
- runner.show('mount -t tmpfs -o size=4G tmpfs %s' % self.workdir)
-
- def __clean_tmpdir(self):
- if not self.enabletmpfs:
- return
-
- runner.show('umount -l %s' % self.workdir)
-
- def create(self):
- """Create partitions for the disk image(s)
-
- Create the partitions that will be assembled into disk
- image(s).
- """
- self.__setup_tmpdir()
- self.__ensure_builddir()
-
- self._create()
-
- def cleanup(self):
- """Undo anything performed in create().
-
- Note, make sure to call this method once finished with the creator
- instance in order to ensure no stale files are left on the host e.g.:
-
- creator = ImageCreator(ks, name)
- try:
- creator.create()
- finally:
- creator.cleanup()
-
- """
- if not self.__builddir:
- return
-
- self._cleanup()
-
- shutil.rmtree(self.__builddir, ignore_errors=True)
- self.__builddir = None
-
- self.__clean_tmpdir()
-
-
- def print_outimage_info(self):
- msg = "The new image can be found here:\n"
- self.outimage.sort()
- for path in self.outimage:
- msg += ' %s\n' % os.path.abspath(path)
-
- msger.info(msg)
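
The file removed above is a classic template-method lifecycle: create() and cleanup() own the build directory while subclasses fill in _create()/_cleanup(). A minimal standalone sketch of that pattern (simplified names, not the wic API):

    import shutil
    import tempfile

    class BaseCreator:
        """Template-method lifecycle: subclasses override _create()/_cleanup()."""
        def __init__(self):
            self._builddir = None

        def create(self):
            if self._builddir is None:
                self._builddir = tempfile.mkdtemp(prefix="imgcreate-")
            self._create()                    # subclass hook

        def cleanup(self):
            if self._builddir is None:
                return                        # nothing was ever created
            self._cleanup()                   # subclass hook: undo _create()
            shutil.rmtree(self._builddir, ignore_errors=True)
            self._builddir = None

        def _create(self):
            pass                              # no default implementation

        def _cleanup(self):
            pass

    class DiskCreator(BaseCreator):
        def _create(self):
            print("laying out partitions in", self._builddir)
        def _cleanup(self):
            print("undoing partition work")

    creator = DiskCreator()
    try:
        creator.create()
    finally:
        creator.cleanup()                     # never leave stale files on the host
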
diff --git a/scripts/lib/wic/imager/direct.py b/scripts/lib/wic/imager/direct.py
deleted file mode 100644
index ffde232c2b..0000000000
--- a/scripts/lib/wic/imager/direct.py
+++ /dev/null
@@ -1,407 +0,0 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# DESCRIPTION
-# This implements the 'direct' image creator class for 'wic'
-#
-# AUTHORS
-# Tom Zanussi <tom.zanussi (at] linux.intel.com>
-#
-
-import os
-import shutil
-
-from wic import msger
-from wic.utils.oe.misc import get_bitbake_var
-from wic.utils.partitionedfs import Image
-from wic.utils.errors import CreatorError, ImageError
-from wic.imager.baseimager import BaseImageCreator
-from wic.plugin import pluginmgr
-from wic.utils.oe.misc import exec_cmd, exec_native_cmd
-
-disk_methods = {
- "do_install_disk":None,
-}
-
-class DiskImage():
- """
- A Disk backed by a file.
- """
- def __init__(self, device, size):
- self.size = size
- self.device = device
- self.created = False
-
- def exists(self):
- return os.path.exists(self.device)
-
- def create(self):
- if self.created:
- return
- # create sparse disk image
- cmd = "truncate %s -s %s" % (self.device, self.size)
- exec_cmd(cmd)
- self.created = True
-
-class DirectImageCreator(BaseImageCreator):
- """
- Installs a system into a file containing a partitioned disk image.
-
- DirectImageCreator is an advanced ImageCreator subclass; an image
- file is formatted with a partition table, each partition created
- from a rootfs or other OpenEmbedded build artifact and dd'ed into
- the virtual disk. The disk image can subsequently be dd'ed onto
- media and used on actual hardware.
- """
-
- def __init__(self, oe_builddir, image_output_dir, rootfs_dir, bootimg_dir,
- kernel_dir, native_sysroot, compressor, creatoropts=None,
- bmap=False):
- """
- Initialize a DirectImageCreator instance.
-
- This method takes the same arguments as ImageCreator.__init__()
- """
- BaseImageCreator.__init__(self, creatoropts)
-
- self.__image = None
- self.__disks = {}
- self.__disk_format = "direct"
- self._disk_names = []
- self.ptable_format = self.ks.bootloader.ptable
-
- self.oe_builddir = oe_builddir
- if image_output_dir:
- self.tmpdir = image_output_dir
- self.rootfs_dir = rootfs_dir
- self.bootimg_dir = bootimg_dir
- self.kernel_dir = kernel_dir
- self.native_sysroot = native_sysroot
- self.compressor = compressor
- self.bmap = bmap
-
- def __get_part_num(self, num, parts):
- """calculate the real partition number, accounting for partitions not
- in the partition table and logical partitions
- """
- realnum = 0
- for pnum, part in enumerate(parts, 1):
- if not part.no_table:
- realnum += 1
- if pnum == num:
- if part.no_table:
- return 0
- if self.ptable_format == 'msdos' and realnum > 3:
- # account for logical partition numbering, ex. sda5..
- return realnum + 1
- return realnum
-
- def _write_fstab(self, image_rootfs):
- """overriden to generate fstab (temporarily) in rootfs. This is called
- from _create, make sure it doesn't get called from
- BaseImage.create()
- """
- if not image_rootfs:
- return
-
- fstab_path = image_rootfs + "/etc/fstab"
- if not os.path.isfile(fstab_path):
- return
-
- with open(fstab_path) as fstab:
- fstab_lines = fstab.readlines()
-
- if self._update_fstab(fstab_lines, self._get_parts()):
- shutil.copyfile(fstab_path, fstab_path + ".orig")
-
- with open(fstab_path, "w") as fstab:
- fstab.writelines(fstab_lines)
-
- return fstab_path
-
- def _update_fstab(self, fstab_lines, parts):
- """Assume partition order same as in wks"""
- updated = False
- for num, part in enumerate(parts, 1):
- pnum = self.__get_part_num(num, parts)
- if not pnum or not part.mountpoint \
- or part.mountpoint in ("/", "/boot"):
- continue
-
- # mmc device partitions are named mmcblk0p1, mmcblk0p2..
- prefix = 'p' if part.disk.startswith('mmcblk') else ''
- device_name = "/dev/%s%s%d" % (part.disk, prefix, pnum)
-
- opts = part.fsopts if part.fsopts else "defaults"
- line = "\t".join([device_name, part.mountpoint, part.fstype,
- opts, "0", "0"]) + "\n"
-
- fstab_lines.append(line)
- updated = True
-
- return updated
-
- def set_bootimg_dir(self, bootimg_dir):
- """
- Accessor for bootimg_dir, the actual location used for the source
- of the bootimg. Should be set by source plugins (only if they
- change the default bootimg source) so the correct info gets
- displayed for print_outimage_info().
- """
- self.bootimg_dir = bootimg_dir
-
- def _get_parts(self):
- if not self.ks:
- raise CreatorError("Failed to get partition info, "
- "please check your kickstart setting.")
-
- # Set a default partition if no partition is given out
- if not self.ks.partitions:
- partstr = "part / --size 1900 --ondisk sda --fstype=ext3"
- args = partstr.split()
- part = self.ks.parse(args[1:])
- if part not in self.ks.partitions:
- self.ks.partitions.append(part)
-
- # partitions list from kickstart file
- return self.ks.partitions
-
- def get_disk_names(self):
- """ Returns a list of physical target disk names (e.g., 'sdb') which
- will be created. """
-
- if self._disk_names:
- return self._disk_names
-
- #get partition info from ks handler
- parts = self._get_parts()
-
- for i in range(len(parts)):
- if parts[i].disk:
- disk_name = parts[i].disk
- else:
- raise CreatorError("Failed to create disks, no --ondisk "
- "specified in partition line of ks file")
-
- if parts[i].mountpoint and not parts[i].fstype:
- raise CreatorError("Failed to create disks, no --fstype "
- "specified for partition with mountpoint "
- "'%s' in the ks file")
-
- self._disk_names.append(disk_name)
-
- return self._disk_names
-
- def _full_name(self, name, extension):
- """ Construct full file name for a file we generate. """
- return "%s-%s.%s" % (self.name, name, extension)
-
- def _full_path(self, path, name, extension):
- """ Construct full file path to a file we generate. """
- return os.path.join(path, self._full_name(name, extension))
-
- def get_default_source_plugin(self):
- """
- The default source plugin i.e. the plugin that's consulted for
- overall image generation tasks outside of any particular
- partition. For convenience, we just hang it off the
- bootloader handler since it's the one non-partition object in
- any setup. By default the default plugin is set to the same
- plugin as the /boot partition; since we hang it off the
- bootloader object, the default can be explicitly set using the
- --source bootloader param.
- """
- return self.ks.bootloader.source
-
- #
- # Actual implementation
- #
- def _create(self):
- """
- For 'wic', we already have our build artifacts - we just create
- filesystems from the artifacts directly and combine them into
- a partitioned image.
- """
- parts = self._get_parts()
-
- self.__image = Image(self.native_sysroot)
-
- for part in parts:
- # as a convenience, set source to the boot partition source
- # instead of forcing it to be set via bootloader --source
- if not self.ks.bootloader.source and part.mountpoint == "/boot":
- self.ks.bootloader.source = part.source
-
- fstab_path = self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
-
- shutil.rmtree(self.workdir)
- os.mkdir(self.workdir)
-
- for part in parts:
- # get rootfs size from bitbake variable if it's not set in .ks file
- if not part.size:
- # and if rootfs name is specified for the partition
- image_name = part.rootfs_dir
- if image_name:
- # Bitbake variable ROOTFS_SIZE is calculated in
- # Image._get_rootfs_size method from meta/lib/oe/image.py
- # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
- # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
- rsize_bb = get_bitbake_var('ROOTFS_SIZE', image_name)
- if rsize_bb:
- part.size = int(round(float(rsize_bb)))
- # need to create the filesystems in order to get their
- # sizes before we can add them and do the layout.
- # Image.create() actually calls __format_disks() to create
- # the disk images and carve out the partitions, then
- # self.assemble() calls Image.assemble() which calls
- # __write_partition() for each partition to dd the fs
- # into the partitions.
- part.prepare(self, self.workdir, self.oe_builddir, self.rootfs_dir,
- self.bootimg_dir, self.kernel_dir, self.native_sysroot)
-
-
- self.__image.add_partition(int(part.size),
- part.disk,
- part.mountpoint,
- part.source_file,
- part.fstype,
- part.label,
- fsopts=part.fsopts,
- boot=part.active,
- align=part.align,
- no_table=part.no_table,
- part_type=part.part_type,
- uuid=part.uuid,
- system_id=part.system_id)
-
- if fstab_path:
- shutil.move(fstab_path + ".orig", fstab_path)
-
- self.__image.layout_partitions(self.ptable_format)
-
- self.__imgdir = self.workdir
- for disk_name, disk in self.__image.disks.items():
- full_path = self._full_path(self.__imgdir, disk_name, "direct")
- msger.debug("Adding disk %s as %s with size %s bytes" \
- % (disk_name, full_path, disk['min_size']))
- disk_obj = DiskImage(full_path, disk['min_size'])
- self.__disks[disk_name] = disk_obj
- self.__image.add_disk(disk_name, disk_obj)
-
- self.__image.create()
-
- def assemble(self):
- """
- Assemble partitions into disk image(s)
- """
- for disk_name, disk in self.__image.disks.items():
- full_path = self._full_path(self.__imgdir, disk_name, "direct")
- msger.debug("Assembling disk %s as %s with size %s bytes" \
- % (disk_name, full_path, disk['min_size']))
- self.__image.assemble(full_path)
-
- def finalize(self):
- """
- Finalize the disk image.
-
- For example, prepare the image to be bootable by e.g.
- creating and installing a bootloader configuration.
-
- """
- source_plugin = self.get_default_source_plugin()
- if source_plugin:
- self._source_methods = pluginmgr.get_source_plugin_methods(source_plugin, disk_methods)
- for disk_name, disk in self.__image.disks.items():
- self._source_methods["do_install_disk"](disk, disk_name, self,
- self.workdir,
- self.oe_builddir,
- self.bootimg_dir,
- self.kernel_dir,
- self.native_sysroot)
-
- for disk_name, disk in self.__image.disks.items():
- full_path = self._full_path(self.__imgdir, disk_name, "direct")
- # Generate .bmap
- if self.bmap:
- msger.debug("Generating bmap file for %s" % disk_name)
- exec_native_cmd("bmaptool create %s -o %s.bmap" % (full_path, full_path),
- self.native_sysroot)
- # Compress the image
- if self.compressor:
- msger.debug("Compressing disk %s with %s" % (disk_name, self.compressor))
- exec_cmd("%s %s" % (self.compressor, full_path))
-
- def print_outimage_info(self):
- """
- Print the image(s) and artifacts used, for the user.
- """
- msg = "The new image(s) can be found here:\n"
-
- parts = self._get_parts()
-
- for disk_name in self.__image.disks:
- extension = "direct" + {"gzip": ".gz",
- "bzip2": ".bz2",
- "xz": ".xz",
- "": ""}.get(self.compressor)
- full_path = self._full_path(self.__imgdir, disk_name, extension)
- msg += ' %s\n\n' % full_path
-
- msg += 'The following build artifacts were used to create the image(s):\n'
- for part in parts:
- if part.rootfs_dir is None:
- continue
- if part.mountpoint == '/':
- suffix = ':'
- else:
- suffix = '["%s"]:' % (part.mountpoint or part.label)
- msg += ' ROOTFS_DIR%s%s\n' % (suffix.ljust(20), part.rootfs_dir)
-
- msg += ' BOOTIMG_DIR: %s\n' % self.bootimg_dir
- msg += ' KERNEL_DIR: %s\n' % self.kernel_dir
- msg += ' NATIVE_SYSROOT: %s\n' % self.native_sysroot
-
- msger.info(msg)
-
- @property
- def rootdev(self):
- """
- Get root device name to use as a 'root' parameter
- in kernel command line.
-
- Assume partition order same as in wks
- """
- parts = self._get_parts()
- for num, part in enumerate(parts, 1):
- if part.mountpoint == "/":
- if part.uuid:
- return "PARTUUID=%s" % part.uuid
- else:
- suffix = 'p' if part.disk.startswith('mmcblk') else ''
- pnum = self.__get_part_num(num, parts)
- return "/dev/%s%s%-d" % (part.disk, suffix, pnum)
-
- def _cleanup(self):
- if not self.__image is None:
- try:
- self.__image.cleanup()
- except ImageError as err:
- msger.warning("%s" % err)
-
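
Worth noting before this file disappears: __get_part_num() above encodes the non-obvious mapping from kickstart order to on-disk partition numbers, skipping --no-table entries and jumping over the msdos extended-partition slot. A standalone re-implementation of that rule (the Part namedtuple is a stand-in, not the wic class):

    from collections import namedtuple

    Part = namedtuple("Part", "no_table")

    def real_part_num(num, parts, ptable_format):
        """On-disk partition number for kickstart partition `num` (1-based)."""
        realnum = 0
        for pnum, part in enumerate(parts, 1):
            if not part.no_table:
                realnum += 1                 # occupies a slot in the table
            if pnum == num:
                if part.no_table:
                    return 0                 # not in the partition table at all
                if ptable_format == 'msdos' and realnum > 3:
                    return realnum + 1       # slot 4 holds the extended partition
                return realnum

    parts = [Part(False), Part(True), Part(False), Part(False), Part(False)]
    print(real_part_num(5, parts, 'msdos'))  # -> 5: 4th table entry, shifted to sda5
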
diff --git a/scripts/lib/wic/ksparser.py b/scripts/lib/wic/ksparser.py
index 0894e2b199..6a643ba3af 100644
--- a/scripts/lib/wic/ksparser.py
+++ b/scripts/lib/wic/ksparser.py
@@ -1,21 +1,8 @@
-#!/usr/bin/env python -tt
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#!/usr/bin/env python3
#
# Copyright (c) 2016 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides parser for kickstart format
@@ -27,11 +14,30 @@
import os
import shlex
+import logging
+import re
+
from argparse import ArgumentParser, ArgumentError, ArgumentTypeError
-from wic import msger
+from wic.engine import find_canned
from wic.partition import Partition
-from wic.utils.misc import find_canned
+from wic.misc import get_bitbake_var
+
+logger = logging.getLogger('wic')
+
+__expand_var_regexp__ = re.compile(r"\${[^{}@\n\t :]+}")
+
+def expand_line(line):
+ while True:
+ m = __expand_var_regexp__.search(line)
+ if not m:
+ return line
+ key = m.group()[2:-1]
+ val = get_bitbake_var(key)
+ if val is None:
+ logger.warning("cannot expand variable %s" % key)
+ return line
+ line = line[:m.start()] + val + line[m.end():]
class KickStartError(Exception):
"""Custom exception."""
@@ -111,7 +117,10 @@ def systemidtype(arg):
return arg
class KickStart():
- """"Kickstart parser implementation."""
+ """Kickstart parser implementation."""
+
+ DEFAULT_EXTRA_SPACE = 10*1024
+ DEFAULT_OVERHEAD_FACTOR = 1.3
def __init__(self, confpath):
@@ -127,21 +136,38 @@ class KickStart():
part.add_argument('mountpoint', nargs='?')
part.add_argument('--active', action='store_true')
part.add_argument('--align', type=int)
- part.add_argument("--extra-space", type=sizetype, default=10*1024)
+ part.add_argument('--exclude-path', nargs='+')
+ part.add_argument("--extra-space", type=sizetype)
part.add_argument('--fsoptions', dest='fsopts')
- part.add_argument('--fstype')
+ part.add_argument('--fstype', default='vfat',
+ choices=('ext2', 'ext3', 'ext4', 'btrfs',
+ 'squashfs', 'vfat', 'msdos', 'swap'))
+ part.add_argument('--mkfs-extraopts', default='')
part.add_argument('--label')
+ part.add_argument('--use-label', action='store_true')
part.add_argument('--no-table', action='store_true')
- part.add_argument('--ondisk', '--ondrive', dest='disk')
- part.add_argument("--overhead-factor", type=overheadtype, default=1.3)
+ part.add_argument('--ondisk', '--ondrive', dest='disk', default='sda')
+ part.add_argument("--overhead-factor", type=overheadtype)
+ part.add_argument('--part-name')
part.add_argument('--part-type')
part.add_argument('--rootfs-dir')
- part.add_argument('--size', type=sizetype, default=0)
+ part.add_argument('--type', default='primary',
+ choices = ('primary', 'logical'))
+
+ # --size and --fixed-size cannot be specified together; options
+ # --extra-space and --overhead-factor should also raise a parser
+ # error, but since nesting mutually exclusive groups does not work,
+ # --extra-space/--overhead-factor are handled later
+ sizeexcl = part.add_mutually_exclusive_group()
+ sizeexcl.add_argument('--size', type=sizetype, default=0)
+ sizeexcl.add_argument('--fixed-size', type=sizetype, default=0)
+
part.add_argument('--source')
part.add_argument('--sourceparams')
part.add_argument('--system-id', type=systemidtype)
part.add_argument('--use-uuid', action='store_true')
part.add_argument('--uuid')
+ part.add_argument('--fsuuid')
bootloader = subparsers.add_parser('bootloader')
bootloader.add_argument('--append')
@@ -156,7 +182,7 @@ class KickStart():
self._parse(parser, confpath)
if not self.bootloader:
- msger.warning('bootloader config not specified, using defaults')
+ logger.warning('bootloader config not specified, using defaults\n')
self.bootloader = bootloader.parse_args([])
def _parse(self, parser, confpath):
@@ -169,12 +195,49 @@ class KickStart():
line = line.strip()
lineno += 1
if line and line[0] != '#':
+ line = expand_line(line)
try:
- parsed = parser.parse_args(shlex.split(line))
+ line_args = shlex.split(line)
+ parsed = parser.parse_args(line_args)
except ArgumentError as err:
raise KickStartError('%s:%d: %s' % \
(confpath, lineno, err))
if line.startswith('part'):
+ # SquashFS does not support filesystem UUID
+ if parsed.fstype == 'squashfs':
+ if parsed.fsuuid:
+ err = "%s:%d: SquashFS does not support UUID" \
+ % (confpath, lineno)
+ raise KickStartError(err)
+ if parsed.label:
+ err = "%s:%d: SquashFS does not support LABEL" \
+ % (confpath, lineno)
+ raise KickStartError(err)
+ if parsed.use_label and not parsed.label:
+ err = "%s:%d: Must set the label with --label" \
+ % (confpath, lineno)
+ raise KickStartError(err)
+ # with ArgumentParser one cannot easily tell whether an
+ # option was explicitly passed when it has a default
+ # value; --overhead-factor/--extra-space cannot be used
+ # with --fixed-size, so at least detect when these were
+ # passed with non-0 values ...
+ if parsed.fixed_size:
+ if parsed.overhead_factor or parsed.extra_space:
+ err = "%s:%d: arguments --overhead-factor and --extra-space not "\
+ "allowed with argument --fixed-size" \
+ % (confpath, lineno)
+ raise KickStartError(err)
+ else:
+ # ... and provide defaults when not using
+ # --fixed-size, but only if the option was not
+ # passed at all (again, one cannot tell an
+ # absent option from one passed with value 0)
+ if '--overhead-factor' not in line_args:
+ parsed.overhead_factor = self.DEFAULT_OVERHEAD_FACTOR
+ if '--extra-space' not in line_args:
+ parsed.extra_space = self.DEFAULT_EXTRA_SPACE
+
self.partnum += 1
self.partitions.append(Partition(parsed, self.partnum))
elif line.startswith('include'):
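
The defaulting logic above works around a real argparse limitation: a parsed value equal to the default is indistinguishable from an omitted option, so the raw token list is consulted instead. A condensed, runnable sketch of that idiom (values match the parser defaults above):

    from argparse import ArgumentParser
    import shlex

    parser = ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--size', type=int, default=0)
    group.add_argument('--fixed-size', type=int, default=0)
    parser.add_argument('--extra-space', type=int)       # deliberately no default
    parser.add_argument('--overhead-factor', type=float)

    args = shlex.split("--size 1024")
    parsed = parser.parse_args(args)

    if parsed.fixed_size and (parsed.overhead_factor or parsed.extra_space):
        raise SystemExit("--overhead-factor/--extra-space not allowed with --fixed-size")
    # Apply defaults only when the option never appeared on the line.
    if '--overhead-factor' not in args:
        parsed.overhead_factor = 1.3
    if '--extra-space' not in args:
        parsed.extra_space = 10 * 1024
    print(parsed.size, parsed.extra_space, parsed.overhead_factor)  # 1024 10240 1.3
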
diff --git a/scripts/lib/wic/utils/oe/misc.py b/scripts/lib/wic/misc.py
index fe188c9d26..1f199b9f23 100644
--- a/scripts/lib/wic/utils/oe/misc.py
+++ b/scripts/lib/wic/misc.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides a place to collect various wic-related utils
@@ -26,17 +12,28 @@
#
"""Miscellaneous functions."""
+import logging
import os
+import re
+import subprocess
+
from collections import defaultdict
from distutils import spawn
-from wic import msger
-from wic.utils import runner
+from wic import WicError
+
+logger = logging.getLogger('wic')
# executable -> recipe pairs for exec_native_cmd
NATIVE_RECIPES = {"bmaptool": "bmap-tools",
+ "grub-mkimage": "grub-efi",
+ "isohybrid": "syslinux",
"mcopy": "mtools",
+ "mdel" : "mtools",
+ "mdeltree" : "mtools",
+ "mdir" : "mtools",
"mkdosfs": "dosfstools",
+ "mkisofs": "cdrtools",
"mkfs.btrfs": "btrfs-tools",
"mkfs.ext2": "e2fsprogs",
"mkfs.ext3": "e2fsprogs",
@@ -44,48 +41,79 @@ NATIVE_RECIPES = {"bmaptool": "bmap-tools",
"mkfs.vfat": "dosfstools",
"mksquashfs": "squashfs-tools",
"mkswap": "util-linux",
+ "mmd": "mtools",
"parted": "parted",
"sfdisk": "util-linux",
"sgdisk": "gptfdisk",
"syslinux": "syslinux"
}
-def _exec_cmd(cmd_and_args, as_shell=False, catch=3):
+def runtool(cmdln_or_args):
+ """ wrapper for most of the subprocess calls
+ input:
+ cmdln_or_args: can be both args and cmdln str (shell=True)
+ return:
+ rc, output
+ """
+ if isinstance(cmdln_or_args, list):
+ cmd = cmdln_or_args[0]
+ shell = False
+ else:
+ import shlex
+ cmd = shlex.split(cmdln_or_args)[0]
+ shell = True
+
+ sout = subprocess.PIPE
+ serr = subprocess.STDOUT
+
+ try:
+ process = subprocess.Popen(cmdln_or_args, stdout=sout,
+ stderr=serr, shell=shell)
+ sout, serr = process.communicate()
+ # combine stdout and stderr, filter None out and decode
+ out = ''.join([out.decode('utf-8') for out in [sout, serr] if out])
+ except OSError as err:
+ if err.errno == 2:
+ # [Errno 2] No such file or directory
+ raise WicError('Cannot run command: %s, lost dependency?' % cmd)
+ else:
+ raise # relay
+
+ return process.returncode, out
+
+def _exec_cmd(cmd_and_args, as_shell=False):
"""
Execute command, catching stderr, stdout
Need to execute as_shell if the command uses wildcards
"""
- msger.debug("_exec_cmd: %s" % cmd_and_args)
+ logger.debug("_exec_cmd: %s", cmd_and_args)
args = cmd_and_args.split()
- msger.debug(args)
+ logger.debug(args)
if as_shell:
- ret, out = runner.runtool(cmd_and_args, catch)
+ ret, out = runtool(cmd_and_args)
else:
- ret, out = runner.runtool(args, catch)
+ ret, out = runtool(args)
out = out.strip()
- msger.debug("_exec_cmd: output for %s (rc = %d): %s" % \
- (cmd_and_args, ret, out))
+ if ret != 0:
+ raise WicError("_exec_cmd: %s returned '%s' instead of 0\noutput: %s" % \
+ (cmd_and_args, ret, out))
- return (ret, out)
+ logger.debug("_exec_cmd: output for %s (rc = %d): %s",
+ cmd_and_args, ret, out)
+ return ret, out
-def exec_cmd(cmd_and_args, as_shell=False, catch=3):
- """
- Execute command, catching stderr, stdout
- Exits if rc non-zero
+def exec_cmd(cmd_and_args, as_shell=False):
"""
- ret, out = _exec_cmd(cmd_and_args, as_shell, catch)
-
- if ret != 0:
- msger.error("exec_cmd: %s returned '%s' instead of 0" % \
- (cmd_and_args, ret))
+ Execute command, return output
+ """
+ return _exec_cmd(cmd_and_args, as_shell)[1]
- return out
-def exec_native_cmd(cmd_and_args, native_sysroot, catch=3, pseudo=""):
+def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
"""
Execute native command, catching stderr, stdout
@@ -95,22 +123,24 @@ def exec_native_cmd(cmd_and_args, native_sysroot, catch=3, pseudo=""):
"""
# The reason -1 is used is because there may be "export" commands.
args = cmd_and_args.split(';')[-1].split()
- msger.debug(args)
+ logger.debug(args)
if pseudo:
cmd_and_args = pseudo + cmd_and_args
- native_paths = \
- "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
- (native_sysroot, native_sysroot, native_sysroot)
+
+ native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
+ (native_sysroot, native_sysroot, native_sysroot)
+
native_cmd_and_args = "export PATH=%s:$PATH;%s" % \
- (native_paths, cmd_and_args)
- msger.debug("exec_native_cmd: %s" % cmd_and_args)
+ (native_paths, cmd_and_args)
+ logger.debug("exec_native_cmd: %s", native_cmd_and_args)
# If the command isn't in the native sysroot say we failed.
if spawn.find_executable(args[0], native_paths):
- ret, out = _exec_cmd(native_cmd_and_args, True, catch)
+ ret, out = _exec_cmd(native_cmd_and_args, True)
else:
ret = 127
+ out = "can't find native executable %s in %s" % (args[0], native_paths)
prog = args[0]
# shell command-not-found
@@ -120,18 +150,12 @@ def exec_native_cmd(cmd_and_args, native_sysroot, catch=3, pseudo=""):
"was not found (see details above).\n\n" % prog
recipe = NATIVE_RECIPES.get(prog)
if recipe:
- msg += "Please bake it with 'bitbake %s-native' "\
- "and try again.\n" % recipe
+ msg += "Please make sure wic-tools have %s-native in its DEPENDS, "\
+ "build it with 'bitbake wic-tools' and try again.\n" % recipe
else:
msg += "Wic failed to find a recipe to build native %s. Please "\
"file a bug against wic.\n" % prog
- msger.error(msg)
- if out:
- msger.debug('"%s" output: %s' % (args[0], out))
-
- if ret != 0:
- msger.error("exec_cmd: '%s' returned '%s' instead of 0" % \
- (cmd_and_args, ret))
+ raise WicError(msg)
return ret, out
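
exec_native_cmd() builds one shell invocation that prepends the sysroot's bin directories to PATH, so native tools win over host ones without mutating the caller's environment. A reduced sketch of that pattern using subprocess.run (the sysroot path is illustrative):

    import subprocess

    def run_native(cmd, native_sysroot):
        """Run `cmd` with the sysroot's bin directories ahead of the host PATH."""
        native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin" % ((native_sysroot,) * 3)
        shell_cmd = "export PATH=%s:$PATH;%s" % (native_paths, cmd)
        proc = subprocess.run(shell_cmd, shell=True,
                              stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        return proc.returncode, proc.stdout.decode('utf-8')

    # Missing sysroot dirs simply fall through to the host tool (or rc 127).
    rc, out = run_native("parted --version", "/tmp/wic-tools-sysroot")
    print(rc, out.splitlines()[0] if out else "")
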
@@ -148,23 +172,20 @@ class BitbakeVars(defaultdict):
self.default_image = None
self.vars_dir = None
- def _parse_line(self, line, image):
+ def _parse_line(self, line, image, matcher=re.compile(r"^([a-zA-Z0-9\-_+./~]+)=(.*)")):
"""
Parse one line from bitbake -e output or from .env file.
Put result key-value pair into the storage.
"""
if "=" not in line:
return
- try:
- key, val = line.split("=")
- except ValueError:
+ match = matcher.match(line)
+ if not match:
return
- key = key.strip()
- val = val.strip()
- if key.replace('_', '').isalnum():
- self[image][key] = val.strip('"')
+ key, val = match.groups()
+ self[image][key] = val.strip('"')
- def get_var(self, var, image=None):
+ def get_var(self, var, image=None, cache=True):
"""
Get bitbake variable from 'bitbake -e' output or from .env file.
This is a lazy method, i.e. it runs bitbake or parses file only when
@@ -191,14 +212,14 @@ class BitbakeVars(defaultdict):
if image:
cmd += " %s" % image
- log_level = msger.get_loglevel()
- msger.set_loglevel('normal')
+ log_level = logger.getEffectiveLevel()
+ logger.setLevel(logging.INFO)
ret, lines = _exec_cmd(cmd)
- msger.set_loglevel(log_level)
+ logger.setLevel(log_level)
if ret:
- print("Couldn't get '%s' output." % cmd)
- print("Bitbake failed with error:\n%s\n" % lines)
+ logger.error("Couldn't get '%s' output.", cmd)
+ logger.error("Bitbake failed with error:\n%s\n", lines)
return
# Parse bitbake -e output
@@ -206,41 +227,23 @@ class BitbakeVars(defaultdict):
self._parse_line(line, image)
# Make first image a default set of variables
- images = [key for key in self if key]
- if len(images) == 1:
- self[None] = self[image]
+ if cache:
+ images = [key for key in self if key]
+ if len(images) == 1:
+ self[None] = self[image]
+
+ result = self[image].get(var)
+ if not cache:
+ self.pop(image, None)
- return self[image].get(var)
+ return result
# Create BB_VARS singleton
BB_VARS = BitbakeVars()
-def get_bitbake_var(var, image=None):
+def get_bitbake_var(var, image=None, cache=True):
"""
Provide old get_bitbake_var API by wrapping
get_var method of BB_VARS singleton.
"""
- return BB_VARS.get_var(var, image)
-
-def parse_sourceparams(sourceparams):
- """
- Split sourceparams string of the form key1=val1[,key2=val2,...]
- into a dict. Also accepts valueless keys i.e. without =.
-
- Returns dict of param key/val pairs (note that val may be None).
- """
- params_dict = {}
-
- params = sourceparams.split(',')
- if params:
- for par in params:
- if not par:
- continue
- if not '=' in par:
- key = par
- val = None
- else:
- key, val = par.split('=')
- params_dict[key] = val
-
- return params_dict
+ return BB_VARS.get_var(var, image, cache)
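
The rewritten _parse_line() replaces the old line.split("=") with a single anchored regex, which copes with values that themselves contain '=' (the old code silently dropped them). A quick standalone check of that behavior:

    import re

    MATCHER = re.compile(r"^([a-zA-Z0-9\-_+./~]+)=(.*)")

    def parse_line(line, store):
        match = MATCHER.match(line)
        if match:                            # non-assignments simply fail to match
            key, val = match.groups()
            store[key] = val.strip('"')

    env = {}
    for line in ['MACHINE="qemux86-64"',
                 'APPEND="console=ttyS0 root=/dev/vda rw"',  # '=' inside the value
                 '# export lines and comments: no match, no entry']:
        parse_line(line, env)
    print(env['APPEND'])                     # console=ttyS0 root=/dev/vda rw
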
diff --git a/scripts/lib/wic/msger.py b/scripts/lib/wic/msger.py
deleted file mode 100644
index fb8336d94a..0000000000
--- a/scripts/lib/wic/msger.py
+++ /dev/null
@@ -1,235 +0,0 @@
-#!/usr/bin/env python -tt
-# vim: ai ts=4 sts=4 et sw=4
-#
-# Copyright (c) 2009, 2010, 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os
-import sys
-import re
-import time
-
-__ALL__ = ['get_loglevel',
- 'set_loglevel',
- 'set_logfile',
- 'debug',
- 'verbose',
- 'info',
- 'warning',
- 'error',
- ]
-
-# COLORs in ANSI
-INFO_COLOR = 32 # green
-WARN_COLOR = 33 # yellow
-ERR_COLOR = 31 # red
-ASK_COLOR = 34 # blue
-NO_COLOR = 0
-
-PREFIX_RE = re.compile('^<(.*?)>\s*(.*)', re.S)
-
-INTERACTIVE = True
-
-LOG_LEVEL = 1
-LOG_LEVELS = {
- 'quiet': 0,
- 'normal': 1,
- 'verbose': 2,
- 'debug': 3,
- 'never': 4,
-}
-
-LOG_FILE_FP = None
-LOG_CONTENT = ''
-CATCHERR_BUFFILE_FD = -1
-CATCHERR_BUFFILE_PATH = None
-CATCHERR_SAVED_2 = -1
-
-def _general_print(head, color, msg=None, stream=None, level='normal'):
- global LOG_CONTENT
- if not stream:
- stream = sys.stdout
-
- if LOG_LEVELS[level] > LOG_LEVEL:
- # skip
- return
-
- errormsg = ''
- if CATCHERR_BUFFILE_FD > 0:
- size = os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_END)
- os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_SET)
- errormsg = os.read(CATCHERR_BUFFILE_FD, size)
- os.ftruncate(CATCHERR_BUFFILE_FD, 0)
-
- # append error msg to LOG
- if errormsg:
- LOG_CONTENT += errormsg
-
- # append normal msg to LOG
- save_msg = msg.strip() if msg else None
- if save_msg:
- timestr = time.strftime("[%m/%d %H:%M:%S %Z] ", time.localtime())
- LOG_CONTENT += timestr + save_msg + '\n'
-
- if errormsg:
- _color_print('', NO_COLOR, errormsg, stream, level)
-
- _color_print(head, color, msg, stream, level)
-
-def _color_print(head, color, msg, stream, level):
- colored = True
- if color == NO_COLOR or \
- not stream.isatty() or \
- os.getenv('ANSI_COLORS_DISABLED') is not None:
- colored = False
-
- if head.startswith('\r'):
- # need not \n at last
- newline = False
- else:
- newline = True
-
- if colored:
- head = '\033[%dm%s:\033[0m ' %(color, head)
- if not newline:
- # ESC cmd to clear line
- head = '\033[2K' + head
- else:
- if head:
- head += ': '
- if head.startswith('\r'):
- head = head.lstrip()
- newline = True
-
- if msg is not None:
- stream.write('%s%s' % (head, msg))
- if newline:
- stream.write('\n')
-
- stream.flush()
-
-def _color_perror(head, color, msg, level='normal'):
- if CATCHERR_BUFFILE_FD > 0:
- _general_print(head, color, msg, sys.stdout, level)
- else:
- _general_print(head, color, msg, sys.stderr, level)
-
-def _split_msg(head, msg):
- if isinstance(msg, list):
- msg = '\n'.join(map(str, msg))
-
- if msg.startswith('\n'):
- # means print \n at first
- msg = msg.lstrip()
- head = '\n' + head
-
- elif msg.startswith('\r'):
- # means print \r at first
- msg = msg.lstrip()
- head = '\r' + head
-
- match = PREFIX_RE.match(msg)
- if match:
- head += ' <%s>' % match.group(1)
- msg = match.group(2)
-
- return head, msg
-
-def get_loglevel():
- return next((k for k, v in LOG_LEVELS.items() if v == LOG_LEVEL))
-
-def set_loglevel(level):
- global LOG_LEVEL
- if level not in LOG_LEVELS:
- # no effect
- return
-
- LOG_LEVEL = LOG_LEVELS[level]
-
-def set_interactive(mode=True):
- global INTERACTIVE
- if mode:
- INTERACTIVE = True
- else:
- INTERACTIVE = False
-
-def log(msg=''):
- # log msg to LOG_CONTENT then save to logfile
- global LOG_CONTENT
- if msg:
- LOG_CONTENT += msg
-
-def info(msg):
- head, msg = _split_msg('Info', msg)
- _general_print(head, INFO_COLOR, msg)
-
-def verbose(msg):
- head, msg = _split_msg('Verbose', msg)
- _general_print(head, INFO_COLOR, msg, level='verbose')
-
-def warning(msg):
- head, msg = _split_msg('Warning', msg)
- _color_perror(head, WARN_COLOR, msg)
-
-def debug(msg):
- head, msg = _split_msg('Debug', msg)
- _color_perror(head, ERR_COLOR, msg, level='debug')
-
-def error(msg):
- head, msg = _split_msg('Error', msg)
- _color_perror(head, ERR_COLOR, msg)
- sys.exit(1)
-
-def set_logfile(fpath):
- global LOG_FILE_FP
-
- def _savelogf():
- if LOG_FILE_FP:
- with open(LOG_FILE_FP, 'w') as log:
- log.write(LOG_CONTENT)
-
- if LOG_FILE_FP is not None:
- warning('duplicate log file configuration')
-
- LOG_FILE_FP = fpath
-
- import atexit
- atexit.register(_savelogf)
-
-def enable_logstderr(fpath):
- global CATCHERR_BUFFILE_FD
- global CATCHERR_BUFFILE_PATH
- global CATCHERR_SAVED_2
-
- if os.path.exists(fpath):
- os.remove(fpath)
- CATCHERR_BUFFILE_PATH = fpath
- CATCHERR_BUFFILE_FD = os.open(CATCHERR_BUFFILE_PATH, os.O_RDWR|os.O_CREAT)
- CATCHERR_SAVED_2 = os.dup(2)
- os.dup2(CATCHERR_BUFFILE_FD, 2)
-
-def disable_logstderr():
- global CATCHERR_BUFFILE_FD
- global CATCHERR_BUFFILE_PATH
- global CATCHERR_SAVED_2
-
- raw(msg=None) # flush message buffer and print it.
- os.dup2(CATCHERR_SAVED_2, 2)
- os.close(CATCHERR_SAVED_2)
- os.close(CATCHERR_BUFFILE_FD)
- os.unlink(CATCHERR_BUFFILE_PATH)
- CATCHERR_BUFFILE_FD = -1
- CATCHERR_BUFFILE_PATH = None
- CATCHERR_SAVED_2 = -1
diff --git a/scripts/lib/wic/partition.py b/scripts/lib/wic/partition.py
index 30cd320a30..d809408e1a 100644
--- a/scripts/lib/wic/partition.py
+++ b/scripts/lib/wic/partition.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013-2016 Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides the OpenEmbedded partition object definitions.
@@ -24,19 +10,15 @@
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
# Ed Bartosh <ed.bartosh (at] linux.intel.com>
+import logging
import os
-import tempfile
import uuid
-from wic.utils.oe.misc import msger, parse_sourceparams
-from wic.utils.oe.misc import exec_cmd, exec_native_cmd
-from wic.plugin import pluginmgr
+from wic import WicError
+from wic.misc import exec_cmd, exec_native_cmd, get_bitbake_var
+from wic.pluginbase import PluginMgr
-partition_methods = {
- "do_stage_partition":None,
- "do_prepare_partition":None,
- "do_configure_partition":None,
-}
+logger = logging.getLogger('wic')
class Partition():
@@ -45,27 +27,33 @@ class Partition():
self.active = args.active
self.align = args.align
self.disk = args.disk
+ self.device = None
self.extra_space = args.extra_space
+ self.exclude_path = args.exclude_path
self.fsopts = args.fsopts
self.fstype = args.fstype
self.label = args.label
+ self.use_label = args.use_label
+ self.mkfs_extraopts = args.mkfs_extraopts
self.mountpoint = args.mountpoint
self.no_table = args.no_table
+ self.num = None
self.overhead_factor = args.overhead_factor
+ self.part_name = args.part_name
self.part_type = args.part_type
self.rootfs_dir = args.rootfs_dir
self.size = args.size
+ self.fixed_size = args.fixed_size
self.source = args.source
self.sourceparams = args.sourceparams
self.system_id = args.system_id
self.use_uuid = args.use_uuid
self.uuid = args.uuid
- if args.use_uuid and not self.uuid:
- self.uuid = str(uuid.uuid4())
+ self.fsuuid = args.fsuuid
+ self.type = args.type
self.lineno = lineno
self.source_file = ""
- self.sourceparams_dict = {}
def get_extra_block_count(self, current_blocks):
"""
@@ -74,132 +62,184 @@ class Partition():
number of (1k) blocks we need to add to get to --size, 0 if
we're already there or beyond.
"""
- msger.debug("Requested partition size for %s: %d" % \
- (self.mountpoint, self.size))
+ logger.debug("Requested partition size for %s: %d",
+ self.mountpoint, self.size)
if not self.size:
return 0
requested_blocks = self.size
- msger.debug("Requested blocks %d, current_blocks %d" % \
- (requested_blocks, current_blocks))
+ logger.debug("Requested blocks %d, current_blocks %d",
+ requested_blocks, current_blocks)
if requested_blocks > current_blocks:
return requested_blocks - current_blocks
else:
return 0
+ def get_rootfs_size(self, actual_rootfs_size=0):
+ """
+ Calculate the required size of rootfs taking into consideration
+ --size/--fixed-size flags as well as overhead and extra space, as
+ specified in kickstart file. Raises an error if the
+ `actual_rootfs_size` is larger than fixed-size rootfs.
+
+ """
+ if self.fixed_size:
+ rootfs_size = self.fixed_size
+ if actual_rootfs_size > rootfs_size:
+ raise WicError("Actual rootfs size (%d kB) is larger than "
+ "allowed size %d kB" %
+ (actual_rootfs_size, rootfs_size))
+ else:
+ extra_blocks = self.get_extra_block_count(actual_rootfs_size)
+ if extra_blocks < self.extra_space:
+ extra_blocks = self.extra_space
+
+ rootfs_size = actual_rootfs_size + extra_blocks
+ rootfs_size *= self.overhead_factor
+
+ logger.debug("Added %d extra blocks to %s to get to %d total blocks",
+ extra_blocks, self.mountpoint, rootfs_size)
+
+ return rootfs_size
+
+ @property
+ def disk_size(self):
+ """
+ Obtain on-disk size of partition taking into consideration
+ --size/--fixed-size options.
+
+ """
+ return self.fixed_size if self.fixed_size else self.size
+
def prepare(self, creator, cr_workdir, oe_builddir, rootfs_dir,
bootimg_dir, kernel_dir, native_sysroot):
"""
Prepare content for individual partitions, depending on
partition command parameters.
"""
- if self.sourceparams:
- self.sourceparams_dict = parse_sourceparams(self.sourceparams)
-
if not self.source:
- if not self.size:
- msger.error("The %s partition has a size of zero. Please "
- "specify a non-zero --size for that partition." % \
- self.mountpoint)
- if self.fstype and self.fstype == "swap":
+ if not self.size and not self.fixed_size:
+ raise WicError("The %s partition has a size of zero. Please "
+ "specify a non-zero --size/--fixed-size for that "
+ "partition." % self.mountpoint)
+
+ if self.fstype == "swap":
self.prepare_swap_partition(cr_workdir, oe_builddir,
native_sysroot)
self.source_file = "%s/fs.%s" % (cr_workdir, self.fstype)
- elif self.fstype:
+ else:
+ if self.fstype == 'squashfs':
+ raise WicError("It's not possible to create empty squashfs "
+ "partition '%s'" % (self.mountpoint))
+
rootfs = "%s/fs_%s.%s.%s" % (cr_workdir, self.label,
self.lineno, self.fstype)
if os.path.isfile(rootfs):
os.remove(rootfs)
- for prefix in ("ext", "btrfs", "vfat", "squashfs"):
- if self.fstype.startswith(prefix):
- method = getattr(self,
- "prepare_empty_partition_" + prefix)
- method(rootfs, oe_builddir, native_sysroot)
- self.source_file = rootfs
- break
+
+ prefix = "ext" if self.fstype.startswith("ext") else self.fstype
+ method = getattr(self, "prepare_empty_partition_" + prefix)
+ method(rootfs, oe_builddir, native_sysroot)
+ self.source_file = rootfs
return
- plugins = pluginmgr.get_source_plugins()
+ plugins = PluginMgr.get_plugins('source')
if self.source not in plugins:
- msger.error("The '%s' --source specified for %s doesn't exist.\n\t"
- "See 'wic list source-plugins' for a list of available"
- " --sources.\n\tSee 'wic help source-plugins' for "
- "details on adding a new source plugin." % \
- (self.source, self.mountpoint))
-
- self._source_methods = pluginmgr.get_source_plugin_methods(\
- self.source, partition_methods)
- self._source_methods["do_configure_partition"](self, self.sourceparams_dict,
- creator, cr_workdir,
- oe_builddir,
- bootimg_dir,
- kernel_dir,
- native_sysroot)
- self._source_methods["do_stage_partition"](self, self.sourceparams_dict,
- creator, cr_workdir,
- oe_builddir,
- bootimg_dir, kernel_dir,
- native_sysroot)
- self._source_methods["do_prepare_partition"](self, self.sourceparams_dict,
- creator, cr_workdir,
- oe_builddir,
- bootimg_dir, kernel_dir, rootfs_dir,
- native_sysroot)
-
- def prepare_rootfs_from_fs_image(self, cr_workdir, oe_builddir,
- rootfs_dir):
- """
- Handle an already-created partition e.g. xxx.ext3
- """
- rootfs = oe_builddir
- du_cmd = "du -Lbks %s" % rootfs
- out = exec_cmd(du_cmd)
- rootfs_size = out.split()[0]
+ raise WicError("The '%s' --source specified for %s doesn't exist.\n\t"
+ "See 'wic list source-plugins' for a list of available"
+ " --sources.\n\tSee 'wic help source-plugins' for "
+ "details on adding a new source plugin." %
+ (self.source, self.mountpoint))
- self.size = rootfs_size
- self.source_file = rootfs
+ srcparams_dict = {}
+ if self.sourceparams:
+ # Split sourceparams string of the form key1=val1[,key2=val2,...]
+ # into a dict. Also accepts valueless keys i.e. without =
+ splitted = self.sourceparams.split(',')
+ srcparams_dict = dict(par.split('=', 1) for par in splitted if par)
+
+ plugin = PluginMgr.get_plugins('source')[self.source]
+ plugin.do_configure_partition(self, srcparams_dict, creator,
+ cr_workdir, oe_builddir, bootimg_dir,
+ kernel_dir, native_sysroot)
+ plugin.do_stage_partition(self, srcparams_dict, creator,
+ cr_workdir, oe_builddir, bootimg_dir,
+ kernel_dir, native_sysroot)
+ plugin.do_prepare_partition(self, srcparams_dict, creator,
+ cr_workdir, oe_builddir, bootimg_dir,
+ kernel_dir, rootfs_dir, native_sysroot)
+ plugin.do_post_partition(self, srcparams_dict, creator,
+ cr_workdir, oe_builddir, bootimg_dir,
+ kernel_dir, rootfs_dir, native_sysroot)
+
+ # further processing requires Partition.size to be an integer, make
+ # sure that it is one
+ if not isinstance(self.size, int):
+ raise WicError("Partition %s internal size is not an integer. "
+ "This a bug in source plugin %s and needs to be fixed." %
+ (self.mountpoint, self.source))
+
+ if self.fixed_size and self.size > self.fixed_size:
+ raise WicError("File system image of partition %s is "
+ "larger (%d kB) than its allowed size %d kB" %
+ (self.mountpoint, self.size, self.fixed_size))
def prepare_rootfs(self, cr_workdir, oe_builddir, rootfs_dir,
- native_sysroot):
+ native_sysroot, real_rootfs = True):
"""
Prepare content for a rootfs partition i.e. create a partition
and fill it from a /rootfs dir.
- Currently handles ext2/3/4, btrfs and vfat.
+ Currently handles ext2/3/4, btrfs, vfat and squashfs.
"""
p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot)
p_localstatedir = os.environ.get("PSEUDO_LOCALSTATEDIR",
- "%s/../pseudo" % rootfs_dir)
+ "%s/../pseudo" % rootfs_dir)
p_passwd = os.environ.get("PSEUDO_PASSWD", rootfs_dir)
p_nosymlinkexp = os.environ.get("PSEUDO_NOSYMLINKEXP", "1")
pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix
pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % p_localstatedir
pseudo += "export PSEUDO_PASSWD=%s;" % p_passwd
pseudo += "export PSEUDO_NOSYMLINKEXP=%s;" % p_nosymlinkexp
- pseudo += "%s/usr/bin/pseudo " % native_sysroot
+ pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
rootfs = "%s/rootfs_%s.%s.%s" % (cr_workdir, self.label,
self.lineno, self.fstype)
if os.path.isfile(rootfs):
os.remove(rootfs)
- for prefix in ("ext", "btrfs", "vfat", "squashfs"):
- if self.fstype.startswith(prefix):
- method = getattr(self, "prepare_rootfs_" + prefix)
- method(rootfs, oe_builddir, rootfs_dir, native_sysroot, pseudo)
-
- self.source_file = rootfs
-
- # get the rootfs size in the right units for kickstart (kB)
- du_cmd = "du -Lbks %s" % rootfs
+ if not self.size and real_rootfs:
+ # The rootfs size is not set in .ks file so try to get it
+ # from bitbake variable
+ rsize_bb = get_bitbake_var('ROOTFS_SIZE')
+ rdir = get_bitbake_var('IMAGE_ROOTFS')
+ if rsize_bb and rdir == rootfs_dir:
+ # Bitbake variable ROOTFS_SIZE is calculated in
+ # Image._get_rootfs_size method from meta/lib/oe/image.py
+ # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
+ # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
+ self.size = int(round(float(rsize_bb)))
+ else:
+ # Bitbake variable ROOTFS_SIZE is not defined so compute it
+ # from the rootfs_dir size using the same logic found in
+ # get_rootfs_size() from meta/classes/image.bbclass
+ du_cmd = "du -ks %s" % rootfs_dir
out = exec_cmd(du_cmd)
- self.size = out.split()[0]
+ self.size = int(out.split()[0])
- break
+ prefix = "ext" if self.fstype.startswith("ext") else self.fstype
+ method = getattr(self, "prepare_rootfs_" + prefix)
+ method(rootfs, oe_builddir, rootfs_dir, native_sysroot, pseudo)
+ self.source_file = rootfs
+
+ # get the rootfs size in the right units for kickstart (kB)
+ du_cmd = "du -Lbks %s" % rootfs
+ out = exec_cmd(du_cmd)
+ self.size = int(out.split()[0])
def prepare_rootfs_ext(self, rootfs, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
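
The new get_rootfs_size() centralizes sizing arithmetic that was previously duplicated per filesystem: pad the measured size with at least --extra-space blocks (or up to --size), scale by --overhead-factor, and treat --fixed-size as a hard cap. A worked example using the parser defaults (10240 kB extra space, factor 1.3):

    def rootfs_size(actual_kb, size_kb=0, extra_space=10 * 1024,
                    overhead_factor=1.3, fixed_size_kb=0):
        """Required partition size in 1k blocks, mirroring the rules above."""
        if fixed_size_kb:
            if actual_kb > fixed_size_kb:
                raise ValueError("rootfs %d kB larger than fixed size %d kB"
                                 % (actual_kb, fixed_size_kb))
            return fixed_size_kb
        extra = max(size_kb - actual_kb, 0)  # grow toward --size if requested
        extra = max(extra, extra_space)      # but pad at least --extra-space
        return (actual_kb + extra) * overhead_factor

    print(rootfs_size(200 * 1024))           # (204800 + 10240) * 1.3 = 279552.0 kB
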
@@ -210,82 +250,71 @@ class Partition():
out = exec_cmd(du_cmd)
actual_rootfs_size = int(out.split()[0])
- extra_blocks = self.get_extra_block_count(actual_rootfs_size)
- if extra_blocks < self.extra_space:
- extra_blocks = self.extra_space
-
- rootfs_size = actual_rootfs_size + extra_blocks
- rootfs_size *= self.overhead_factor
-
- msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
- (extra_blocks, self.mountpoint, rootfs_size))
+ rootfs_size = self.get_rootfs_size(actual_rootfs_size)
- exec_cmd("truncate %s -s %d" % (rootfs, rootfs_size * 1024))
+ with open(rootfs, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), rootfs_size * 1024)
- extra_imagecmd = "-i 8192"
+ extraopts = self.mkfs_extraopts or "-F -i 8192"
label_str = ""
if self.label:
label_str = "-L %s" % self.label
- mkfs_cmd = "mkfs.%s -F %s %s %s -d %s" % \
- (self.fstype, extra_imagecmd, rootfs, label_str, rootfs_dir)
+ mkfs_cmd = "mkfs.%s %s %s %s -U %s -d %s" % \
+ (self.fstype, extraopts, rootfs, label_str, self.fsuuid, rootfs_dir)
+ exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
+
+ mkfs_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
def prepare_rootfs_btrfs(self, rootfs, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a btrfs rootfs partition.
-
- Currently handles ext2/3/4 and btrfs.
"""
du_cmd = "du -ks %s" % rootfs_dir
out = exec_cmd(du_cmd)
actual_rootfs_size = int(out.split()[0])
- extra_blocks = self.get_extra_block_count(actual_rootfs_size)
- if extra_blocks < self.extra_space:
- extra_blocks = self.extra_space
-
- rootfs_size = actual_rootfs_size + extra_blocks
- rootfs_size *= self.overhead_factor
+ rootfs_size = self.get_rootfs_size(actual_rootfs_size)
- msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
- (extra_blocks, self.mountpoint, rootfs_size))
-
- exec_cmd("truncate %s -s %d" % (rootfs, rootfs_size * 1024))
+ with open(rootfs, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), rootfs_size * 1024)
label_str = ""
if self.label:
label_str = "-L %s" % self.label
- mkfs_cmd = "mkfs.%s -b %d -r %s %s %s" % \
- (self.fstype, rootfs_size * 1024, rootfs_dir, label_str, rootfs)
+ mkfs_cmd = "mkfs.%s -b %d -r %s %s %s -U %s %s" % \
+ (self.fstype, rootfs_size * 1024, rootfs_dir, label_str,
+ self.mkfs_extraopts, self.fsuuid, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
- def prepare_rootfs_vfat(self, rootfs, oe_builddir, rootfs_dir,
- native_sysroot, pseudo):
+ def prepare_rootfs_msdos(self, rootfs, oe_builddir, rootfs_dir,
+ native_sysroot, pseudo):
"""
- Prepare content for a vfat rootfs partition.
+ Prepare content for a msdos/vfat rootfs partition.
"""
du_cmd = "du -bks %s" % rootfs_dir
out = exec_cmd(du_cmd)
blocks = int(out.split()[0])
- extra_blocks = self.get_extra_block_count(blocks)
- if extra_blocks < self.extra_space:
- extra_blocks = self.extra_space
-
- blocks += extra_blocks
-
- msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
- (extra_blocks, self.mountpoint, blocks))
+ rootfs_size = self.get_rootfs_size(blocks)
label_str = "-n boot"
if self.label:
label_str = "-n %s" % self.label
- dosfs_cmd = "mkdosfs %s -S 512 -C %s %d" % (label_str, rootfs, blocks)
+ size_str = ""
+ if self.fstype == 'msdos':
+ size_str = "-F 16" # FAT 16
+
+ extraopts = self.mkfs_extraopts or '-S 512'
+
+ dosfs_cmd = "mkdosfs %s -i %s %s %s -C %s %d" % \
+ (label_str, self.fsuuid, size_str, extraopts, rootfs,
+ rootfs_size)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (rootfs, rootfs_dir)
@@ -294,13 +323,16 @@ class Partition():
chmod_cmd = "chmod 644 %s" % rootfs
exec_cmd(chmod_cmd)
+ prepare_rootfs_vfat = prepare_rootfs_msdos
+
def prepare_rootfs_squashfs(self, rootfs, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a squashfs rootfs partition.
"""
- squashfs_cmd = "mksquashfs %s %s -noappend" % \
- (rootfs_dir, rootfs)
+ extraopts = self.mkfs_extraopts or '-noappend'
+ squashfs_cmd = "mksquashfs %s %s %s" % \
+ (rootfs_dir, rootfs, extraopts)
exec_native_cmd(squashfs_cmd, native_sysroot, pseudo=pseudo)
def prepare_empty_partition_ext(self, rootfs, oe_builddir,
@@ -308,16 +340,18 @@ class Partition():
"""
Prepare an empty ext2/3/4 partition.
"""
- exec_cmd("truncate %s -s %d" % (rootfs, self.size * 1024))
+ size = self.disk_size
+ with open(rootfs, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), size * 1024)
- extra_imagecmd = "-i 8192"
+ extraopts = self.mkfs_extraopts or "-i 8192"
label_str = ""
if self.label:
label_str = "-L %s" % self.label
- mkfs_cmd = "mkfs.%s -F %s %s %s" % \
- (self.fstype, extra_imagecmd, label_str, rootfs)
+ mkfs_cmd = "mkfs.%s -F %s %s -U %s %s" % \
+ (self.fstype, extraopts, label_str, self.fsuuid, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot)
def prepare_empty_partition_btrfs(self, rootfs, oe_builddir,
@@ -325,60 +359,46 @@ class Partition():
"""
Prepare an empty btrfs partition.
"""
- exec_cmd("truncate %s -s %d" % (rootfs, self.size * 1024))
+ size = self.disk_size
+ with open(rootfs, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), size * 1024)
label_str = ""
if self.label:
label_str = "-L %s" % self.label
- mkfs_cmd = "mkfs.%s -b %d %s %s" % \
- (self.fstype, self.size * 1024, label_str, rootfs)
+ mkfs_cmd = "mkfs.%s -b %d %s -U %s %s %s" % \
+ (self.fstype, self.size * 1024, label_str, self.fsuuid,
+ self.mkfs_extraopts, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot)
- def prepare_empty_partition_vfat(self, rootfs, oe_builddir,
- native_sysroot):
+ def prepare_empty_partition_msdos(self, rootfs, oe_builddir,
+ native_sysroot):
"""
Prepare an empty vfat partition.
"""
- blocks = self.size
+ blocks = self.disk_size
label_str = "-n boot"
if self.label:
label_str = "-n %s" % self.label
- dosfs_cmd = "mkdosfs %s -S 512 -C %s %d" % (label_str, rootfs, blocks)
- exec_native_cmd(dosfs_cmd, native_sysroot)
-
- chmod_cmd = "chmod 644 %s" % rootfs
- exec_cmd(chmod_cmd)
-
- def prepare_empty_partition_squashfs(self, cr_workdir, oe_builddir,
- native_sysroot):
- """
- Prepare an empty squashfs partition.
- """
- msger.warning("Creating of an empty squashfs %s partition was attempted. " \
- "Proceeding as requested." % self.mountpoint)
-
- path = "%s/fs_%s.%s" % (cr_workdir, self.label, self.fstype)
- os.path.isfile(path) and os.remove(path)
+ size_str = ""
+ if self.fstype == 'msdos':
+ size_str = "-F 16" # FAT 16
- # it is not possible to create a squashfs without source data,
- # thus prepare an empty temp dir that is used as source
- tmpdir = tempfile.mkdtemp()
+ extraopts = self.mkfs_extraopts or '-S 512'
- squashfs_cmd = "mksquashfs %s %s -noappend" % \
- (tmpdir, path)
- exec_native_cmd(squashfs_cmd, native_sysroot)
+ dosfs_cmd = "mkdosfs %s -i %s %s %s -C %s %d" % \
+ (label_str, self.fsuuid, extraopts, size_str, rootfs,
+ blocks)
- os.rmdir(tmpdir)
+ exec_native_cmd(dosfs_cmd, native_sysroot)
- # get the rootfs size in the right units for kickstart (kB)
- du_cmd = "du -Lbks %s" % path
- out = exec_cmd(du_cmd)
- fs_size = out.split()[0]
+ chmod_cmd = "chmod 644 %s" % rootfs
+ exec_cmd(chmod_cmd)
- self.size = fs_size
+ prepare_empty_partition_vfat = prepare_empty_partition_msdos
def prepare_swap_partition(self, cr_workdir, oe_builddir, native_sysroot):
"""
@@ -386,12 +406,12 @@ class Partition():
"""
path = "%s/fs.%s" % (cr_workdir, self.fstype)
- exec_cmd("truncate %s -s %d" % (path, self.size * 1024))
+ with open(path, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), self.size * 1024)
- import uuid
label_str = ""
if self.label:
label_str = "-L %s" % self.label
- mkswap_cmd = "mkswap %s -U %s %s" % (label_str, str(uuid.uuid1()), path)
- exec_native_cmd(mkswap_cmd, native_sysroot)
+ mkswap_cmd = "mkswap %s -U %s %s" % (label_str, self.fsuuid, path)
+ exec_native_cmd(mkswap_cmd, native_sysroot)
diff --git a/scripts/lib/wic/plugin.py b/scripts/lib/wic/plugin.py
deleted file mode 100644
index 306b32437e..0000000000
--- a/scripts/lib/wic/plugin.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os, sys
-
-from wic import msger
-from wic import pluginbase
-from wic.utils import errors
-from wic.utils.oe.misc import get_bitbake_var
-
-__ALL__ = ['PluginMgr', 'pluginmgr']
-
-PLUGIN_TYPES = ["imager", "source"]
-
-PLUGIN_DIR = "/lib/wic/plugins" # relative to scripts
-SCRIPTS_PLUGIN_DIR = "scripts" + PLUGIN_DIR
-
-class PluginMgr():
- plugin_dirs = {}
-
- # make the manager class as singleton
- _instance = None
- def __new__(cls, *args, **kwargs):
- if not cls._instance:
- cls._instance = super(PluginMgr, cls).__new__(cls, *args, **kwargs)
-
- return cls._instance
-
- def __init__(self):
- wic_path = os.path.dirname(__file__)
- eos = wic_path.rfind('scripts') + len('scripts')
- scripts_path = wic_path[:eos]
- self.scripts_path = scripts_path
- self.plugin_dir = scripts_path + PLUGIN_DIR
- self.layers_path = None
-
- def _build_plugin_dir_list(self, plugin_dir, ptype):
- if self.layers_path is None:
- self.layers_path = get_bitbake_var("BBLAYERS")
- layer_dirs = []
-
- if self.layers_path is not None:
- for layer_path in self.layers_path.split():
- path = os.path.join(layer_path, SCRIPTS_PLUGIN_DIR, ptype)
- layer_dirs.append(path)
-
- path = os.path.join(plugin_dir, ptype)
- layer_dirs.append(path)
-
- return layer_dirs
-
- def append_dirs(self, dirs):
- for path in dirs:
- self._add_plugindir(path)
-
- # load all the plugins AGAIN
- self._load_all()
-
- def _add_plugindir(self, path):
- path = os.path.abspath(os.path.expanduser(path))
-
- if not os.path.isdir(path):
- return
-
- if path not in self.plugin_dirs:
- self.plugin_dirs[path] = False
- # the value True/False means "loaded"
-
- def _load_all(self):
- for (pdir, loaded) in self.plugin_dirs.items():
- if loaded:
- continue
-
- sys.path.insert(0, pdir)
- for mod in [x[:-3] for x in os.listdir(pdir) if x.endswith(".py")]:
- if mod and mod != '__init__':
- if mod in sys.modules:
- #self.plugin_dirs[pdir] = True
- msger.warning("Module %s already exists, skip" % mod)
- else:
- try:
- pymod = __import__(mod)
- self.plugin_dirs[pdir] = True
- msger.debug("Plugin module %s:%s imported"\
- % (mod, pymod.__file__))
- except ImportError as err:
- msg = 'Failed to load plugin %s/%s: %s' \
- % (os.path.basename(pdir), mod, err)
- msger.warning(msg)
-
- del sys.path[0]
-
- def get_plugins(self, ptype):
- """ the return value is dict of name:class pairs """
-
- if ptype not in PLUGIN_TYPES:
- raise errors.CreatorError('%s is not valid plugin type' % ptype)
-
- plugins_dir = self._build_plugin_dir_list(self.plugin_dir, ptype)
-
- self.append_dirs(plugins_dir)
-
- return pluginbase.get_plugins(ptype)
-
- def get_source_plugins(self):
- """
- Return list of available source plugins.
- """
- plugins_dir = self._build_plugin_dir_list(self.plugin_dir, 'source')
-
- self.append_dirs(plugins_dir)
-
- return self.get_plugins('source')
-
-
- def get_source_plugin_methods(self, source_name, methods):
- """
- The methods param is a dict with the method names to find. On
- return, the dict values will be filled in with pointers to the
- corresponding methods. If one or more methods are not found,
- None is returned.
- """
- return_methods = None
- for _source_name, klass in self.get_plugins('source').items():
- if _source_name == source_name:
- for _method_name in methods:
- if not hasattr(klass, _method_name):
- msger.warning("Unimplemented %s source interface for: %s"\
- % (_method_name, _source_name))
- return None
- func = getattr(klass, _method_name)
- methods[_method_name] = func
- return_methods = methods
- return return_methods
-
-pluginmgr = PluginMgr()
diff --git a/scripts/lib/wic/pluginbase.py b/scripts/lib/wic/pluginbase.py
index e737dee7bc..f74d6430fd 100644
--- a/scripts/lib/wic/pluginbase.py
+++ b/scripts/lib/wic/pluginbase.py
@@ -1,40 +1,78 @@
-#!/usr/bin/env python -tt
+#!/usr/bin/env python3
#
# Copyright (c) 2011 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-__all__ = ['ImagerPlugin', 'SourcePlugin', 'get_plugins']
+__all__ = ['ImagerPlugin', 'SourcePlugin']
+
+import os
+import logging
-import sys
from collections import defaultdict
+from importlib.machinery import SourceFileLoader
+
+from wic import WicError
+from wic.misc import get_bitbake_var
+
+PLUGIN_TYPES = ["imager", "source"]
+
+SCRIPTS_PLUGIN_DIR = "scripts/lib/wic/plugins"
+
+logger = logging.getLogger('wic')
+
+PLUGINS = defaultdict(dict)
+
+class PluginMgr:
+ _plugin_dirs = []
+
+ @classmethod
+ def get_plugins(cls, ptype):
+ """Get dictionary of <plugin_name>:<class> pairs."""
+ if ptype not in PLUGIN_TYPES:
+ raise WicError('%s is not a valid plugin type' % ptype)
+
+ # collect plugin directories
+ if not cls._plugin_dirs:
+ cls._plugin_dirs = [os.path.join(os.path.dirname(__file__), 'plugins')]
+ layers = get_bitbake_var("BBLAYERS") or ''
+ for layer_path in layers.split():
+ path = os.path.join(layer_path, SCRIPTS_PLUGIN_DIR)
+ path = os.path.abspath(os.path.expanduser(path))
+ if path not in cls._plugin_dirs and os.path.isdir(path):
+ cls._plugin_dirs.insert(0, path)
+
+ if ptype not in PLUGINS:
+ # load all ptype plugins
+ for pdir in cls._plugin_dirs:
+ ppath = os.path.join(pdir, ptype)
+ if os.path.isdir(ppath):
+ for fname in os.listdir(ppath):
+ if fname.endswith('.py'):
+ mname = fname[:-3]
+ mpath = os.path.join(ppath, fname)
+ logger.debug("loading plugin module %s", mpath)
+ SourceFileLoader(mname, mpath).load_module()
-from wic import msger
+ return PLUGINS.get(ptype)
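+ # Illustrative usage (not part of this file): imager code looks up a
+ # plugin class by type and name, e.g.
+ #   plugin_cls = PluginMgr.get_plugins('source')['bootimg-efi']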
class PluginMeta(type):
- plugins = defaultdict(dict)
def __new__(cls, name, bases, attrs):
class_type = type.__new__(cls, name, bases, attrs)
if 'name' in attrs:
- cls.plugins[class_type.wic_plugin_type][attrs['name']] = class_type
+ PLUGINS[class_type.wic_plugin_type][attrs['name']] = class_type
return class_type
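+ # Any subclass that defines a 'name' attribute is registered in PLUGINS
+ # by this metaclass. Illustrative sketch (hypothetical plugin):
+ #   class MySourcePlugin(SourcePlugin):
+ #       name = 'my-source'
+ #   # afterwards PLUGINS['source']['my-source'] is MySourcePlugin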
-class ImagerPlugin(PluginMeta("Plugin", (), {})):
+class ImagerPlugin(metaclass=PluginMeta):
wic_plugin_type = "imager"
-class SourcePlugin(PluginMeta("Plugin", (), {})):
+ def do_create(self):
+ raise WicError("Method %s.do_create is not implemented" %
+ self.__class__.__name__)
+
+class SourcePlugin(metaclass=PluginMeta):
wic_plugin_type = "source"
"""
The methods that can be implemented by --source plugins.
@@ -50,7 +88,7 @@ class SourcePlugin(PluginMeta("Plugin", (), {})):
disk image. This provides a hook to allow finalization of a
disk image e.g. to write an MBR to it.
"""
- msger.debug("SourcePlugin: do_install_disk: disk: %s" % disk_name)
+ logger.debug("SourcePlugin: do_install_disk: disk: %s", disk_name)
@classmethod
def do_stage_partition(cls, part, source_params, creator, cr_workdir,
@@ -67,7 +105,7 @@ class SourcePlugin(PluginMeta("Plugin", (), {})):
Note that get_bitbake_var() allows you to access non-standard
variables that you might want to use for this.
"""
- msger.debug("SourcePlugin: do_stage_partition: part: %s" % part)
+ logger.debug("SourcePlugin: do_stage_partition: part: %s", part)
@classmethod
def do_configure_partition(cls, part, source_params, creator, cr_workdir,
@@ -78,7 +116,7 @@ class SourcePlugin(PluginMeta("Plugin", (), {})):
custom configuration files for a partition, for example
syslinux or grub config files.
"""
- msger.debug("SourcePlugin: do_configure_partition: part: %s" % part)
+ logger.debug("SourcePlugin: do_configure_partition: part: %s", part)
@classmethod
def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
@@ -88,7 +126,14 @@ class SourcePlugin(PluginMeta("Plugin", (), {})):
Called to do the actual content population for a partition i.e. it
'prepares' the partition to be incorporated into the image.
"""
- msger.debug("SourcePlugin: do_prepare_partition: part: %s" % part)
+ logger.debug("SourcePlugin: do_prepare_partition: part: %s", part)
-def get_plugins(typen):
- return PluginMeta.plugins.get(typen)
+ @classmethod
+ def do_post_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir, rootfs_dir,
+ native_sysroot):
+ """
+ Called after the partition is created. It is useful to add post
+ operations e.g. security signing the partition.
+ """
+ logger.debug("SourcePlugin: do_post_partition: part: %s", part)
diff --git a/scripts/lib/wic/plugins/imager/direct.py b/scripts/lib/wic/plugins/imager/direct.py
new file mode 100644
index 0000000000..2441cc33ad
--- /dev/null
+++ b/scripts/lib/wic/plugins/imager/direct.py
@@ -0,0 +1,602 @@
+#
+# Copyright (c) 2013, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# DESCRIPTION
+# This implements the 'direct' imager plugin class for 'wic'
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+import logging
+import os
+import random
+import shutil
+import tempfile
+import uuid
+
+from time import strftime
+
+from oe.path import copyhardlinktree
+
+from wic import WicError
+from wic.filemap import sparse_copy
+from wic.ksparser import KickStart, KickStartError
+from wic.pluginbase import PluginMgr, ImagerPlugin
+from wic.misc import get_bitbake_var, exec_cmd, exec_native_cmd
+
+logger = logging.getLogger('wic')
+
+class DirectPlugin(ImagerPlugin):
+ """
+ Install a system into a file containing a partitioned disk image.
+
+ An image file is formatted with a partition table, each partition
+ created from a rootfs or other OpenEmbedded build artifact and dd'ed
+ into the virtual disk. The disk image can subsequently be dd'ed onto
+ media and used on actual hardware.
+ """
+ name = 'direct'
+
+ def __init__(self, wks_file, rootfs_dir, bootimg_dir, kernel_dir,
+ native_sysroot, oe_builddir, options):
+ try:
+ self.ks = KickStart(wks_file)
+ except KickStartError as err:
+ raise WicError(str(err))
+
+ # parse possible 'rootfs=name' items
+ self.rootfs_dir = dict(rdir.split('=') for rdir in rootfs_dir.split(' '))
+ self.bootimg_dir = bootimg_dir
+ self.kernel_dir = kernel_dir
+ self.native_sysroot = native_sysroot
+ self.oe_builddir = oe_builddir
+
+ self.outdir = options.outdir
+ self.compressor = options.compressor
+ self.bmap = options.bmap
+ self.no_fstab_update = options.no_fstab_update
+ self.original_fstab = None
+
+ self.name = "%s-%s" % (os.path.splitext(os.path.basename(wks_file))[0],
+ strftime("%Y%m%d%H%M"))
+ self.workdir = tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.')
+ self._image = None
+ self.ptable_format = self.ks.bootloader.ptable
+ self.parts = self.ks.partitions
+
+ # as a convenience, set source to the boot partition source
+ # instead of forcing it to be set via bootloader --source
+ for part in self.parts:
+ if not self.ks.bootloader.source and part.mountpoint == "/boot":
+ self.ks.bootloader.source = part.source
+ break
+
+ image_path = self._full_path(self.workdir, self.parts[0].disk, "direct")
+ self._image = PartitionedImage(image_path, self.ptable_format,
+ self.parts, self.native_sysroot)
+
+ def do_create(self):
+ """
+ Plugin entry point.
+ """
+ try:
+ self.create()
+ self.assemble()
+ self.finalize()
+ self.print_info()
+ finally:
+ self.cleanup()
+
+ def _write_fstab(self, image_rootfs):
+ """overriden to generate fstab (temporarily) in rootfs. This is called
+ from _create, make sure it doesn't get called from
+ BaseImage.create()
+ """
+ if not image_rootfs:
+ return
+
+ fstab_path = image_rootfs + "/etc/fstab"
+ if not os.path.isfile(fstab_path):
+ return
+
+ with open(fstab_path) as fstab:
+ fstab_lines = fstab.readlines()
+ self.original_fstab = fstab_lines.copy()
+
+ if self._update_fstab(fstab_lines, self.parts):
+ with open(fstab_path, "w") as fstab:
+ fstab.writelines(fstab_lines)
+ else:
+ self.original_fstab = None
+
+ def _update_fstab(self, fstab_lines, parts):
+ """Assume partition order same as in wks"""
+ updated = False
+ for part in parts:
+ if not part.realnum or not part.mountpoint \
+ or part.mountpoint == "/":
+ continue
+
+ if part.use_uuid:
+ if part.fsuuid:
+ # FAT UUID is different from others
+ if len(part.fsuuid) == 10:
+ device_name = "UUID=%s-%s" % \
+ (part.fsuuid[2:6], part.fsuuid[6:])
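+ # e.g. a vfat fsuuid of '0x1234ABCD' yields 'UUID=1234-ABCD'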
+ else:
+ device_name = "UUID=%s" % part.fsuuid
+ else:
+ device_name = "PARTUUID=%s" % part.uuid
+ elif part.use_label:
+ device_name = "LABEL=%s" % part.label
+ else:
+ # mmc device partitions are named mmcblk0p1, mmcblk0p2..
+ prefix = 'p' if part.disk.startswith('mmcblk') else ''
+ device_name = "/dev/%s%s%d" % (part.disk, prefix, part.realnum)
+
+ opts = part.fsopts if part.fsopts else "defaults"
+ line = "\t".join([device_name, part.mountpoint, part.fstype,
+ opts, "0", "0"]) + "\n"
+
+ fstab_lines.append(line)
+ updated = True
+
+ return updated
+
+ def _full_path(self, path, name, extension):
+ """ Construct full file path to a file we generate. """
+ return os.path.join(path, "%s-%s.%s" % (self.name, name, extension))
+
+ #
+ # Actual implementation
+ #
+ def create(self):
+ """
+ For 'wic', we already have our build artifacts - we just create
+ filesystems from the artifacts directly and combine them into
+ a partitioned image.
+ """
+ if not self.no_fstab_update:
+ self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
+
+ for part in self.parts:
+ # get rootfs size from bitbake variable if it's not set in .ks file
+ if not part.size:
+ # and if rootfs name is specified for the partition
+ image_name = self.rootfs_dir.get(part.rootfs_dir)
+ if image_name and os.path.sep not in image_name:
+ # Bitbake variable ROOTFS_SIZE is calculated in
+ # Image._get_rootfs_size method from meta/lib/oe/image.py
+ # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
+ # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
+ rsize_bb = get_bitbake_var('ROOTFS_SIZE', image_name)
+ if rsize_bb:
+ part.size = int(round(float(rsize_bb)))
+
+ self._image.prepare(self)
+ self._image.layout_partitions()
+ self._image.create()
+
+ def assemble(self):
+ """
+ Assemble partitions into disk image
+ """
+ self._image.assemble()
+
+ def finalize(self):
+ """
+ Finalize the disk image.
+
+ For example, prepare the image to be bootable by e.g.
+ creating and installing a bootloader configuration.
+ """
+ source_plugin = self.ks.bootloader.source
+ disk_name = self.parts[0].disk
+ if source_plugin:
+ plugin = PluginMgr.get_plugins('source')[source_plugin]
+ plugin.do_install_disk(self._image, disk_name, self, self.workdir,
+ self.oe_builddir, self.bootimg_dir,
+ self.kernel_dir, self.native_sysroot)
+
+ full_path = self._image.path
+ # Generate .bmap
+ if self.bmap:
+ logger.debug("Generating bmap file for %s", disk_name)
+ python = os.path.join(self.native_sysroot, 'usr/bin/python3-native/python3')
+ bmaptool = os.path.join(self.native_sysroot, 'usr/bin/bmaptool')
+ exec_native_cmd("%s %s create %s -o %s.bmap" % \
+ (python, bmaptool, full_path, full_path), self.native_sysroot)
+ # Compress the image
+ if self.compressor:
+ logger.debug("Compressing disk %s with %s", disk_name, self.compressor)
+ exec_cmd("%s %s" % (self.compressor, full_path))
+
+ def print_info(self):
+ """
+ Print the image(s) and artifacts used, for the user.
+ """
+ msg = "The new image(s) can be found here:\n"
+
+ extension = "direct" + {"gzip": ".gz",
+ "bzip2": ".bz2",
+ "xz": ".xz",
+ None: ""}.get(self.compressor)
+ full_path = self._full_path(self.outdir, self.parts[0].disk, extension)
+ msg += ' %s\n\n' % full_path
+
+ msg += 'The following build artifacts were used to create the image(s):\n'
+ for part in self.parts:
+ if part.rootfs_dir is None:
+ continue
+ if part.mountpoint == '/':
+ suffix = ':'
+ else:
+ suffix = '["%s"]:' % (part.mountpoint or part.label)
+ rootdir = part.rootfs_dir
+ msg += ' ROOTFS_DIR%s%s\n' % (suffix.ljust(20), rootdir)
+
+ msg += ' BOOTIMG_DIR: %s\n' % self.bootimg_dir
+ msg += ' KERNEL_DIR: %s\n' % self.kernel_dir
+ msg += ' NATIVE_SYSROOT: %s\n' % self.native_sysroot
+
+ logger.info(msg)
+
+ @property
+ def rootdev(self):
+ """
+ Get root device name to use as a 'root' parameter
+ in kernel command line.
+
+ Assume partition order same as in wks
+ """
+ for part in self.parts:
+ if part.mountpoint == "/":
+ if part.uuid:
+ return "PARTUUID=%s" % part.uuid
+ else:
+ suffix = 'p' if part.disk.startswith('mmcblk') else ''
+ return "/dev/%s%s%-d" % (part.disk, suffix, part.realnum)
+
+ def cleanup(self):
+ if self._image:
+ self._image.cleanup()
+
+ # Move results to the output dir
+ if not os.path.exists(self.outdir):
+ os.makedirs(self.outdir)
+
+ for fname in os.listdir(self.workdir):
+ path = os.path.join(self.workdir, fname)
+ if os.path.isfile(path):
+ shutil.move(path, os.path.join(self.outdir, fname))
+
+ #Restore original fstab
+ if self.original_fstab:
+ fstab_path = self.rootfs_dir.get("ROOTFS_DIR") + "/etc/fstab"
+ with open(fstab_path, "w") as fstab:
+ fstab.writelines(self.original_fstab)
+
+ # remove work directory
+ shutil.rmtree(self.workdir, ignore_errors=True)
+
+# Overhead of the MBR partitioning scheme (just one sector)
+MBR_OVERHEAD = 1
+
+# Overhead of the GPT partitioning scheme
+GPT_OVERHEAD = 34
+
+# Size of a sector in bytes
+SECTOR_SIZE = 512
+
+class PartitionedImage():
+ """
+ Partitioned image in a file.
+ """
+
+ def __init__(self, path, ptable_format, partitions, native_sysroot=None):
+ self.path = path # Path to the image file
+ self.numpart = 0 # Number of allocated partitions
+ self.realpart = 0 # Number of partitions in the partition table
+ self.primary_part_num = 0 # Number of primary partitions (msdos)
+ self.extendedpart = 0 # Create extended partition before this logical partition (msdos)
+ self.extended_size_sec = 0 # Size of extended partition (msdos)
+ self.logical_part_cnt = 0 # Number of total logical partitions (msdos)
+ self.offset = 0 # Offset of next partition (in sectors)
+ self.min_size = 0 # Minimum required disk size to fit
+ # all partitions (in bytes)
+ self.ptable_format = ptable_format # Partition table format
+ # Disk system identifier
+ self.identifier = random.SystemRandom().randint(1, 0xffffffff)
+
+ self.partitions = partitions
+ self.partimages = []
+ # Size of a sector used in calculations
+ self.sector_size = SECTOR_SIZE
+ self.native_sysroot = native_sysroot
+ num_real_partitions = len([p for p in self.partitions if not p.no_table])
+
+ # calculate the real partition number, accounting for partitions not
+ # in the partition table and logical partitions
+ realnum = 0
+ for part in self.partitions:
+ if part.no_table:
+ part.realnum = 0
+ else:
+ realnum += 1
+ if self.ptable_format == 'msdos' and realnum > 3 and num_real_partitions > 4:
+ part.realnum = realnum + 1
+ continue
+ part.realnum = realnum
+
+ # generate partition and filesystem UUIDs
+ for part in self.partitions:
+ if not part.uuid and part.use_uuid:
+ if self.ptable_format == 'gpt':
+ part.uuid = str(uuid.uuid4())
+ else: # msdos partition table
+ part.uuid = '%08x-%02d' % (self.identifier, part.realnum)
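+ # e.g. identifier 0x1234abcd, partition 2 -> '1234abcd-02'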
+ if not part.fsuuid:
+ if part.fstype == 'vfat' or part.fstype == 'msdos':
+ part.fsuuid = '0x' + str(uuid.uuid4())[:8].upper()
+ else:
+ part.fsuuid = str(uuid.uuid4())
+
+ def prepare(self, imager):
+ """Prepare an image. Call prepare method of all image partitions."""
+ for part in self.partitions:
+ # need to create the filesystems in order to get their
+ # sizes before we can add them and do the layout.
+ part.prepare(imager, imager.workdir, imager.oe_builddir,
+ imager.rootfs_dir, imager.bootimg_dir,
+ imager.kernel_dir, imager.native_sysroot)
+
+ # Converting kB to sectors for parted
+ part.size_sec = part.disk_size * 1024 // self.sector_size
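+ # e.g. a 65536 kB partition becomes 65536 * 1024 / 512 = 131072 sectors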
+
+ def layout_partitions(self):
+ """ Layout the partitions, meaning calculate the position of every
+ partition on the disk. The 'ptable_format' parameter defines the
+ partition table format and may be "msdos" or "gpt". """
+
+ logger.debug("Assigning %s partitions to disks", self.ptable_format)
+
+ # The number of primary and logical partitions. Extended partition and
+ # partitions not listed in the table are not included.
+ num_real_partitions = len([p for p in self.partitions if not p.no_table])
+
+ # Go through partitions in the order they are added in .ks file
+ for num in range(len(self.partitions)):
+ part = self.partitions[num]
+
+ if self.ptable_format == 'msdos' and part.part_name:
+ raise WicError("setting custom partition name is not " \
+ "implemented for msdos partitions")
+
+ if self.ptable_format == 'msdos' and part.part_type:
+ # The --part-type can also be implemented for MBR partitions,
+ # in which case it would map to the 1-byte "partition type"
+ # field at offset 3 of the partition entry.
+ raise WicError("setting custom partition type is not " \
+ "implemented for msdos partitions")
+
+ # Get the disk where the partition is located
+ self.numpart += 1
+ if not part.no_table:
+ self.realpart += 1
+
+ if self.numpart == 1:
+ if self.ptable_format == "msdos":
+ overhead = MBR_OVERHEAD
+ elif self.ptable_format == "gpt":
+ overhead = GPT_OVERHEAD
+
+ # Skip the sectors required for the partitioning scheme overhead
+ self.offset += overhead
+
+ if self.ptable_format == "msdos":
+ if self.primary_part_num > 3 or \
+ (self.extendedpart == 0 and self.primary_part_num >= 3 and num_real_partitions > 4):
+ part.type = 'logical'
+ # Reserve a sector for EBR for every logical partition
+ # before alignment is performed.
+ if part.type == 'logical':
+ self.offset += 1
+
+ align_sectors = 0
+ if part.align:
+ # If not first partition and we do have alignment set we need
+ # to align the partition.
+ # FIXME: This leaves empty space on the disk. To fill the
+ # gaps we could enlarge the previous partition?
+
+ # Calc how much the alignment is off.
+ align_sectors = self.offset % (part.align * 1024 // self.sector_size)
+
+ if align_sectors:
+ # If partition is not aligned as required, we need
+ # to move forward to the next alignment point
+ align_sectors = (part.align * 1024 // self.sector_size) - align_sectors
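+ # e.g. with part.align = 1024 kB (2048 sectors) and offset 34,
+ # align_sectors = 2048 - (34 % 2048) = 2014, moving the start to sector 2048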
+
+ logger.debug("Realignment for %s%s with %s sectors, original"
+ " offset %s, target alignment is %sK.",
+ part.disk, self.numpart, align_sectors,
+ self.offset, part.align)
+
+ # increase the offset so we actually start the partition on the right alignment
+ self.offset += align_sectors
+
+ part.start = self.offset
+ self.offset += part.size_sec
+
+ if not part.no_table:
+ part.num = self.realpart
+ else:
+ part.num = 0
+
+ if self.ptable_format == "msdos" and not part.no_table:
+ if part.type == 'logical':
+ self.logical_part_cnt += 1
+ part.num = self.logical_part_cnt + 4
+ if self.extendedpart == 0:
+ # Create extended partition as a primary partition
+ self.primary_part_num += 1
+ self.extendedpart = part.num
+ else:
+ self.extended_size_sec += align_sectors
+ self.extended_size_sec += part.size_sec + 1
+ else:
+ self.primary_part_num += 1
+ part.num = self.primary_part_num
+
+ logger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
+ "sectors (%d bytes).", part.mountpoint, part.disk,
+ part.num, part.start, self.offset - 1, part.size_sec,
+ part.size_sec * self.sector_size)
+
+ # Once all the partitions have been laid out, we can calculate the
+ # minimum disk size
+ self.min_size = self.offset
+ if self.ptable_format == "gpt":
+ self.min_size += GPT_OVERHEAD
+
+ self.min_size *= self.sector_size
+
+ def _create_partition(self, device, parttype, fstype, start, size):
+ """ Create a partition on an image described by the 'device' object. """
+
+ # Start is included in the size, so we need to subtract one from the end.
+ end = start + size - 1
+ logger.debug("Added '%s' partition, sectors %d-%d, size %d sectors",
+ parttype, start, end, size)
+
+ cmd = "parted -s %s unit s mkpart %s" % (device, parttype)
+ if fstype:
+ cmd += " %s" % fstype
+ cmd += " %d %d" % (start, end)
+
+ return exec_native_cmd(cmd, self.native_sysroot)
+
+ def create(self):
+ logger.debug("Creating sparse file %s", self.path)
+ with open(self.path, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), self.min_size)
+
+ logger.debug("Initializing partition table for %s", self.path)
+ exec_native_cmd("parted -s %s mklabel %s" %
+ (self.path, self.ptable_format), self.native_sysroot)
+
+ logger.debug("Set disk identifier %x", self.identifier)
+ with open(self.path, 'r+b') as img:
+ img.seek(0x1B8)
+ img.write(self.identifier.to_bytes(4, 'little'))
+
+ logger.debug("Creating partitions")
+
+ for part in self.partitions:
+ if part.num == 0:
+ continue
+
+ if self.ptable_format == "msdos" and part.num == self.extendedpart:
+ # Create an extended partition (note: extended
+ # partition is described in MBR and contains all
+ # logical partitions). The logical partitions save a
+ # sector for an EBR just before the start of a
+ # partition. The extended partition must start one
+ # sector before the start of the first logical
+ # partition. This way the first EBR is inside of the
+ # extended partition. Since the extended partitions
+ # starts a sector before the first logical partition,
+ # add a sector at the back, so that there is enough
+ # room for all logical partitions.
+ self._create_partition(self.path, "extended",
+ None, part.start - 1,
+ self.extended_size_sec)
+
+ if part.fstype == "swap":
+ parted_fs_type = "linux-swap"
+ elif part.fstype == "vfat":
+ parted_fs_type = "fat32"
+ elif part.fstype == "msdos":
+ parted_fs_type = "fat16"
+ if not part.system_id:
+ part.system_id = '0x6' # FAT16
+ else:
+ # Type for ext2/ext3/ext4/btrfs
+ parted_fs_type = "ext2"
+
+ # The boot ROM of OMAP boards requires the vfat boot partition to
+ # have an even number of sectors.
+ if part.mountpoint == "/boot" and part.fstype in ["vfat", "msdos"] \
+ and part.size_sec % 2:
+ logger.debug("Subtracting one sector from '%s' partition to "
+ "get even number of sectors for the partition",
+ part.mountpoint)
+ part.size_sec -= 1
+
+ self._create_partition(self.path, part.type,
+ parted_fs_type, part.start, part.size_sec)
+
+ if part.part_name:
+ logger.debug("partition %d: set name to %s",
+ part.num, part.part_name)
+ exec_native_cmd("sgdisk --change-name=%d:%s %s" % \
+ (part.num, part.part_name,
+ self.path), self.native_sysroot)
+
+ if part.part_type:
+ logger.debug("partition %d: set type UID to %s",
+ part.num, part.part_type)
+ exec_native_cmd("sgdisk --typecode=%d:%s %s" % \
+ (part.num, part.part_type,
+ self.path), self.native_sysroot)
+
+ if part.uuid and self.ptable_format == "gpt":
+ logger.debug("partition %d: set UUID to %s",
+ part.num, part.uuid)
+ exec_native_cmd("sgdisk --partition-guid=%d:%s %s" % \
+ (part.num, part.uuid, self.path),
+ self.native_sysroot)
+
+ if part.label and self.ptable_format == "gpt":
+ logger.debug("partition %d: set name to %s",
+ part.num, part.label)
+ exec_native_cmd("parted -s %s name %d %s" % \
+ (self.path, part.num, part.label),
+ self.native_sysroot)
+
+ if part.active:
+ flag_name = "legacy_boot" if self.ptable_format == 'gpt' else "boot"
+ logger.debug("Set '%s' flag for partition '%s' on disk '%s'",
+ flag_name, part.num, self.path)
+ exec_native_cmd("parted -s %s set %d %s on" % \
+ (self.path, part.num, flag_name),
+ self.native_sysroot)
+ if part.system_id:
+ exec_native_cmd("sfdisk --part-type %s %s %s" % \
+ (self.path, part.num, part.system_id),
+ self.native_sysroot)
+
+ def cleanup(self):
+ # remove partition images
+ for image in set(self.partimages):
+ os.remove(image)
+
+ def assemble(self):
+ logger.debug("Installing partitions")
+
+ for part in self.partitions:
+ source = part.source_file
+ if source:
+ # install source_file contents into a partition
+ sparse_copy(source, self.path, seek=part.start * self.sector_size)
+
+ logger.debug("Installed %s in partition %d, sectors %d-%d, "
+ "size %d sectors", source, part.num, part.start,
+ part.start + part.size_sec - 1, part.size_sec)
+
+ partimage = self.path + '.p%d' % part.num
+ os.rename(source, partimage)
+ self.partimages.append(partimage)
diff --git a/scripts/lib/wic/plugins/imager/direct_plugin.py b/scripts/lib/wic/plugins/imager/direct_plugin.py
deleted file mode 100644
index 8fe3930804..0000000000
--- a/scripts/lib/wic/plugins/imager/direct_plugin.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# DESCRIPTION
-# This implements the 'direct' imager plugin class for 'wic'
-#
-# AUTHORS
-# Tom Zanussi <tom.zanussi (at] linux.intel.com>
-#
-
-from wic.utils import errors
-from wic.conf import configmgr
-
-import wic.imager.direct as direct
-from wic.pluginbase import ImagerPlugin
-
-class DirectPlugin(ImagerPlugin):
- """
- Install a system into a file containing a partitioned disk image.
-
- An image file is formatted with a partition table, each partition
- created from a rootfs or other OpenEmbedded build artifact and dd'ed
- into the virtual disk. The disk image can subsequently be dd'ed onto
- media and used on actual hardware.
- """
-
- name = 'direct'
-
- @classmethod
- def __rootfs_dir_to_dict(cls, rootfs_dirs):
- """
- Gets a string that contain 'connection=dir' splitted by
- space and return a dict
- """
- krootfs_dir = {}
- for rootfs_dir in rootfs_dirs.split(' '):
- key, val = rootfs_dir.split('=')
- krootfs_dir[key] = val
-
- return krootfs_dir
-
- @classmethod
- def do_create(cls, opts, *args):
- """
- Create direct image, called from creator as 'direct' cmd
- """
- if len(args) != 8:
- raise errors.Usage("Extra arguments given")
-
- native_sysroot = args[0]
- kernel_dir = args[1]
- bootimg_dir = args[2]
- rootfs_dir = args[3]
-
- creatoropts = configmgr.create
- ksconf = args[4]
-
- image_output_dir = args[5]
- oe_builddir = args[6]
- compressor = args[7]
-
- krootfs_dir = cls.__rootfs_dir_to_dict(rootfs_dir)
-
- configmgr._ksconf = ksconf
-
- creator = direct.DirectImageCreator(oe_builddir,
- image_output_dir,
- krootfs_dir,
- bootimg_dir,
- kernel_dir,
- native_sysroot,
- compressor,
- creatoropts,
- opts.bmap)
-
- try:
- creator.create()
- creator.assemble()
- creator.finalize()
- creator.print_outimage_info()
-
- except errors.CreatorError:
- raise
- finally:
- creator.cleanup()
-
- return 0
diff --git a/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py b/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py
new file mode 100644
index 0000000000..5bd7390680
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py
@@ -0,0 +1,213 @@
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'bootimg-biosplusefi' source plugin class for 'wic'
+#
+# AUTHORS
+# William Bourque <wbourque [at) gmail.com>
+
+import os
+import types
+
+from wic.pluginbase import SourcePlugin
+from importlib.machinery import SourceFileLoader
+
+class BootimgBiosPlusEFIPlugin(SourcePlugin):
+ """
+ Create MBR + EFI boot partition
+
+ This plugin creates a boot partition that contains both
+ legacy BIOS and EFI content. It will be able to boot from both.
+ This is useful when managing PC fleet with some older machines
+ without EFI support.
+
+ Note it is possible to create an image that can boot from both
+ legacy BIOS and EFI by defining two partitions: one with arg
+ --source bootimg-efi and another one with --source bootimg-pcbios.
+ However, this method has the obvious downside that it requires TWO
+ partitions to be created on the storage device.
+ Both partitions will also be marked as "bootable", which does not work on
+ most BIOSes, as the BIOS often uses the "bootable" flag to determine
+ what to boot. If you have such a BIOS, you need to manually remove the
+ "bootable" flag from the EFI partition for the drive to be bootable.
+ Having two partitions also seems to confuse wic: the content of
+ the first partition will be duplicated into the second, even though it
+ will not be used at all.
+
+ Also, unlike "isoimage-isohybrid" that also does BIOS and EFI, this plugin
+ allows you to have more than a single rootfs partition and does
+ not turn the rootfs into an initramfs RAM image.
+
+ This plugin is made to put everything into a single /boot partition so it
+ does not have the limitations listed above.
+
+ The plugin is made so it tries not to reimplement what's already
+ been done in other plugins; as such it imports "bootimg-pcbios"
+ and "bootimg-efi".
+ Plugin "bootimg-pcbios" is used to generate legacy BIOS boot.
+ Plugin "bootimg-efi" is used to generate the UEFI boot. Note that it
+ requires a --sourceparams argument to know which loader to use; refer
+ to "bootimg-efi" code/documentation for the list of loader.
+
+ Imports are handled with "SourceFileLoader" from importlib as it is
+ otherwise very difficult to import modules that have a hyphen "-" in their
+ filename.
+ The SourcePlugin() methods used in the plugins (do_install_disk,
+ do_configure_partition, do_prepare_partition) are then called on both,
+ beginning with "bootimg-efi".
+
+ Plugin options, such as "--sourceparams", can still be passed to a
+ plugin, as long as they do not cause issues in the other plugin.
+
+ Example wic configuration:
+ part /boot --source bootimg-biosplusefi --sourceparams="loader=grub-efi"\\
+ --ondisk sda --label os_boot --active --align 1024 --use-uuid
+ """
+
+ name = 'bootimg-biosplusefi'
+
+ __PCBIOS_MODULE_NAME = "bootimg-pcbios"
+ __EFI_MODULE_NAME = "bootimg-efi"
+
+ __imgEFIObj = None
+ __imgBiosObj = None
+
+ @classmethod
+ def __init__(cls):
+ """
+ Constructor (init)
+ """
+
+ # XXX
+ # For some reason, the __init__ constructor is never called.
+ # Something to do with how pluginbase works?
+ cls.__instanciateSubClasses()
+
+ @classmethod
+ def __instanciateSubClasses(cls):
+ """
+
+ """
+
+ # Import bootimg-pcbios (class name "BootimgPcbiosPlugin")
+ modulePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+ cls.__PCBIOS_MODULE_NAME + ".py")
+ loader = SourceFileLoader(cls.__PCBIOS_MODULE_NAME, modulePath)
+ mod = types.ModuleType(loader.name)
+ loader.exec_module(mod)
+ cls.__imgBiosObj = mod.BootimgPcbiosPlugin()
+
+ # Import bootimg-efi (class name "BootimgEFIPlugin")
+ modulePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+ cls.__EFI_MODULE_NAME + ".py")
+ loader = SourceFileLoader(cls.__EFI_MODULE_NAME, modulePath)
+ mod = types.ModuleType(loader.name)
+ loader.exec_module(mod)
+ cls.__imgEFIObj = mod.BootimgEFIPlugin()
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image.
+ """
+
+ if not cls.__imgEFIObj or not cls.__imgBiosObj:
+ cls.__instanciateSubClasses()
+
+ cls.__imgEFIObj.do_install_disk(
+ disk,
+ disk_name,
+ creator,
+ workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ cls.__imgBiosObj.do_install_disk(
+ disk,
+ disk_name,
+ creator,
+ workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition()
+ """
+
+ if not cls.__imgEFIObj or not cls.__imgBiosObj:
+ cls.__instanciateSubClasses()
+
+ cls.__imgEFIObj.do_configure_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ cls.__imgBiosObj.do_configure_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ """
+
+ if not cls.__imgEFIObj or not cls.__imgBiosObj:
+ cls.__instanciateSubClasses()
+
+ cls.__imgEFIObj.do_prepare_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ rootfs_dir,
+ native_sysroot)
+
+ cls.__imgBiosObj.do_prepare_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ rootfs_dir,
+ native_sysroot)
diff --git a/scripts/lib/wic/plugins/source/bootimg-efi.py b/scripts/lib/wic/plugins/source/bootimg-efi.py
index 8bc362254d..2cfdc10ecd 100644
--- a/scripts/lib/wic/plugins/source/bootimg-efi.py
+++ b/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-efi' source plugin class for 'wic'
@@ -24,25 +10,28 @@
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
+import logging
import os
import shutil
-from wic import msger
+from wic import WicError
+from wic.engine import get_custom_config
from wic.pluginbase import SourcePlugin
-from wic.utils.misc import get_custom_config
-from wic.utils.oe.misc import exec_cmd, exec_native_cmd, get_bitbake_var, \
- BOOTDD_EXTRA_SPACE
+from wic.misc import (exec_cmd, exec_native_cmd,
+ get_bitbake_var, BOOTDD_EXTRA_SPACE)
+
+logger = logging.getLogger('wic')
class BootimgEFIPlugin(SourcePlugin):
"""
Create EFI boot partition.
- This plugin supports GRUB 2 and gummiboot bootloaders.
+ This plugin supports GRUB 2 and systemd-boot bootloaders.
"""
name = 'bootimg-efi'
@classmethod
- def do_configure_grubefi(cls, hdddir, creator, cr_workdir):
+ def do_configure_grubefi(cls, hdddir, creator, cr_workdir, source_params):
"""
Create loader-specific (grub-efi) config
"""
@@ -53,36 +42,68 @@ class BootimgEFIPlugin(SourcePlugin):
if custom_cfg:
# Use a custom configuration for grub
grubefi_conf = custom_cfg
- msger.debug("Using custom configuration file "
- "%s for grub.cfg" % configfile)
+ logger.debug("Using custom configuration file "
+ "%s for grub.cfg", configfile)
else:
- msger.error("configfile is specified but failed to "
- "get it from %s." % configfile)
+ raise WicError("configfile is specified but failed to "
+ "get it from %s." % configfile)
+
+ initrd = source_params.get('initrd')
+
+ if initrd:
+ bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not bootimg_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+
+ initrds = initrd.split(';')
+ for rd in initrds:
+ cp_cmd = "cp %s/%s %s" % (bootimg_dir, rd, hdddir)
+ exec_cmd(cp_cmd, True)
+ else:
+ logger.debug("Ignoring missing initrd")
if not custom_cfg:
# Create grub configuration using parameters from wks file
bootloader = creator.ks.bootloader
+ title = source_params.get('title')
grubefi_conf = ""
grubefi_conf += "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1\n"
grubefi_conf += "default=boot\n"
grubefi_conf += "timeout=%s\n" % bootloader.timeout
- grubefi_conf += "menuentry 'boot'{\n"
+ grubefi_conf += "menuentry '%s'{\n" % (title if title else "boot")
+
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ label = source_params.get('label')
+ label_conf = "root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
- kernel = "/bzImage"
+ grubefi_conf += "linux /%s %s rootwait %s\n" \
+ % (kernel, label_conf, bootloader.append)
+
+ if initrd:
+ initrds = initrd.split(';')
+ grubefi_conf += "initrd"
+ for rd in initrds:
+ grubefi_conf += " /%s" % rd
+ grubefi_conf += "\n"
- grubefi_conf += "linux %s root=%s rootwait %s\n" \
- % (kernel, creator.rootdev, bootloader.append)
grubefi_conf += "}\n"
- msger.debug("Writing grubefi config %s/hdd/boot/EFI/BOOT/grub.cfg" \
- % cr_workdir)
+ logger.debug("Writing grubefi config %s/hdd/boot/EFI/BOOT/grub.cfg",
+ cr_workdir)
cfg = open("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir, "w")
cfg.write(grubefi_conf)
cfg.close()
@classmethod
- def do_configure_gummiboot(cls, hdddir, creator, cr_workdir):
+ def do_configure_systemdboot(cls, hdddir, creator, cr_workdir, source_params):
"""
Create loader-specific systemd-boot/gummiboot config
"""
@@ -98,8 +119,23 @@ class BootimgEFIPlugin(SourcePlugin):
loader_conf += "default boot\n"
loader_conf += "timeout %d\n" % bootloader.timeout
- msger.debug("Writing gummiboot config %s/hdd/boot/loader/loader.conf" \
- % cr_workdir)
+ initrd = source_params.get('initrd')
+
+ if initrd:
+ # obviously we need to have a common deploy var
+ bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not bootimg_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+
+ initrds = initrd.split(';')
+ for rd in initrds:
+ cp_cmd = "cp %s/%s %s" % (bootimg_dir, rd, hdddir)
+ exec_cmd(cp_cmd, True)
+ else:
+ logger.debug("Ignoring missing initrd")
+
+ logger.debug("Writing systemd-boot config "
+ "%s/hdd/boot/loader/loader.conf", cr_workdir)
cfg = open("%s/hdd/boot/loader/loader.conf" % cr_workdir, "w")
cfg.write(loader_conf)
cfg.close()
@@ -109,26 +145,43 @@ class BootimgEFIPlugin(SourcePlugin):
if configfile:
custom_cfg = get_custom_config(configfile)
if custom_cfg:
- # Use a custom configuration for gummiboot
+ # Use a custom configuration for systemd-boot
boot_conf = custom_cfg
- msger.debug("Using custom configuration file "
- "%s for gummiboots's boot.conf" % configfile)
+ logger.debug("Using custom configuration file "
+ "%s for systemd-boots's boot.conf", configfile)
else:
- msger.error("configfile is specified but failed to "
- "get it from %s." % configfile)
+ raise WicError("configfile is specified but failed to "
+ "get it from %s.", configfile)
if not custom_cfg:
- # Create gummiboot configuration using parameters from wks file
- kernel = "/bzImage"
+ # Create systemd-boot configuration using parameters from wks file
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ title = source_params.get('title')
boot_conf = ""
- boot_conf += "title boot\n"
- boot_conf += "linux %s\n" % kernel
- boot_conf += "options LABEL=Boot root=%s %s\n" % \
- (creator.rootdev, bootloader.append)
+ boot_conf += "title %s\n" % (title if title else "boot")
+ boot_conf += "linux /%s\n" % kernel
+
+ label = source_params.get('label')
+ label_conf = "LABEL=Boot root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
+
+ boot_conf += "options %s %s\n" % \
+ (label_conf, bootloader.append)
+
+ if initrd:
+ initrds = initrd.split(';')
+ for rd in initrds:
+ boot_conf += "initrd /%s\n" % rd
- msger.debug("Writing gummiboot config %s/hdd/boot/loader/entries/boot.conf" \
- % cr_workdir)
+ logger.debug("Writing systemd-boot config "
+ "%s/hdd/boot/loader/entries/boot.conf", cr_workdir)
cfg = open("%s/hdd/boot/loader/entries/boot.conf" % cr_workdir, "w")
cfg.write(boot_conf)
cfg.close()
@@ -148,14 +201,13 @@ class BootimgEFIPlugin(SourcePlugin):
try:
if source_params['loader'] == 'grub-efi':
- cls.do_configure_grubefi(hdddir, creator, cr_workdir)
- elif source_params['loader'] == 'gummiboot' \
- or source_params['loader'] == 'systemd-boot':
- cls.do_configure_gummiboot(hdddir, creator, cr_workdir)
+ cls.do_configure_grubefi(hdddir, creator, cr_workdir, source_params)
+ elif source_params['loader'] == 'systemd-boot':
+ cls.do_configure_systemdboot(hdddir, creator, cr_workdir, source_params)
else:
- msger.error("unrecognized bootimg-efi loader: %s" % source_params['loader'])
+ raise WicError("unrecognized bootimg-efi loader: %s" % source_params['loader'])
except KeyError:
- msger.error("bootimg-efi requires a loader, none specified")
+ raise WicError("bootimg-efi requires a loader, none specified")
@classmethod
@@ -167,39 +219,46 @@ class BootimgEFIPlugin(SourcePlugin):
'prepares' the partition to be incorporated into the image.
In this case, prepare content for an EFI (grub) boot partition.
"""
- if not bootimg_dir:
- bootimg_dir = get_bitbake_var("HDDDIR")
- if not bootimg_dir:
- msger.error("Couldn't find HDDDIR, exiting\n")
- # just so the result notes display it
- creator.set_bootimg_dir(bootimg_dir)
+ if not kernel_dir:
+ kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not kernel_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
staging_kernel_dir = kernel_dir
hdddir = "%s/hdd/boot" % cr_workdir
- install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
- (staging_kernel_dir, hdddir)
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ install_cmd = "install -m 0644 %s/%s %s/%s" % \
+ (staging_kernel_dir, kernel, hdddir, kernel)
exec_cmd(install_cmd)
+
try:
if source_params['loader'] == 'grub-efi':
shutil.copyfile("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir,
"%s/grub.cfg" % cr_workdir)
- cp_cmd = "cp %s/EFI/BOOT/* %s/EFI/BOOT" % (bootimg_dir, hdddir)
- exec_cmd(cp_cmd, True)
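+ # mod[9:] strips the "grub-efi-" prefix, e.g. an (illustrative)
+ # 'grub-efi-bootx64.efi' deploy file is installed as 'EFI/BOOT/bootx64.efi'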
+ for mod in [x for x in os.listdir(kernel_dir) if x.startswith("grub-efi-")]:
+ cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, mod[9:])
+ exec_cmd(cp_cmd, True)
shutil.move("%s/grub.cfg" % cr_workdir,
"%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir)
- elif source_params['loader'] == 'gummiboot' \
- or source_params['loader'] == 'systemd-boot':
- cp_cmd = "cp %s/EFI/BOOT/* %s/EFI/BOOT" % (bootimg_dir, hdddir)
- exec_cmd(cp_cmd, True)
+ elif source_params['loader'] == 'systemd-boot':
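+ # mod[8:] strips the "systemd-" prefix, e.g. an (illustrative)
+ # 'systemd-bootx64.efi' deploy file is installed as 'EFI/BOOT/bootx64.efi'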
+ for mod in [x for x in os.listdir(kernel_dir) if x.startswith("systemd-")]:
+ cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, mod[8:])
+ exec_cmd(cp_cmd, True)
else:
- msger.error("unrecognized bootimg-efi loader: %s" % source_params['loader'])
+ raise WicError("unrecognized bootimg-efi loader: %s" %
+ source_params['loader'])
except KeyError:
- msger.error("bootimg-efi requires a loader, none specified")
+ raise WicError("bootimg-efi requires a loader, none specified")
- startup = os.path.join(bootimg_dir, "startup.nsh")
+ startup = os.path.join(kernel_dir, "startup.nsh")
if os.path.exists(startup):
cp_cmd = "cp %s %s/" % (startup, hdddir)
exec_cmd(cp_cmd, True)
@@ -215,13 +274,16 @@ class BootimgEFIPlugin(SourcePlugin):
blocks += extra_blocks
- msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
- (extra_blocks, part.mountpoint, blocks))
+ logger.debug("Added %d extra blocks to %s to get to %d total blocks",
+ extra_blocks, part.mountpoint, blocks)
# dosfs image, created by mkdosfs
bootimg = "%s/boot.img" % cr_workdir
- dosfs_cmd = "mkdosfs -n efi -C %s %d" % (bootimg, blocks)
+ label = part.label if part.label else "ESP"
+
+ dosfs_cmd = "mkdosfs -n %s -i %s -C %s %d" % \
+ (label, part.fsuuid, bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
@@ -234,5 +296,5 @@ class BootimgEFIPlugin(SourcePlugin):
out = exec_cmd(du_cmd)
bootimg_size = out.split()[0]
- part.size = bootimg_size
+ part.size = int(bootimg_size)
part.source_file = bootimg
diff --git a/scripts/lib/wic/plugins/source/bootimg-partition.py b/scripts/lib/wic/plugins/source/bootimg-partition.py
index b76c1211ae..138986a71e 100644
--- a/scripts/lib/wic/plugins/source/bootimg-partition.py
+++ b/scripts/lib/wic/plugins/source/bootimg-partition.py
@@ -1,18 +1,5 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-partition' source plugin class for
@@ -23,14 +10,19 @@
# Maciej Borzecki <maciej.borzecki (at] open-rnd.pl>
#
+import logging
import os
import re
-from wic import msger
-from wic.pluginbase import SourcePlugin
-from wic.utils.oe.misc import exec_cmd, get_bitbake_var
from glob import glob
+from wic import WicError
+from wic.engine import get_custom_config
+from wic.pluginbase import SourcePlugin
+from wic.misc import exec_cmd, get_bitbake_var
+
+logger = logging.getLogger('wic')
+
class BootimgPartitionPlugin(SourcePlugin):
"""
Create an image of boot partition, copying over files
@@ -40,53 +32,36 @@ class BootimgPartitionPlugin(SourcePlugin):
name = 'bootimg-partition'
@classmethod
- def do_install_disk(cls, disk, disk_name, cr, workdir, oe_builddir,
- bootimg_dir, kernel_dir, native_sysroot):
- """
- Called after all partitions have been prepared and assembled into a
- disk image. Do nothing.
- """
- pass
-
- @classmethod
def do_configure_partition(cls, part, source_params, cr, cr_workdir,
- oe_builddir, bootimg_dir, kernel_dir,
- native_sysroot):
- """
- Called before do_prepare_partition(). Possibly prepare
- configuration files of some sort.
-
- """
- pass
-
- @classmethod
- def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
- rootfs_dir, native_sysroot):
+ native_sysroot):
"""
- Called to do the actual content population for a partition i.e. it
- 'prepares' the partition to be incorporated into the image.
- In this case, does the following:
- - sets up a vfat partition
- - copies all files listed in IMAGE_BOOT_FILES variable
+ Called before do_prepare_partition(); creates the u-boot specific boot config
"""
- hdddir = "%s/boot" % cr_workdir
+ hdddir = "%s/boot.%d" % (cr_workdir, part.lineno)
install_cmd = "install -d %s" % hdddir
exec_cmd(install_cmd)
- if not bootimg_dir:
- bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
- if not bootimg_dir:
- msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting\n")
+ if not kernel_dir:
+ kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not kernel_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
- msger.debug('Bootimg dir: %s' % bootimg_dir)
+ boot_files = None
+ for (fmt, id) in (("_uuid-%s", part.uuid), ("_label-%s", part.label), (None, None)):
+ if fmt:
+ var = fmt % id
+ else:
+ var = ""
- boot_files = get_bitbake_var("IMAGE_BOOT_FILES")
+ boot_files = get_bitbake_var("IMAGE_BOOT_FILES" + var)
+ if boot_files is not None:
+ break
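+ # e.g. for a partition created with --label boot, IMAGE_BOOT_FILES_uuid-<uuid>
+ # is tried first, then IMAGE_BOOT_FILES_label-boot, then plain IMAGE_BOOT_FILES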
- if not boot_files:
- msger.error('No boot files defined, IMAGE_BOOT_FILES unset')
+ if boot_files is None:
+ raise WicError('No boot files defined, IMAGE_BOOT_FILES unset for entry #%d' % part.lineno)
- msger.debug('Boot files: %s' % boot_files)
+ logger.debug('Boot files: %s', boot_files)
# list of tuples (src_name, dst_name)
deploy_files = []
@@ -94,16 +69,16 @@ class BootimgPartitionPlugin(SourcePlugin):
if ';' in src_entry:
dst_entry = tuple(src_entry.split(';'))
if not dst_entry[0] or not dst_entry[1]:
- msger.error('Malformed boot file entry: %s' % (src_entry))
+ raise WicError('Malformed boot file entry: %s' % src_entry)
else:
dst_entry = (src_entry, src_entry)
- msger.debug('Destination entry: %r' % (dst_entry,))
+ logger.debug('Destination entry: %r', dst_entry)
deploy_files.append(dst_entry)
+ cls.install_task = []
for deploy_entry in deploy_files:
src, dst = deploy_entry
- install_task = []
if '*' in src:
# by default install files under their basename
entry_name_fn = os.path.basename
@@ -114,27 +89,106 @@ class BootimgPartitionPlugin(SourcePlugin):
os.path.join(dst,
os.path.basename(name))
- srcs = glob(os.path.join(bootimg_dir, src))
+ srcs = glob(os.path.join(kernel_dir, src))
- msger.debug('Globbed sources: %s' % (', '.join(srcs)))
+ logger.debug('Globbed sources: %s', ', '.join(srcs))
for entry in srcs:
+ src = os.path.relpath(entry, kernel_dir)
entry_dst_name = entry_name_fn(entry)
- install_task.append((entry,
- os.path.join(hdddir,
- entry_dst_name)))
+ cls.install_task.append((src, entry_dst_name))
else:
- install_task = [(os.path.join(bootimg_dir, src),
- os.path.join(hdddir, dst))]
-
- for task in install_task:
- src_path, dst_path = task
- msger.debug('Install %s as %s' % (os.path.basename(src_path),
- dst_path))
- install_cmd = "install -m 0644 -D %s %s" \
- % (src_path, dst_path)
- exec_cmd(install_cmd)
-
- msger.debug('Prepare boot partition using rootfs in %s' % (hdddir))
- part.prepare_rootfs(cr_workdir, oe_builddir, hdddir,
- native_sysroot)
+ cls.install_task.append((src, dst))
+
+ if source_params.get('loader') != "u-boot":
+ return
+
+ configfile = cr.ks.bootloader.configfile
+ custom_cfg = None
+ if configfile:
+ custom_cfg = get_custom_config(configfile)
+ if custom_cfg:
+ # Use a custom configuration for extlinux.conf
+ extlinux_conf = custom_cfg
+ logger.debug("Using custom configuration file "
+ "%s for extlinux.cfg", configfile)
+ else:
+ raise WicError("configfile is specified but failed to "
+ "get it from %s." % configfile)
+
+ if not custom_cfg:
+ # The kernel types supported by the sysboot of u-boot
+ kernel_types = ["zImage", "Image", "fitImage", "uImage", "vmlinux"]
+ has_dtb = False
+ fdt_dir = '/'
+ kernel_name = None
+
+ # Find the kernel image name, from the highest precedence to lowest
+ for image in kernel_types:
+ for task in cls.install_task:
+ src, dst = task
+ if re.match(image, src):
+ kernel_name = os.path.join('/', dst)
+ break
+ if kernel_name:
+ break
+
+ for task in cls.install_task:
+ src, dst = task
+ # We assume that all the dtbs are in the same directory
+ if re.search(r'\.dtb', src) and fdt_dir == '/':
+ has_dtb = True
+ fdt_dir = os.path.join(fdt_dir, os.path.dirname(dst))
+ break
+
+ if not kernel_name:
+ raise WicError('No kernel file found')
+
+ # Compose the extlinux.conf
+ extlinux_conf = "default Yocto\n"
+ extlinux_conf += "label Yocto\n"
+ extlinux_conf += " kernel %s\n" % kernel_name
+ if has_dtb:
+ extlinux_conf += " fdtdir %s\n" % fdt_dir
+ bootloader = cr.ks.bootloader
+ extlinux_conf += "append root=%s rootwait %s\n" \
+ % (cr.rootdev, bootloader.append if bootloader.append else '')
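+ # Illustrative extlinux.conf result (assuming a zImage kernel and no dtb):
+ #   default Yocto
+ #   label Yocto
+ #       kernel /zImage
+ #   append root=<rootdev> rootwait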
+
+ install_cmd = "install -d %s/extlinux/" % hdddir
+ exec_cmd(install_cmd)
+ cfg = open("%s/extlinux/extlinux.conf" % hdddir, "w")
+ cfg.write(extlinux_conf)
+ cfg.close()
+
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, does the following:
+ - sets up a vfat partition
+ - copies all files listed in IMAGE_BOOT_FILES variable
+ """
+ hdddir = "%s/boot.%d" % (cr_workdir, part.lineno)
+ if not kernel_dir:
+ kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not kernel_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+
+ logger.debug('Kernel dir: %s', kernel_dir)
+
+
+ for task in cls.install_task:
+ src_path, dst_path = task
+ logger.debug('Install %s as %s', src_path, dst_path)
+ install_cmd = "install -m 0644 -D %s %s" \
+ % (os.path.join(kernel_dir, src_path),
+ os.path.join(hdddir, dst_path))
+ exec_cmd(install_cmd)
+
+ logger.debug('Prepare boot partition using rootfs in %s', hdddir)
+ part.prepare_rootfs(cr_workdir, oe_builddir, hdddir,
+ native_sysroot, False)
diff --git a/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
index f204daa323..f2639e7004 100644
--- a/scripts/lib/wic/plugins/source/bootimg-pcbios.py
+++ b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-pcbios' source plugin class for 'wic'
@@ -24,15 +10,17 @@
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
+import logging
import os
+import re
-from wic.utils.errors import ImageError
-from wic import msger
-from wic.utils import runner
-from wic.utils.misc import get_custom_config
+from wic import WicError
+from wic.engine import get_custom_config
from wic.pluginbase import SourcePlugin
-from wic.utils.oe.misc import exec_cmd, exec_native_cmd, \
- get_bitbake_var, BOOTDD_EXTRA_SPACE
+from wic.misc import (exec_cmd, exec_native_cmd,
+ get_bitbake_var, BOOTDD_EXTRA_SPACE)
+
+logger = logging.getLogger('wic')
class BootimgPcbiosPlugin(SourcePlugin):
"""
@@ -42,33 +30,51 @@ class BootimgPcbiosPlugin(SourcePlugin):
name = 'bootimg-pcbios'
@classmethod
+ def _get_bootimg_dir(cls, bootimg_dir, dirname):
+ """
+        Check if dirname exists in default bootimg_dir or in STAGING_DATADIR.
+ """
+ staging_datadir = get_bitbake_var("STAGING_DATADIR")
+ for result in (bootimg_dir, staging_datadir):
+ if os.path.exists("%s/%s" % (result, dirname)):
+ return result
+
+        # STAGING_DATADIR is expanded with MLPREFIX if multilib is enabled,
+        # but the syslinux dependency is still populated into the original
+        # STAGING_DATADIR
+ nonarch_datadir = re.sub('/[^/]*recipe-sysroot', '/recipe-sysroot', staging_datadir)
+ if os.path.exists(os.path.join(nonarch_datadir, dirname)):
+ return nonarch_datadir
+
+ raise WicError("Couldn't find correct bootimg_dir, exiting")
+
+ @classmethod
def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
bootimg_dir, kernel_dir, native_sysroot):
"""
Called after all partitions have been prepared and assembled into a
disk image. In this case, we install the MBR.
"""
+ bootimg_dir = cls._get_bootimg_dir(bootimg_dir, 'syslinux')
mbrfile = "%s/syslinux/" % bootimg_dir
if creator.ptable_format == 'msdos':
mbrfile += "mbr.bin"
elif creator.ptable_format == 'gpt':
mbrfile += "gptmbr.bin"
else:
- msger.error("Unsupported partition table: %s" % creator.ptable_format)
+ raise WicError("Unsupported partition table: %s" %
+ creator.ptable_format)
if not os.path.exists(mbrfile):
- msger.error("Couldn't find %s. If using the -e option, do you "
- "have the right MACHINE set in local.conf? If not, "
- "is the bootimg_dir path correct?" % mbrfile)
+ raise WicError("Couldn't find %s. If using the -e option, do you "
+ "have the right MACHINE set in local.conf? If not, "
+ "is the bootimg_dir path correct?" % mbrfile)
full_path = creator._full_path(workdir, disk_name, "direct")
- msger.debug("Installing MBR on disk %s as %s with size %s bytes" \
- % (disk_name, full_path, disk['min_size']))
+ logger.debug("Installing MBR on disk %s as %s with size %s bytes",
+ disk_name, full_path, disk.min_size)
- rcode = runner.show(['dd', 'if=%s' % mbrfile,
- 'of=%s' % full_path, 'conv=notrunc'])
- if rcode != 0:
- raise ImageError("Unable to set MBR to %s" % full_path)
+ dd_cmd = "dd if=%s of=%s conv=notrunc" % (mbrfile, full_path)
+ exec_cmd(dd_cmd, native_sysroot)
@classmethod
def do_configure_partition(cls, part, source_params, creator, cr_workdir,
@@ -90,11 +96,11 @@ class BootimgPcbiosPlugin(SourcePlugin):
if custom_cfg:
# Use a custom configuration for grub
syslinux_conf = custom_cfg
- msger.debug("Using custom configuration file "
- "%s for syslinux.cfg" % bootloader.configfile)
+ logger.debug("Using custom configuration file %s "
+ "for syslinux.cfg", bootloader.configfile)
else:
- msger.error("configfile is specified but failed to "
- "get it from %s." % bootloader.configfile)
+ raise WicError("configfile is specified but failed to "
+ "get it from %s." % bootloader.configfile)
if not custom_cfg:
# Create syslinux configuration using parameters from wks file
@@ -122,8 +128,8 @@ class BootimgPcbiosPlugin(SourcePlugin):
syslinux_conf += "APPEND label=boot root=%s %s\n" % \
(creator.rootdev, bootloader.append)
- msger.debug("Writing syslinux config %s/hdd/boot/syslinux.cfg" \
- % cr_workdir)
+ logger.debug("Writing syslinux config %s/hdd/boot/syslinux.cfg",
+ cr_workdir)
cfg = open("%s/hdd/boot/syslinux.cfg" % cr_workdir, "w")
cfg.write(syslinux_conf)
cfg.close()
@@ -137,33 +143,31 @@ class BootimgPcbiosPlugin(SourcePlugin):
'prepares' the partition to be incorporated into the image.
In this case, prepare content for legacy bios boot partition.
"""
- def _has_syslinux(dirname):
- if dirname:
- syslinux = "%s/syslinux" % dirname
- if os.path.exists(syslinux):
- return True
- return False
-
- if not _has_syslinux(bootimg_dir):
- bootimg_dir = get_bitbake_var("STAGING_DATADIR")
- if not bootimg_dir:
- msger.error("Couldn't find STAGING_DATADIR, exiting\n")
- if not _has_syslinux(bootimg_dir):
- msger.error("Please build syslinux first\n")
- # just so the result notes display it
- creator.set_bootimg_dir(bootimg_dir)
+ bootimg_dir = cls._get_bootimg_dir(bootimg_dir, 'syslinux')
staging_kernel_dir = kernel_dir
hdddir = "%s/hdd/boot" % cr_workdir
- install_cmd = "install -m 0644 %s/bzImage %s/vmlinuz" \
- % (staging_kernel_dir, hdddir)
- exec_cmd(install_cmd)
-
- install_cmd = "install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" \
- % (bootimg_dir, hdddir)
- exec_cmd(install_cmd)
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ cmds = ("install -m 0644 %s/%s %s/vmlinuz" %
+ (staging_kernel_dir, kernel, hdddir),
+ "install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" %
+ (bootimg_dir, hdddir),
+ "install -m 0644 %s/syslinux/vesamenu.c32 %s/vesamenu.c32" %
+ (bootimg_dir, hdddir),
+ "install -m 444 %s/syslinux/libcom32.c32 %s/libcom32.c32" %
+ (bootimg_dir, hdddir),
+ "install -m 444 %s/syslinux/libutil.c32 %s/libutil.c32" %
+ (bootimg_dir, hdddir))
+
+ for install_cmd in cmds:
+ exec_cmd(install_cmd)
du_cmd = "du -bks %s" % hdddir
out = exec_cmd(du_cmd)
@@ -176,13 +180,14 @@ class BootimgPcbiosPlugin(SourcePlugin):
blocks += extra_blocks
- msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
- (extra_blocks, part.mountpoint, blocks))
+ logger.debug("Added %d extra blocks to %s to get to %d total blocks",
+ extra_blocks, part.mountpoint, blocks)
# dosfs image, created by mkdosfs
- bootimg = "%s/boot.img" % cr_workdir
+ bootimg = "%s/boot%s.img" % (cr_workdir, part.lineno)
- dosfs_cmd = "mkdosfs -n boot -S 512 -C %s %d" % (bootimg, blocks)
+ dosfs_cmd = "mkdosfs -n boot -i %s -S 512 -C %s %d" % \
+ (part.fsuuid, bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
@@ -198,7 +203,5 @@ class BootimgPcbiosPlugin(SourcePlugin):
out = exec_cmd(du_cmd)
bootimg_size = out.split()[0]
- part.size = int(out.split()[0])
+ part.size = int(bootimg_size)
part.source_file = bootimg
-
-
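The boot image sizing above follows a fixed recipe: measure the staged files with du -bks (1 kB blocks), add the partition's extra blocks, then create and populate a vfat image of exactly that size. A minimal sketch using subprocess directly instead of wic's exec_cmd/exec_native_cmd helpers (function name illustrative):

    import subprocess

    def make_boot_img(hdddir, bootimg, fsuuid, extra_blocks=0):
        # du -bks reports usage in 1 kB blocks, the unit mkdosfs -C expects here
        out = subprocess.check_output(["du", "-bks", hdddir], text=True)
        blocks = int(out.split()[0]) + extra_blocks
        # -i sets the volume ID from the partition's fsuuid, keeping builds reproducible
        subprocess.check_call("mkdosfs -n boot -i %s -S 512 -C %s %d"
                              % (fsuuid, bootimg, blocks), shell=True)
        # Populate the fresh image with the staged boot files
        subprocess.check_call("mcopy -i %s -s %s/* ::/" % (bootimg, hdddir),
                              shell=True)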
diff --git a/scripts/lib/wic/plugins/source/fsimage.py b/scripts/lib/wic/plugins/source/fsimage.py
deleted file mode 100644
index f894e89367..0000000000
--- a/scripts/lib/wic/plugins/source/fsimage.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-
-import os
-
-from wic import msger
-from wic.pluginbase import SourcePlugin
-from wic.utils.oe.misc import get_bitbake_var
-
-class FSImagePlugin(SourcePlugin):
- """
- Add an already existing filesystem image to the partition layout.
- """
-
- name = 'fsimage'
-
- @classmethod
- def do_install_disk(cls, disk, disk_name, cr, workdir, oe_builddir,
- bootimg_dir, kernel_dir, native_sysroot):
- """
- Called after all partitions have been prepared and assembled into a
- disk image. Do nothing.
- """
- pass
-
- @classmethod
- def do_configure_partition(cls, part, source_params, cr, cr_workdir,
- oe_builddir, bootimg_dir, kernel_dir,
- native_sysroot):
- """
- Called before do_prepare_partition(). Possibly prepare
- configuration files of some sort.
- """
- pass
-
- @classmethod
- def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
- oe_builddir, bootimg_dir, kernel_dir,
- rootfs_dir, native_sysroot):
- """
- Called to do the actual content population for a partition i.e. it
- 'prepares' the partition to be incorporated into the image.
- """
- if not bootimg_dir:
- bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
- if not bootimg_dir:
- msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting\n")
-
- msger.debug('Bootimg dir: %s' % bootimg_dir)
-
- if 'file' not in source_params:
- msger.error("No file specified\n")
- return
-
- src = os.path.join(bootimg_dir, source_params['file'])
-
-
- msger.debug('Preparing partition using image %s' % (src))
- part.prepare_rootfs_from_fs_image(cr_workdir, src, "")
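With fsimage gone, its use case (placing a prebuilt filesystem image into a partition) can still be served by the rawcopy plugin changed further below, which accepts the same 'file' source parameter. An illustrative kickstart line (image name hypothetical):

    part / --source rawcopy --sourceparams="file=core-image-minimal.ext4" --ondisk sda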
diff --git a/scripts/lib/wic/plugins/source/isoimage-isohybrid.py b/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
index 3858fd439b..11326a274b 100644
--- a/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
+++ b/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
@@ -1,18 +1,5 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'isoimage-isohybrid' source plugin class for 'wic'
@@ -20,15 +7,18 @@
# AUTHORS
# Mihaly Varga <mihaly.varga (at] ni.com>
+import glob
+import logging
import os
import re
import shutil
-import glob
-from wic import msger
+from wic import WicError
+from wic.engine import get_custom_config
from wic.pluginbase import SourcePlugin
-from wic.utils.misc import get_custom_config
-from wic.utils.oe.misc import exec_cmd, exec_native_cmd, get_bitbake_var
+from wic.misc import exec_cmd, exec_native_cmd, get_bitbake_var
+
+logger = logging.getLogger('wic')
class IsoImagePlugin(SourcePlugin):
"""
@@ -44,7 +34,7 @@ class IsoImagePlugin(SourcePlugin):
Example kickstart file:
part /boot --source isoimage-isohybrid --sourceparams="loader=grub-efi, \\
- image_name= IsoImage" --ondisk cd --label LIVECD --fstype=ext2
+ image_name= IsoImage" --ondisk cd --label LIVECD
bootloader --timeout=10 --append=" "
In --sourceparams "loader" specifies the bootloader used for booting in EFI
@@ -80,18 +70,24 @@ class IsoImagePlugin(SourcePlugin):
syslinux_conf += "DEFAULT boot\n"
syslinux_conf += "LABEL boot\n"
- kernel = "/bzImage"
- syslinux_conf += "KERNEL " + kernel + "\n"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ syslinux_conf += "KERNEL /" + kernel + "\n"
syslinux_conf += "APPEND initrd=/initrd LABEL=boot %s\n" \
% bootloader.append
- msger.debug("Writing syslinux config %s/ISO/isolinux/isolinux.cfg" \
- % cr_workdir)
+ logger.debug("Writing syslinux config %s/ISO/isolinux/isolinux.cfg",
+ cr_workdir)
+
with open("%s/ISO/isolinux/isolinux.cfg" % cr_workdir, "w") as cfg:
cfg.write(syslinux_conf)
@classmethod
- def do_configure_grubefi(cls, part, creator, cr_workdir):
+ def do_configure_grubefi(cls, part, creator, target_dir):
"""
Create loader-specific (grub-efi) config
"""
@@ -99,13 +95,13 @@ class IsoImagePlugin(SourcePlugin):
if configfile:
grubefi_conf = get_custom_config(configfile)
if grubefi_conf:
- msger.debug("Using custom configuration file "
- "%s for grub.cfg" % configfile)
+ logger.debug("Using custom configuration file %s for grub.cfg",
+ configfile)
else:
- msger.error("configfile is specified but failed to "
- "get it from %s." % configfile)
+            raise WicError("configfile is specified "
+                           "but failed to get it from %s" % configfile)
else:
- splash = os.path.join(cr_workdir, "EFI/boot/splash.jpg")
+ splash = os.path.join(target_dir, "splash.jpg")
if os.path.exists(splash):
splashline = "menu background splash.jpg"
else:
@@ -123,9 +119,13 @@ class IsoImagePlugin(SourcePlugin):
grubefi_conf += "\n"
grubefi_conf += "menuentry 'boot'{\n"
- kernel = "/bzImage"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
- grubefi_conf += "linux %s rootwait %s\n" \
+ grubefi_conf += "linux /%s rootwait %s\n" \
% (kernel, bootloader.append)
grubefi_conf += "initrd /initrd \n"
grubefi_conf += "}\n"
@@ -133,9 +133,10 @@ class IsoImagePlugin(SourcePlugin):
if splashline:
grubefi_conf += "%s\n" % splashline
- msger.debug("Writing grubefi config %s/EFI/BOOT/grub.cfg" \
- % cr_workdir)
- with open("%s/EFI/BOOT/grub.cfg" % cr_workdir, "w") as cfg:
+ cfg_path = os.path.join(target_dir, "grub.cfg")
+ logger.debug("Writing grubefi config %s", cfg_path)
+
+ with open(cfg_path, "w") as cfg:
cfg.write(grubefi_conf)
@staticmethod
@@ -144,27 +145,28 @@ class IsoImagePlugin(SourcePlugin):
Create path for initramfs image
"""
- initrd = get_bitbake_var("INITRD")
+ initrd = get_bitbake_var("INITRD_LIVE") or get_bitbake_var("INITRD")
if not initrd:
initrd_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
if not initrd_dir:
- msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting.\n")
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting.")
image_name = get_bitbake_var("IMAGE_BASENAME")
if not image_name:
- msger.error("Couldn't find IMAGE_BASENAME, exiting.\n")
+ raise WicError("Couldn't find IMAGE_BASENAME, exiting.")
image_type = get_bitbake_var("INITRAMFS_FSTYPES")
if not image_type:
- msger.error("Couldn't find INITRAMFS_FSTYPES, exiting.\n")
+ raise WicError("Couldn't find INITRAMFS_FSTYPES, exiting.")
- machine_arch = get_bitbake_var("MACHINE_ARCH")
- if not machine_arch:
- msger.error("Couldn't find MACHINE_ARCH, exiting.\n")
+ machine = os.path.basename(initrd_dir)
- initrd = glob.glob('%s/%s*%s.%s' % (initrd_dir, image_name, machine_arch, image_type))[0]
+ pattern = '%s/%s*%s.%s' % (initrd_dir, image_name, machine, image_type)
+ files = glob.glob(pattern)
+ if files:
+ initrd = files[0]
- if not os.path.exists(initrd):
+ if not initrd or not os.path.exists(initrd):
# Create initrd from rootfs directory
initrd = "%s/initrd.cpio.gz" % cr_workdir
initrd_dir = "%s/INITRD" % cr_workdir
@@ -183,66 +185,16 @@ class IsoImagePlugin(SourcePlugin):
os.symlink(os.readlink("%s/sbin/init" % rootfs_dir), \
"%s/init" % initrd_dir)
else:
- msger.error("Couldn't find or build initrd, exiting.\n")
+ raise WicError("Couldn't find or build initrd, exiting.")
- exec_cmd("cd %s && find . | cpio -o -H newc -R +0:+0 >./initrd.cpio " \
- % initrd_dir, as_shell=True)
- exec_cmd("gzip -f -9 -c %s/initrd.cpio > %s" \
- % (cr_workdir, initrd), as_shell=True)
+ exec_cmd("cd %s && find . | cpio -o -H newc -R root:root >%s/initrd.cpio " \
+ % (initrd_dir, cr_workdir), as_shell=True)
+ exec_cmd("gzip -f -9 %s/initrd.cpio" % cr_workdir, as_shell=True)
shutil.rmtree(initrd_dir)
return initrd
@classmethod
- def do_stage_partition(cls, part, source_params, creator, cr_workdir,
- oe_builddir, bootimg_dir, kernel_dir,
- native_sysroot):
- """
- Special content staging called before do_prepare_partition().
- It cheks if all necessary tools are available, if not
- tries to instal them.
- """
- # Make sure parted is available in native sysroot
- if not os.path.isfile("%s/usr/sbin/parted" % native_sysroot):
- msger.info("Building parted-native...\n")
- exec_cmd("bitbake parted-native")
-
- # Make sure mkfs.ext2/3/4 is available in native sysroot
- if not os.path.isfile("%s/sbin/mkfs.ext2" % native_sysroot):
- msger.info("Building e2fsprogs-native...\n")
- exec_cmd("bitbake e2fsprogs-native")
-
- # Make sure syslinux is available in sysroot and in native sysroot
- syslinux_dir = get_bitbake_var("STAGING_DATADIR")
- if not syslinux_dir:
- msger.error("Couldn't find STAGING_DATADIR, exiting.\n")
- if not os.path.exists("%s/syslinux" % syslinux_dir):
- msger.info("Building syslinux...\n")
- exec_cmd("bitbake syslinux")
- if not os.path.exists("%s/syslinux" % syslinux_dir):
- msger.error("Please build syslinux first\n")
-
- # Make sure syslinux is available in native sysroot
- if not os.path.exists("%s/usr/bin/syslinux" % native_sysroot):
- msger.info("Building syslinux-native...\n")
- exec_cmd("bitbake syslinux-native")
-
- #Make sure mkisofs is available in native sysroot
- if not os.path.isfile("%s/usr/bin/mkisofs" % native_sysroot):
- msger.info("Building cdrtools-native...\n")
- exec_cmd("bitbake cdrtools-native")
-
- # Make sure mkfs.vfat is available in native sysroot
- if not os.path.isfile("%s/sbin/mkfs.vfat" % native_sysroot):
- msger.info("Building dosfstools-native...\n")
- exec_cmd("bitbake dosfstools-native")
-
- # Make sure mtools is available in native sysroot
- if not os.path.isfile("%s/usr/bin/mcopy" % native_sysroot):
- msger.info("Building mtools-native...\n")
- exec_cmd("bitbake mtools-native")
-
- @classmethod
def do_configure_partition(cls, part, source_params, creator, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
native_sysroot):
@@ -251,18 +203,18 @@ class IsoImagePlugin(SourcePlugin):
"""
isodir = "%s/ISO/" % cr_workdir
- if os.path.exists(cr_workdir):
- shutil.rmtree(cr_workdir)
+ if os.path.exists(isodir):
+ shutil.rmtree(isodir)
install_cmd = "install -d %s " % isodir
exec_cmd(install_cmd)
# Overwrite the name of the created image
- msger.debug("%s" % source_params)
+ logger.debug(source_params)
if 'image_name' in source_params and \
source_params['image_name'].strip():
creator.name = source_params['image_name'].strip()
- msger.debug("The name of the image is: %s" % creator.name)
+ logger.debug("The name of the image is: %s", creator.name)
@classmethod
def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
@@ -278,7 +230,7 @@ class IsoImagePlugin(SourcePlugin):
if part.rootfs_dir is None:
if not 'ROOTFS_DIR' in rootfs_dir:
- msger.error("Couldn't find --rootfs-dir, exiting.\n")
+ raise WicError("Couldn't find --rootfs-dir, exiting.")
rootfs_dir = rootfs_dir['ROOTFS_DIR']
else:
if part.rootfs_dir in rootfs_dir:
@@ -286,144 +238,96 @@ class IsoImagePlugin(SourcePlugin):
elif part.rootfs_dir:
rootfs_dir = part.rootfs_dir
else:
- msg = "Couldn't find --rootfs-dir=%s connection "
- msg += "or it is not a valid path, exiting.\n"
- msger.error(msg % part.rootfs_dir)
+ raise WicError("Couldn't find --rootfs-dir=%s connection "
+ "or it is not a valid path, exiting." %
+ part.rootfs_dir)
if not os.path.isdir(rootfs_dir):
rootfs_dir = get_bitbake_var("IMAGE_ROOTFS")
if not os.path.isdir(rootfs_dir):
- msger.error("Couldn't find IMAGE_ROOTFS, exiting.\n")
+ raise WicError("Couldn't find IMAGE_ROOTFS, exiting.")
part.rootfs_dir = rootfs_dir
-
- # Prepare rootfs.img
- hdd_dir = get_bitbake_var("HDDDIR")
+ deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
img_iso_dir = get_bitbake_var("ISODIR")
- rootfs_img = "%s/rootfs.img" % hdd_dir
- if not os.path.isfile(rootfs_img):
- rootfs_img = "%s/rootfs.img" % img_iso_dir
- if not os.path.isfile(rootfs_img):
- # check if rootfs.img is in deploydir
- deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
- image_name = get_bitbake_var("IMAGE_LINK_NAME")
- rootfs_img = "%s/%s.%s" \
- % (deploy_dir, image_name, part.fstype)
-
- if not os.path.isfile(rootfs_img):
- # create image file with type specified by --fstype
- # which contains rootfs
- du_cmd = "du -bks %s" % rootfs_dir
- out = exec_cmd(du_cmd)
- part.size = int(out.split()[0])
- part.extra_space = 0
- part.overhead_factor = 1.2
- part.prepare_rootfs(cr_workdir, oe_builddir, rootfs_dir, \
- native_sysroot)
- rootfs_img = part.source_file
-
- install_cmd = "install -m 0644 %s %s/rootfs.img" \
- % (rootfs_img, isodir)
- exec_cmd(install_cmd)
-
# Remove the temporary file created by part.prepare_rootfs()
if os.path.isfile(part.source_file):
os.remove(part.source_file)
- # Prepare initial ramdisk
- initrd = "%s/initrd" % hdd_dir
- if not os.path.isfile(initrd):
- initrd = "%s/initrd" % img_iso_dir
- if not os.path.isfile(initrd):
- initrd = cls._build_initramfs_path(rootfs_dir, cr_workdir)
-
- install_cmd = "install -m 0644 %s %s/initrd" \
- % (initrd, isodir)
+        # Support using an initrd other than the default
+ if source_params.get('initrd'):
+ initrd = source_params['initrd']
+ if not deploy_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+ cp_cmd = "cp %s/%s %s" % (deploy_dir, initrd, cr_workdir)
+ exec_cmd(cp_cmd)
+ else:
+ # Prepare initial ramdisk
+ initrd = "%s/initrd" % deploy_dir
+ if not os.path.isfile(initrd):
+ initrd = "%s/initrd" % img_iso_dir
+ if not os.path.isfile(initrd):
+ initrd = cls._build_initramfs_path(rootfs_dir, cr_workdir)
+
+ install_cmd = "install -m 0644 %s %s/initrd" % (initrd, isodir)
exec_cmd(install_cmd)
# Remove the temporary file created by _build_initramfs_path function
if os.path.isfile("%s/initrd.cpio.gz" % cr_workdir):
os.remove("%s/initrd.cpio.gz" % cr_workdir)
- # Install bzImage
- install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
- (kernel_dir, isodir)
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ install_cmd = "install -m 0644 %s/%s %s/%s" % \
+ (kernel_dir, kernel, isodir, kernel)
exec_cmd(install_cmd)
#Create bootloader for efi boot
try:
- if source_params['loader'] == 'grub-efi':
- # Builds grub.cfg if ISODIR didn't exist or
- # didn't contains grub.cfg
- bootimg_dir = img_iso_dir
- if not os.path.exists("%s/EFI/BOOT" % bootimg_dir):
- bootimg_dir = "%s/bootimg" % cr_workdir
- if os.path.exists(bootimg_dir):
- shutil.rmtree(bootimg_dir)
- install_cmd = "install -d %s/EFI/BOOT" % bootimg_dir
- exec_cmd(install_cmd)
-
- if not os.path.isfile("%s/EFI/BOOT/boot.cfg" % bootimg_dir):
- cls.do_configure_grubefi(part, creator, bootimg_dir)
+ target_dir = "%s/EFI/BOOT" % isodir
+ if os.path.exists(target_dir):
+ shutil.rmtree(target_dir)
+ os.makedirs(target_dir)
+
+ if source_params['loader'] == 'grub-efi':
            # Builds bootx64.efi/bootia32.efi if ISODIR didn't exist or
            # didn't contain it
target_arch = get_bitbake_var("TARGET_SYS")
if not target_arch:
- msger.error("Coludn't find target architecture\n")
+                raise WicError("Couldn't find target architecture")
if re.match("x86_64", target_arch):
- grub_target = 'x86_64-efi'
- grub_image = "bootx64.efi"
+ grub_src_image = "grub-efi-bootx64.efi"
+ grub_dest_image = "bootx64.efi"
elif re.match('i.86', target_arch):
- grub_target = 'i386-efi'
- grub_image = "bootia32.efi"
+ grub_src_image = "grub-efi-bootia32.efi"
+ grub_dest_image = "bootia32.efi"
else:
- msger.error("grub-efi is incompatible with target %s\n" \
- % target_arch)
-
- if not os.path.isfile("%s/EFI/BOOT/%s" \
- % (bootimg_dir, grub_image)):
- grub_path = get_bitbake_var("STAGING_LIBDIR")
- if not grub_path:
- msger.error("Couldn't find STAGING_LIBDIR, exiting.\n")
-
- grub_core = "%s/grub/%s" % (grub_path, grub_target)
- if not os.path.exists(grub_core):
- msger.info("Building grub-efi...\n")
- exec_cmd("bitbake grub-efi")
- if not os.path.exists(grub_core):
- msger.error("Please build grub-efi first\n")
-
- grub_cmd = "grub-mkimage -p '/EFI/BOOT' "
- grub_cmd += "-d %s " % grub_core
- grub_cmd += "-O %s -o %s/EFI/BOOT/%s " \
- % (grub_target, bootimg_dir, grub_image)
- grub_cmd += "part_gpt part_msdos ntfs ntfscomp fat ext2 "
- grub_cmd += "normal chain boot configfile linux multiboot "
- grub_cmd += "search efi_gop efi_uga font gfxterm gfxmenu "
- grub_cmd += "terminal minicmd test iorw loadenv echo help "
- grub_cmd += "reboot serial terminfo iso9660 loopback tar "
- grub_cmd += "memdisk ls search_fs_uuid udf btrfs xfs lvm "
- grub_cmd += "reiserfs ata "
- exec_native_cmd(grub_cmd, native_sysroot)
+ raise WicError("grub-efi is incompatible with target %s" %
+ target_arch)
- else:
- # TODO: insert gummiboot stuff
- msger.error("unrecognized bootimg-efi loader: %s" \
- % source_params['loader'])
- except KeyError:
- msger.error("bootimg-efi requires a loader, none specified")
+ grub_target = os.path.join(target_dir, grub_dest_image)
+ if not os.path.isfile(grub_target):
+ grub_src = os.path.join(deploy_dir, grub_src_image)
+ if not os.path.exists(grub_src):
+                        raise WicError("Grub loader %s not found in %s. "
+                                       "Please build grub-efi first" % (grub_src_image, deploy_dir))
+ shutil.copy(grub_src, grub_target)
- if os.path.exists("%s/EFI/BOOT" % isodir):
- shutil.rmtree("%s/EFI/BOOT" % isodir)
+ if not os.path.isfile(os.path.join(target_dir, "boot.cfg")):
+ cls.do_configure_grubefi(part, creator, target_dir)
- shutil.copytree(bootimg_dir+"/EFI/BOOT", isodir+"/EFI/BOOT")
-
- # If exists, remove cr_workdir/bootimg temporary folder
- if os.path.exists("%s/bootimg" % cr_workdir):
- shutil.rmtree("%s/bootimg" % cr_workdir)
+ else:
+ raise WicError("unrecognized bootimg-efi loader: %s" %
+ source_params['loader'])
+ except KeyError:
+ raise WicError("bootimg-efi requires a loader, none specified")
# Create efi.img that contains bootloader files for EFI booting
# if ISODIR didn't exist or didn't contains it
@@ -432,20 +336,23 @@ class IsoImagePlugin(SourcePlugin):
(img_iso_dir, isodir)
exec_cmd(install_cmd)
else:
+ # Default to 100 blocks of extra space for file system overhead
+ esp_extra_blocks = int(source_params.get('esp_extra_blocks', '100'))
+
du_cmd = "du -bks %s/EFI" % isodir
out = exec_cmd(du_cmd)
blocks = int(out.split()[0])
- # Add some extra space for file system overhead
- blocks += 100
- msg = "Added 100 extra blocks to %s to get to %d total blocks" \
- % (part.mountpoint, blocks)
- msger.debug(msg)
+ blocks += esp_extra_blocks
+            logger.debug("Added %d extra blocks to %s to get to %d "
+                         "total blocks", esp_extra_blocks, part.mountpoint, blocks)
# dosfs image for EFI boot
bootimg = "%s/efi.img" % isodir
- dosfs_cmd = 'mkfs.vfat -n "EFIimg" -S 512 -C %s %d' \
- % (bootimg, blocks)
+ esp_label = source_params.get('esp_label', 'EFIimg')
+
+ dosfs_cmd = 'mkfs.vfat -n \'%s\' -S 512 -C %s %d' \
+ % (esp_label, bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mmd_cmd = "mmd -i %s ::/EFI" % bootimg
@@ -461,7 +368,7 @@ class IsoImagePlugin(SourcePlugin):
# Prepare files for legacy boot
syslinux_dir = get_bitbake_var("STAGING_DATADIR")
if not syslinux_dir:
- msger.error("Couldn't find STAGING_DATADIR, exiting.\n")
+ raise WicError("Couldn't find STAGING_DATADIR, exiting.")
if os.path.exists("%s/isolinux" % isodir):
shutil.rmtree("%s/isolinux" % isodir)
@@ -501,7 +408,7 @@ class IsoImagePlugin(SourcePlugin):
mkisofs_cmd += "-eltorito-platform 0xEF -eltorito-boot %s " % efi_img
mkisofs_cmd += "-no-emul-boot %s " % isodir
- msger.debug("running command: %s" % mkisofs_cmd)
+ logger.debug("running command: %s", mkisofs_cmd)
exec_native_cmd(mkisofs_cmd, native_sysroot)
shutil.rmtree(isodir)
@@ -522,23 +429,19 @@ class IsoImagePlugin(SourcePlugin):
utility for booting via BIOS from disk storage devices.
"""
+ iso_img = "%s.p1" % disk.path
full_path = creator._full_path(workdir, disk_name, "direct")
- iso_img = "%s.p1" % full_path
full_path_iso = creator._full_path(workdir, disk_name, "iso")
isohybrid_cmd = "isohybrid -u %s" % iso_img
- msger.debug("running command: %s" % \
- isohybrid_cmd)
+ logger.debug("running command: %s", isohybrid_cmd)
exec_native_cmd(isohybrid_cmd, native_sysroot)
# Replace the image created by direct plugin with the one created by
# mkisofs command. This is necessary because the iso image created by
        # mkisofs has a very specific MBR in the system area of the ISO image,
        # and the direct plugin adds and configures another MBR.
- msger.debug("Replaceing the image created by direct plugin\n")
- os.remove(full_path)
+        logger.debug("Replacing the image created by the direct plugin")
+ os.remove(disk.path)
shutil.copy2(iso_img, full_path_iso)
shutil.copy2(full_path_iso, full_path)
-
- # Remove temporary ISO file
- os.remove(iso_img)
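The install step above post-processes the mkisofs output rather than the image assembled by the direct plugin, because the two would otherwise carry conflicting MBRs. A sketch of the same flow (names mirror the variables above; not the wic API):

    import shutil
    import subprocess

    def finalize_hybrid_iso(iso_img, full_path, full_path_iso):
        # isohybrid -u makes the ISO bootable from disk-like media as well
        subprocess.check_call(["isohybrid", "-u", iso_img])
        # Keep a .iso-named copy, then overwrite the direct plugin's image
        shutil.copy2(iso_img, full_path_iso)
        shutil.copy2(full_path_iso, full_path)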
diff --git a/scripts/lib/wic/plugins/source/rawcopy.py b/scripts/lib/wic/plugins/source/rawcopy.py
index e0b11f95ad..82970ce51b 100644
--- a/scripts/lib/wic/plugins/source/rawcopy.py
+++ b/scripts/lib/wic/plugins/source/rawcopy.py
@@ -1,27 +1,17 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
+import logging
import os
-from wic import msger
+from wic import WicError
from wic.pluginbase import SourcePlugin
-from wic.utils.oe.misc import exec_cmd, get_bitbake_var
+from wic.misc import exec_cmd, get_bitbake_var
from wic.filemap import sparse_copy
+logger = logging.getLogger('wic')
+
class RawCopyPlugin(SourcePlugin):
"""
Populate partition content from raw image file.
@@ -29,24 +19,24 @@ class RawCopyPlugin(SourcePlugin):
name = 'rawcopy'
- @classmethod
- def do_install_disk(cls, disk, disk_name, cr, workdir, oe_builddir,
- bootimg_dir, kernel_dir, native_sysroot):
- """
- Called after all partitions have been prepared and assembled into a
- disk image. Do nothing.
- """
- pass
+ @staticmethod
+ def do_image_label(fstype, dst, label):
+ if fstype.startswith('ext'):
+ cmd = 'tune2fs -L %s %s' % (label, dst)
+ elif fstype in ('msdos', 'vfat'):
+ cmd = 'dosfslabel %s %s' % (dst, label)
+ elif fstype == 'btrfs':
+ cmd = 'btrfs filesystem label %s %s' % (dst, label)
+ elif fstype == 'swap':
+ cmd = 'mkswap -L %s %s' % (label, dst)
+ elif fstype == 'squashfs':
+ raise WicError("It's not possible to update a squashfs "
+ "filesystem label '%s'" % (label))
+ else:
+ raise WicError("Cannot update filesystem label: "
+ "Unknown fstype: '%s'" % (fstype))
- @classmethod
- def do_configure_partition(cls, part, source_params, cr, cr_workdir,
- oe_builddir, bootimg_dir, kernel_dir,
- native_sysroot):
- """
- Called before do_prepare_partition(). Possibly prepare
- configuration files of some sort.
- """
- pass
+ exec_cmd(cmd)
@classmethod
def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
@@ -56,32 +46,36 @@ class RawCopyPlugin(SourcePlugin):
Called to do the actual content population for a partition i.e. it
'prepares' the partition to be incorporated into the image.
"""
- if not bootimg_dir:
- bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
- if not bootimg_dir:
- msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting\n")
+ if not kernel_dir:
+ kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not kernel_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
- msger.debug('Bootimg dir: %s' % bootimg_dir)
+ logger.debug('Kernel dir: %s', kernel_dir)
if 'file' not in source_params:
- msger.error("No file specified\n")
- return
+ raise WicError("No file specified")
- src = os.path.join(bootimg_dir, source_params['file'])
+ src = os.path.join(kernel_dir, source_params['file'])
dst = os.path.join(cr_workdir, "%s.%s" % (source_params['file'], part.lineno))
+ if not os.path.exists(os.path.dirname(dst)):
+ os.makedirs(os.path.dirname(dst))
+
if 'skip' in source_params:
- sparse_copy(src, dst, skip=source_params['skip'])
+ sparse_copy(src, dst, skip=int(source_params['skip']))
else:
sparse_copy(src, dst)
# get the size in the right units for kickstart (kB)
du_cmd = "du -Lbks %s" % dst
out = exec_cmd(du_cmd)
- filesize = out.split()[0]
+ filesize = int(out.split()[0])
- if int(filesize) > int(part.size):
+ if filesize > part.size:
part.size = filesize
- part.source_file = dst
+ if part.label:
+ RawCopyPlugin.do_image_label(part.fstype, dst, part.label)
+ part.source_file = dst
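For reference, sparse_copy() copies only the mapped blocks of the source, so holes in large raw images are preserved; the optional skip argument drops a leading portion of the source, as wired to the 'skip' source parameter above. A minimal usage sketch (paths and value illustrative):

    from wic.filemap import sparse_copy

    # Stage a raw image for the partition, skipping a leading header;
    # the skip value here is illustrative only
    sparse_copy('deploy/u-boot.imx', 'workdir/u-boot.imx.1', skip=512)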
diff --git a/scripts/lib/wic/plugins/source/rootfs.py b/scripts/lib/wic/plugins/source/rootfs.py
index 425da8b22a..e26e95b991 100644
--- a/scripts/lib/wic/plugins/source/rootfs.py
+++ b/scripts/lib/wic/plugins/source/rootfs.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'rootfs' source plugin class for 'wic'
@@ -25,11 +11,18 @@
# Joao Henrique Ferreira de Freitas <joaohf (at] gmail.com>
#
+import logging
import os
+import shutil
+import sys
+
+from oe.path import copyhardlinktree
-from wic import msger
+from wic import WicError
from wic.pluginbase import SourcePlugin
-from wic.utils.oe.misc import get_bitbake_var
+from wic.misc import get_bitbake_var
+
+logger = logging.getLogger('wic')
class RootfsPlugin(SourcePlugin):
"""
@@ -41,16 +34,15 @@ class RootfsPlugin(SourcePlugin):
@staticmethod
def __get_rootfs_dir(rootfs_dir):
if os.path.isdir(rootfs_dir):
- return rootfs_dir
+ return os.path.realpath(rootfs_dir)
image_rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", rootfs_dir)
if not os.path.isdir(image_rootfs_dir):
- msg = "No valid artifact IMAGE_ROOTFS from image named"
- msg += " %s has been found at %s, exiting.\n" % \
- (rootfs_dir, image_rootfs_dir)
- msger.error(msg)
+ raise WicError("No valid artifact IMAGE_ROOTFS from image "
+ "named %s has been found at %s, exiting." %
+ (rootfs_dir, image_rootfs_dir))
- return image_rootfs_dir
+ return os.path.realpath(image_rootfs_dir)
@classmethod
def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
@@ -63,8 +55,8 @@ class RootfsPlugin(SourcePlugin):
"""
if part.rootfs_dir is None:
if not 'ROOTFS_DIR' in krootfs_dir:
- msg = "Couldn't find --rootfs-dir, exiting"
- msger.error(msg)
+ raise WicError("Couldn't find --rootfs-dir, exiting")
+
rootfs_dir = krootfs_dir['ROOTFS_DIR']
else:
if part.rootfs_dir in krootfs_dir:
@@ -72,12 +64,49 @@ class RootfsPlugin(SourcePlugin):
elif part.rootfs_dir:
rootfs_dir = part.rootfs_dir
else:
- msg = "Couldn't find --rootfs-dir=%s connection"
- msg += " or it is not a valid path, exiting"
- msger.error(msg % part.rootfs_dir)
+ raise WicError("Couldn't find --rootfs-dir=%s connection or "
+ "it is not a valid path, exiting" % part.rootfs_dir)
+
+ part.rootfs_dir = cls.__get_rootfs_dir(rootfs_dir)
+
+ new_rootfs = None
+ # Handle excluded paths.
+ if part.exclude_path is not None:
+ # We need a new rootfs directory we can delete files from. Copy to
+ # workdir.
+ new_rootfs = os.path.realpath(os.path.join(cr_workdir, "rootfs%d" % part.lineno))
+
+ if os.path.lexists(new_rootfs):
+                shutil.rmtree(new_rootfs)
+
+ copyhardlinktree(part.rootfs_dir, new_rootfs)
+
+ for orig_path in part.exclude_path:
+ path = orig_path
+ if os.path.isabs(path):
+                    logger.error("Must be relative: --exclude-path=%s", orig_path)
+ sys.exit(1)
+
+ full_path = os.path.realpath(os.path.join(new_rootfs, path))
- real_rootfs_dir = cls.__get_rootfs_dir(rootfs_dir)
+ # Disallow climbing outside of parent directory using '..',
+ # because doing so could be quite disastrous (we will delete the
+ # directory).
+ if not full_path.startswith(new_rootfs):
+                logger.error("'%s' points to a path outside the rootfs", orig_path)
+ sys.exit(1)
- part.rootfs_dir = real_rootfs_dir
- part.prepare_rootfs(cr_workdir, oe_builddir, real_rootfs_dir, native_sysroot)
+ if path.endswith(os.sep):
+ # Delete content only.
+ for entry in os.listdir(full_path):
+ full_entry = os.path.join(full_path, entry)
+ if os.path.isdir(full_entry) and not os.path.islink(full_entry):
+ shutil.rmtree(full_entry)
+ else:
+ os.remove(full_entry)
+ else:
+ # Delete whole directory.
+ shutil.rmtree(full_path)
+ part.prepare_rootfs(cr_workdir, oe_builddir,
+ new_rootfs or part.rootfs_dir, native_sysroot)
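The exclude-path handling above deletes files from a hardlink copy of the rootfs, so the traversal guard matters: a path that escaped the copy would delete from the original tree. A standalone sketch of that guard (helper name illustrative; assumes new_rootfs is already realpath-resolved, as above):

    import os

    def resolve_exclude(new_rootfs, path):
        # Excluded paths must be relative and must resolve inside the copy
        if os.path.isabs(path):
            raise ValueError("Must be relative: --exclude-path=%s" % path)
        full_path = os.path.realpath(os.path.join(new_rootfs, path))
        if not full_path.startswith(new_rootfs):
            raise ValueError("'%s' points to a path outside the rootfs" % path)
        return full_path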
diff --git a/scripts/lib/wic/plugins/source/rootfs_pcbios_ext.py b/scripts/lib/wic/plugins/source/rootfs_pcbios_ext.py
deleted file mode 100644
index 3d60e6f0ff..0000000000
--- a/scripts/lib/wic/plugins/source/rootfs_pcbios_ext.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# This program is free software; you can distribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for mo details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# AUTHOR
-# Adrian Freihofer <adrian.freihofer (at] neratec.com>
-#
-
-import os
-from wic import msger
-from wic.utils import syslinux
-from wic.utils import runner
-from wic.utils.oe import misc
-from wic.utils.errors import ImageError
-from wic.pluginbase import SourcePlugin
-
-
-# pylint: disable=no-init
-class RootfsPlugin(SourcePlugin):
- """
- Create root partition and install syslinux bootloader
-
- This plugin creates a disk image containing a bootable root partition with
- syslinux installed. The filesystem is ext2/3/4, no extra boot partition is
- required.
-
- Example kickstart file:
- part / --source rootfs-pcbios-ext --ondisk sda --fstype=ext4 --label rootfs --align 1024
- bootloader --source rootfs-pcbios-ext --timeout=0 --append="rootwait rootfstype=ext4"
-
- The first line generates a root file system including a syslinux.cfg file
- The "--source rootfs-pcbios-ext" in the second line triggers the installation
- of ldlinux.sys into the image.
- """
-
- name = 'rootfs-pcbios-ext'
-
- @staticmethod
- def _get_rootfs_dir(rootfs_dir):
- """
- Find rootfs pseudo dir
-
- If rootfs_dir is a directory consider it as rootfs directory.
- Otherwise ask bitbake about the IMAGE_ROOTFS directory.
- """
- if os.path.isdir(rootfs_dir):
- return rootfs_dir
-
- image_rootfs_dir = misc.get_bitbake_var("IMAGE_ROOTFS", rootfs_dir)
- if not os.path.isdir(image_rootfs_dir):
- msg = "No valid artifact IMAGE_ROOTFS from image named"
- msg += " %s has been found at %s, exiting.\n" % \
- (rootfs_dir, image_rootfs_dir)
- msger.error(msg)
-
- return image_rootfs_dir
-
- # pylint: disable=unused-argument
- @classmethod
- def do_configure_partition(cls, part, source_params, image_creator,
- image_creator_workdir, oe_builddir, bootimg_dir,
- kernel_dir, native_sysroot):
- """
- Creates syslinux config in rootfs directory
-
- Called before do_prepare_partition()
- """
- bootloader = image_creator.ks.bootloader
-
- syslinux_conf = ""
- syslinux_conf += "PROMPT 0\n"
-
- syslinux_conf += "TIMEOUT " + str(bootloader.timeout) + "\n"
- syslinux_conf += "ALLOWOPTIONS 1\n"
-
- # Derive SERIAL... line from from kernel boot parameters
- syslinux_conf += syslinux.serial_console_form_kargs(options) + "\n"
-
- syslinux_conf += "DEFAULT linux\n"
- syslinux_conf += "LABEL linux\n"
- syslinux_conf += " KERNEL /boot/bzImage\n"
-
- syslinux_conf += " APPEND label=boot root=%s %s\n" % \
- (image_creator.rootdev, bootloader.append)
-
- syslinux_cfg = os.path.join(image_creator.rootfs_dir['ROOTFS_DIR'], "boot", "syslinux.cfg")
- msger.debug("Writing syslinux config %s" % syslinux_cfg)
- with open(syslinux_cfg, "w") as cfg:
- cfg.write(syslinux_conf)
-
- @classmethod
- def do_prepare_partition(cls, part, source_params, image_creator,
- image_creator_workdir, oe_builddir, bootimg_dir,
- kernel_dir, krootfs_dir, native_sysroot):
- """
- Creates partition out of rootfs directory
-
- Prepare content for a rootfs partition i.e. create a partition
- and fill it from a /rootfs dir.
- Install syslinux bootloader into root partition image file
- """
- def is_exe(exepath):
- """Verify exepath is an executable file"""
- return os.path.isfile(exepath) and os.access(exepath, os.X_OK)
-
- # Make sure syslinux-nomtools is available in native sysroot or fail
- native_syslinux_nomtools = os.path.join(native_sysroot, "usr/bin/syslinux-nomtools")
- if not is_exe(native_syslinux_nomtools):
- msger.info("building syslinux-native...")
- misc.exec_cmd("bitbake syslinux-native")
- if not is_exe(native_syslinux_nomtools):
- msger.error("Couldn't find syslinux-nomtools (%s), exiting\n" %
- native_syslinux_nomtools)
-
- if part.rootfs is None:
- if 'ROOTFS_DIR' not in krootfs_dir:
- msger.error("Couldn't find --rootfs-dir, exiting")
- rootfs_dir = krootfs_dir['ROOTFS_DIR']
- else:
- if part.rootfs in krootfs_dir:
- rootfs_dir = krootfs_dir[part.rootfs]
- elif part.rootfs:
- rootfs_dir = part.rootfs
- else:
- msg = "Couldn't find --rootfs-dir=%s connection"
- msg += " or it is not a valid path, exiting"
- msger.error(msg % part.rootfs)
-
- real_rootfs_dir = cls._get_rootfs_dir(rootfs_dir)
-
- part.rootfs_dir = real_rootfs_dir
- part.prepare_rootfs(image_creator_workdir, oe_builddir, real_rootfs_dir, native_sysroot)
-
- # install syslinux into rootfs partition
- syslinux_cmd = "syslinux-nomtools -d /boot -i %s" % part.source_file
- misc.exec_native_cmd(syslinux_cmd, native_sysroot)
-
- @classmethod
- def do_install_disk(cls, disk, disk_name, image_creator, workdir, oe_builddir,
- bootimg_dir, kernel_dir, native_sysroot):
- """
- Assemble partitions to disk image
-
- Called after all partitions have been prepared and assembled into a
- disk image. In this case, we install the MBR.
- """
- mbrfile = os.path.join(native_sysroot, "usr/share/syslinux/")
- if image_creator.ptable_format == 'msdos':
- mbrfile += "mbr.bin"
- elif image_creator.ptable_format == 'gpt':
- mbrfile += "gptmbr.bin"
- else:
- msger.error("Unsupported partition table: %s" % \
- image_creator.ptable_format)
-
- if not os.path.exists(mbrfile):
- msger.error("Couldn't find %s. Has syslinux-native been baked?" % mbrfile)
-
- full_path = disk['disk'].device
- msger.debug("Installing MBR on disk %s as %s with size %s bytes" \
- % (disk_name, full_path, disk['min_size']))
-
- ret_code = runner.show(['dd', 'if=%s' % mbrfile, 'of=%s' % full_path, 'conv=notrunc'])
- if ret_code != 0:
- raise ImageError("Unable to set MBR to %s" % full_path)
diff --git a/scripts/lib/wic/test b/scripts/lib/wic/test
deleted file mode 100644
index 9daeafb986..0000000000
--- a/scripts/lib/wic/test
+++ /dev/null
@@ -1 +0,0 @@
-test
diff --git a/scripts/lib/wic/utils/errors.py b/scripts/lib/wic/utils/errors.py
deleted file mode 100644
index d1b514dd9d..0000000000
--- a/scripts/lib/wic/utils/errors.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2007 Red Hat, Inc.
-# Copyright (c) 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-class WicError(Exception):
- pass
-
-class CreatorError(WicError):
- pass
-
-class Usage(WicError):
- pass
-
-class ImageError(WicError):
- pass
diff --git a/scripts/lib/wic/utils/misc.py b/scripts/lib/wic/utils/misc.py
deleted file mode 100644
index 1415ae906c..0000000000
--- a/scripts/lib/wic/utils/misc.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2010, 2011 Intel Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os
-import time
-import wic.engine
-
-def build_name(kscfg, release=None, prefix=None, suffix=None):
- """Construct and return an image name string.
-
- This is a utility function to help create sensible name and fslabel
- strings. The name is constructed using the sans-prefix-and-extension
- kickstart filename and the supplied prefix and suffix.
-
- kscfg -- a path to a kickstart file
- release -- a replacement to suffix for image release
- prefix -- a prefix to prepend to the name; defaults to None, which causes
- no prefix to be used
- suffix -- a suffix to append to the name; defaults to None, which causes
- a YYYYMMDDHHMM suffix to be used
-
- Note, if maxlen is less then the len(suffix), you get to keep both pieces.
-
- """
- name = os.path.basename(kscfg)
- idx = name.rfind('.')
- if idx >= 0:
- name = name[:idx]
-
- if release is not None:
- suffix = ""
- if prefix is None:
- prefix = ""
- if suffix is None:
- suffix = time.strftime("%Y%m%d%H%M")
-
- if name.startswith(prefix):
- name = name[len(prefix):]
-
- prefix = "%s-" % prefix if prefix else ""
- suffix = "-%s" % suffix if suffix else ""
-
- ret = prefix + name + suffix
-
- return ret
-
-def find_canned(scripts_path, file_name):
- """
- Find a file either by its path or by name in the canned files dir.
-
- Return None if not found
- """
- if os.path.exists(file_name):
- return file_name
-
- layers_canned_wks_dir = wic.engine.build_canned_image_list(scripts_path)
- for canned_wks_dir in layers_canned_wks_dir:
- for root, dirs, files in os.walk(canned_wks_dir):
- for fname in files:
- if fname == file_name:
- fullpath = os.path.join(canned_wks_dir, fname)
- return fullpath
-
-def get_custom_config(boot_file):
- """
- Get the custom configuration to be used for the bootloader.
-
- Return None if the file can't be found.
- """
- scripts_path = os.path.abspath(os.path.dirname(__file__))
- # Get the scripts path of poky
- for x in range(0, 3):
- scripts_path = os.path.dirname(scripts_path)
-
- cfg_file = find_canned(scripts_path, boot_file)
- if cfg_file:
- with open(cfg_file, "r") as f:
- config = f.read()
- return config
-
- return None
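get_custom_config() is relocated to wic.engine (matching the new imports above); per its old docstring it returns the bootloader config file's contents, or None if the file can't be found. Usage stays the same (file name illustrative):

    from wic.engine import get_custom_config

    custom_cfg = get_custom_config("directdisk-bootloader-config.cfg")
    if custom_cfg is None:
        print("no canned or local config found")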
diff --git a/scripts/lib/wic/utils/oe/__init__.py b/scripts/lib/wic/utils/oe/__init__.py
deleted file mode 100644
index 0a81575a74..0000000000
--- a/scripts/lib/wic/utils/oe/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# OpenEmbedded wic utils library
-#
-# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# AUTHORS
-# Tom Zanussi <tom.zanussi (at] linux.intel.com>
-#
diff --git a/scripts/lib/wic/utils/partitionedfs.py b/scripts/lib/wic/utils/partitionedfs.py
deleted file mode 100644
index cafb9338df..0000000000
--- a/scripts/lib/wic/utils/partitionedfs.py
+++ /dev/null
@@ -1,370 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2009, 2010, 2011 Intel, Inc.
-# Copyright (c) 2007, 2008 Red Hat, Inc.
-# Copyright (c) 2008 Daniel P. Berrange
-# Copyright (c) 2008 David P. Huff
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os
-from wic import msger
-from wic.utils.errors import ImageError
-from wic.utils.oe.misc import exec_cmd, exec_native_cmd
-from wic.filemap import sparse_copy
-
-# Overhead of the MBR partitioning scheme (just one sector)
-MBR_OVERHEAD = 1
-
-# Overhead of the GPT partitioning scheme
-GPT_OVERHEAD = 34
-
-# Size of a sector in bytes
-SECTOR_SIZE = 512
-
-class Image():
- """
- Generic base object for an image.
-
- An Image is a container for a set of DiskImages and associated
- partitions.
- """
- def __init__(self, native_sysroot=None):
- self.disks = {}
- self.partitions = []
- self.partimages = []
- # Size of a sector used in calculations
- self.sector_size = SECTOR_SIZE
- self._partitions_layed_out = False
- self.native_sysroot = native_sysroot
-
- def __add_disk(self, disk_name):
- """ Add a disk 'disk_name' to the internal list of disks. Note,
- 'disk_name' is the name of the disk in the target system
- (e.g., sdb). """
-
- if disk_name in self.disks:
- # We already have this disk
- return
-
- assert not self._partitions_layed_out
-
- self.disks[disk_name] = \
- {'disk': None, # Disk object
- 'numpart': 0, # Number of allocate partitions
- 'realpart': 0, # Number of partitions in the partition table
- 'partitions': [], # Indexes to self.partitions
- 'offset': 0, # Offset of next partition (in sectors)
- # Minimum required disk size to fit all partitions (in bytes)
- 'min_size': 0,
- 'ptable_format': "msdos"} # Partition table format
-
- def add_disk(self, disk_name, disk_obj):
- """ Add a disk object which have to be partitioned. More than one disk
- can be added. In case of multiple disks, disk partitions have to be
- added for each disk separately with 'add_partition()". """
-
- self.__add_disk(disk_name)
- self.disks[disk_name]['disk'] = disk_obj
-
- def __add_partition(self, part):
- """ This is a helper function for 'add_partition()' which adds a
- partition to the internal list of partitions. """
-
- assert not self._partitions_layed_out
-
- self.partitions.append(part)
- self.__add_disk(part['disk_name'])
-
- def add_partition(self, size, disk_name, mountpoint, source_file=None, fstype=None,
- label=None, fsopts=None, boot=False, align=None, no_table=False,
- part_type=None, uuid=None, system_id=None):
- """ Add the next partition. Prtitions have to be added in the
- first-to-last order. """
-
- ks_pnum = len(self.partitions)
-
- # Converting kB to sectors for parted
- size = size * 1024 // self.sector_size
-
- part = {'ks_pnum': ks_pnum, # Partition number in the KS file
- 'size': size, # In sectors
- 'mountpoint': mountpoint, # Mount relative to chroot
- 'source_file': source_file, # partition contents
- 'fstype': fstype, # Filesystem type
- 'fsopts': fsopts, # Filesystem mount options
- 'label': label, # Partition label
- 'disk_name': disk_name, # physical disk name holding partition
- 'device': None, # kpartx device node for partition
- 'num': None, # Partition number
- 'boot': boot, # Bootable flag
- 'align': align, # Partition alignment
- 'no_table' : no_table, # Partition does not appear in partition table
- 'part_type' : part_type, # Partition type
- 'uuid': uuid, # Partition UUID
- 'system_id': system_id} # Partition system id
-
- self.__add_partition(part)
-
- def layout_partitions(self, ptable_format="msdos"):
- """ Layout the partitions, meaning calculate the position of every
- partition on the disk. The 'ptable_format' parameter defines the
- partition table format and may be "msdos". """
-
- msger.debug("Assigning %s partitions to disks" % ptable_format)
-
- if self._partitions_layed_out:
- return
-
- self._partitions_layed_out = True
-
- # Go through partitions in the order they are added in .ks file
- for num in range(len(self.partitions)):
- part = self.partitions[num]
-
- if part['disk_name'] not in self.disks:
- raise ImageError("No disk %s for partition %s" \
- % (part['disk_name'], part['mountpoint']))
-
- if ptable_format == 'msdos' and part['part_type']:
- # The --part-type can also be implemented for MBR partitions,
- # in which case it would map to the 1-byte "partition type"
- # filed at offset 3 of the partition entry.
- raise ImageError("setting custom partition type is not " \
- "implemented for msdos partitions")
-
- # Get the disk where the partition is located
- disk = self.disks[part['disk_name']]
- disk['numpart'] += 1
- if not part['no_table']:
- disk['realpart'] += 1
- disk['ptable_format'] = ptable_format
-
- if disk['numpart'] == 1:
- if ptable_format == "msdos":
- overhead = MBR_OVERHEAD
- elif ptable_format == "gpt":
- overhead = GPT_OVERHEAD
-
- # Skip one sector required for the partitioning scheme overhead
- disk['offset'] += overhead
-
- if disk['realpart'] > 3:
- # Reserve a sector for EBR for every logical partition
- # before alignment is performed.
- if ptable_format == "msdos":
- disk['offset'] += 1
-
-
- if part['align']:
- # If not first partition and we do have alignment set we need
- # to align the partition.
- # FIXME: This leaves a empty spaces to the disk. To fill the
- # gaps we could enlargea the previous partition?
-
- # Calc how much the alignment is off.
- align_sectors = disk['offset'] % (part['align'] * 1024 // self.sector_size)
-
- if align_sectors:
- # If partition is not aligned as required, we need
- # to move forward to the next alignment point
- align_sectors = (part['align'] * 1024 // self.sector_size) - align_sectors
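-                    # e.g. with a 1 MiB (2048-sector) alignment and the offset
-                    # at sector 2050, 2046 sectors of padding are inserted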
-
- msger.debug("Realignment for %s%s with %s sectors, original"
- " offset %s, target alignment is %sK." %
- (part['disk_name'], disk['numpart'], align_sectors,
- disk['offset'], part['align']))
-
-                    # Increase the offset so the partition starts at the right alignment
- disk['offset'] += align_sectors
-
- part['start'] = disk['offset']
- disk['offset'] += part['size']
-
- part['type'] = 'primary'
- if not part['no_table']:
- part['num'] = disk['realpart']
- else:
- part['num'] = 0
-
- if disk['ptable_format'] == "msdos":
- if disk['realpart'] > 3:
- part['type'] = 'logical'
- part['num'] = disk['realpart'] + 1
-
- disk['partitions'].append(num)
- msger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
- "sectors (%d bytes)." \
- % (part['mountpoint'], part['disk_name'], part['num'],
- part['start'], part['start'] + part['size'] - 1,
- part['size'], part['size'] * self.sector_size))
-
-        # Once all the partitions have been laid out, we can calculate the
-        # minimum disk sizes.
- for disk in self.disks.values():
- disk['min_size'] = disk['offset']
- if disk['ptable_format'] == "gpt":
- disk['min_size'] += GPT_OVERHEAD
-
- disk['min_size'] *= self.sector_size
-
- def __create_partition(self, device, parttype, fstype, start, size):
- """ Create a partition on an image described by the 'device' object. """
-
-        # The start sector is included in the size, so subtract one to get the end.
- end = start + size - 1
- msger.debug("Added '%s' partition, sectors %d-%d, size %d sectors" %
- (parttype, start, end, size))
-
- cmd = "parted -s %s unit s mkpart %s" % (device, parttype)
- if fstype:
- cmd += " %s" % fstype
- cmd += " %d %d" % (start, end)
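-        # e.g. "parted -s ./image.img unit s mkpart primary ext2 2048 411647"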
-
- return exec_native_cmd(cmd, self.native_sysroot)
-
- def __format_disks(self):
- self.layout_partitions()
-
- for dev in self.disks:
- disk = self.disks[dev]
- msger.debug("Initializing partition table for %s" % \
- (disk['disk'].device))
- exec_native_cmd("parted -s %s mklabel %s" % \
- (disk['disk'].device, disk['ptable_format']),
- self.native_sysroot)
-
- msger.debug("Creating partitions")
-
- for part in self.partitions:
- if part['num'] == 0:
- continue
-
- disk = self.disks[part['disk_name']]
- if disk['ptable_format'] == "msdos" and part['num'] == 5:
- # Create an extended partition (note: extended
- # partition is described in MBR and contains all
- # logical partitions). The logical partitions save a
- # sector for an EBR just before the start of a
- # partition. The extended partition must start one
- # sector before the start of the first logical
- # partition. This way the first EBR is inside of the
-                # extended partition. Since the extended partition
- # starts a sector before the first logical partition,
- # add a sector at the back, so that there is enough
- # room for all logical partitions.
- self.__create_partition(disk['disk'].device, "extended",
- None, part['start'] - 1,
- disk['offset'] - part['start'] + 1)
-
- if part['fstype'] == "swap":
- parted_fs_type = "linux-swap"
- elif part['fstype'] == "vfat":
- parted_fs_type = "fat32"
- elif part['fstype'] == "msdos":
- parted_fs_type = "fat16"
- elif part['fstype'] == "ontrackdm6aux3":
- parted_fs_type = "ontrackdm6aux3"
- else:
- # Type for ext2/ext3/ext4/btrfs
- parted_fs_type = "ext2"
-
-            # The boot ROM of OMAP boards requires the vfat boot partition to
-            # have an even number of sectors.
- if part['mountpoint'] == "/boot" and part['fstype'] in ["vfat", "msdos"] \
- and part['size'] % 2:
-                msger.debug("Subtracting one sector from '%s' partition to " \
-                            "get an even number of sectors for the partition" % \
-                            part['mountpoint'])
- part['size'] -= 1
-
- self.__create_partition(disk['disk'].device, part['type'],
- parted_fs_type, part['start'], part['size'])
-
- if part['part_type']:
-                msger.debug("partition %d: set type GUID to %s" % \
- (part['num'], part['part_type']))
- exec_native_cmd("sgdisk --typecode=%d:%s %s" % \
- (part['num'], part['part_type'],
- disk['disk'].device), self.native_sysroot)
-
- if part['uuid']:
- msger.debug("partition %d: set UUID to %s" % \
- (part['num'], part['uuid']))
- exec_native_cmd("sgdisk --partition-guid=%d:%s %s" % \
- (part['num'], part['uuid'], disk['disk'].device),
- self.native_sysroot)
-
- if part['boot']:
- flag_name = "legacy_boot" if disk['ptable_format'] == 'gpt' else "boot"
- msger.debug("Set '%s' flag for partition '%s' on disk '%s'" % \
- (flag_name, part['num'], disk['disk'].device))
- exec_native_cmd("parted -s %s set %d %s on" % \
- (disk['disk'].device, part['num'], flag_name),
- self.native_sysroot)
- if part['system_id']:
- exec_native_cmd("sfdisk --part-type %s %s %s" % \
- (disk['disk'].device, part['num'], part['system_id']),
- self.native_sysroot)
-
- # Parted defaults to enabling the lba flag for fat16 partitions,
- # which causes compatibility issues with some firmware (and really
- # isn't necessary).
- if parted_fs_type == "fat16":
- if disk['ptable_format'] == 'msdos':
- msger.debug("Disable 'lba' flag for partition '%s' on disk '%s'" % \
- (part['num'], disk['disk'].device))
- exec_native_cmd("parted -s %s set %d lba off" % \
- (disk['disk'].device, part['num']),
- self.native_sysroot)
-
- def cleanup(self):
- if self.disks:
- for dev in self.disks:
- disk = self.disks[dev]
- try:
- disk['disk'].cleanup()
- except:
- pass
- # remove partition images
- for image in self.partimages:
- if os.path.isfile(image):
- os.remove(image)
-
- def assemble(self, image_file):
- msger.debug("Installing partitions")
-
- for part in self.partitions:
- source = part['source_file']
- if source:
- # install source_file contents into a partition
- sparse_copy(source, image_file, part['start'] * self.sector_size)
-
- msger.debug("Installed %s in partition %d, sectors %d-%d, "
- "size %d sectors" % \
- (source, part['num'], part['start'],
- part['start'] + part['size'] - 1, part['size']))
-
- partimage = image_file + '.p%d' % part['num']
- os.rename(source, partimage)
- self.partimages.append(partimage)
-
- def create(self):
- for dev in self.disks:
- disk = self.disks[dev]
- disk['disk'].create()
-
- self.__format_disks()
-
- return
diff --git a/scripts/lib/wic/utils/runner.py b/scripts/lib/wic/utils/runner.py
deleted file mode 100644
index db536ba588..0000000000
--- a/scripts/lib/wic/utils/runner.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os
-import subprocess
-
-from wic import msger
-
-def runtool(cmdln_or_args, catch=1):
-    """ Wrapper for most of the subprocess calls.
-    input:
-        cmdln_or_args: either an argument list, or a command-line
-                       string to be run with shell=True
-        catch: 0, quietly run
- 1, only STDOUT
- 2, only STDERR
- 3, both STDOUT and STDERR
- return:
- (rc, output)
-        if catch==0: the output will always be None
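-    example:
-        runtool(['uname', '-r'], catch=1) could return (0, '5.0.7\n')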
- """
-
- if catch not in (0, 1, 2, 3):
- # invalid catch selection, will cause exception, that's good
- return None
-
- if isinstance(cmdln_or_args, list):
- cmd = cmdln_or_args[0]
- shell = False
- else:
- import shlex
- cmd = shlex.split(cmdln_or_args)[0]
- shell = True
-
- if catch != 3:
- dev_null = os.open("/dev/null", os.O_WRONLY)
-
- if catch == 0:
- sout = dev_null
- serr = dev_null
- elif catch == 1:
- sout = subprocess.PIPE
- serr = dev_null
- elif catch == 2:
- sout = dev_null
- serr = subprocess.PIPE
- elif catch == 3:
- sout = subprocess.PIPE
- serr = subprocess.STDOUT
-
- try:
- process = subprocess.Popen(cmdln_or_args, stdout=sout,
- stderr=serr, shell=shell)
- (sout, serr) = process.communicate()
- # combine stdout and stderr, filter None out and decode
- out = ''.join([out.decode('utf-8') for out in [sout, serr] if out])
- except OSError as err:
- if err.errno == 2:
- # [Errno 2] No such file or directory
- msger.error('Cannot run command: %s, lost dependency?' % cmd)
- else:
-            raise # re-raise
- finally:
- if catch != 3:
- os.close(dev_null)
-
- return (process.returncode, out)
-
-def show(cmdln_or_args):
-    # Show all the messages using msger.verbose
-
- rcode, out = runtool(cmdln_or_args, catch=3)
-
- if isinstance(cmdln_or_args, list):
- cmd = ' '.join(cmdln_or_args)
- else:
- cmd = cmdln_or_args
-
- msg = 'running command: "%s"' % cmd
- if out:
- out = out.strip()
- if out:
- msg += ', with output::'
- msg += '\n +----------------'
- for line in out.splitlines():
- msg += '\n | %s' % line
- msg += '\n +----------------'
-
- msger.verbose(msg)
- return rcode
-
-def outs(cmdln_or_args, catch=1):
- # get the outputs of tools
- return runtool(cmdln_or_args, catch)[1].strip()
-
-def quiet(cmdln_or_args):
- return runtool(cmdln_or_args, catch=0)[0]
diff --git a/scripts/lib/wic/utils/syslinux.py b/scripts/lib/wic/utils/syslinux.py
deleted file mode 100644
index aace2863c1..0000000000
--- a/scripts/lib/wic/utils/syslinux.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-# AUTHOR
-# Adrian Freihofer <adrian.freihofer (at] neratec.com>
-
-
-import re
-from wic import msger
-
-
-def serial_console_form_kargs(kernel_args):
- """
- Create SERIAL... line from kernel parameters
-
- syslinux needs a line SERIAL port [baudrate [flowcontrol]]
- in the syslinux.cfg file. The config line is generated based
-    on kernel boot parameters. The parameters of the first
-    ttyS console are considered for the syslinux config.
- @param kernel_args kernel command line
- @return line for syslinux config file e.g. "SERIAL 0 115200"
- """
- syslinux_conf = ""
- for param in kernel_args.split():
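-        # e.g. "console=ttyS0,115200n8r" yields groups ("0", "115200", "n", "8", "r")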
- param_match = re.match("console=ttyS([0-9]+),?([0-9]*)([noe]?)([0-9]?)(r?)", param)
- if param_match:
- syslinux_conf += "SERIAL " + param_match.group(1)
- # baudrate
- if param_match.group(2):
- syslinux_conf += " " + param_match.group(2)
- # parity
- if param_match.group(3) and param_match.group(3) != 'n':
- msger.warning("syslinux does not support parity for console. {} is ignored."
- .format(param_match.group(3)))
- # number of bits
- if param_match.group(4) and param_match.group(4) != '8':
- msger.warning("syslinux supports 8 bit console configuration only. {} is ignored."
- .format(param_match.group(4)))
- # flow control
- if param_match.group(5) and param_match.group(5) != '':
-                msger.warning("syslinux does not support console flow control configuration. {} is ignored."
- .format(param_match.group(5)))
- break
-
- return syslinux_conf
diff --git a/scripts/lnr b/scripts/lnr
index 5fed780eb2..a2ac4fec0f 100755
--- a/scripts/lnr
+++ b/scripts/lnr
@@ -1,4 +1,7 @@
#! /usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Create a *relative* symlink, just like ln --relative does but without needing
# coreutils 8.16.
diff --git a/scripts/multilib_header_wrapper.h b/scripts/multilib_header_wrapper.h
index 5a87540884..c81e7ee5e8 100644
--- a/scripts/multilib_header_wrapper.h
+++ b/scripts/multilib_header_wrapper.h
@@ -1,31 +1,30 @@
/*
* Copyright (C) 2005-2011 by Wind River Systems, Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
*/
-#include <bits/wordsize.h>
+#pragma once
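+
+/* Derive the word size explicitly for targets that may not provide a
+   usable __WORDSIZE, falling back to bits/wordsize.h elsewhere. */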
-#ifdef __WORDSIZE
+#if defined (__bpf__)
+#define __MHWORDSIZE 64
+#elif defined (__arm__)
+#define __MHWORDSIZE 32
+#elif defined (__aarch64__) && defined ( __LP64__)
+#define __MHWORDSIZE 64
+#elif defined (__aarch64__)
+#define __MHWORDSIZE 32
+#else
+#include <bits/wordsize.h>
+#if defined (__WORDSIZE)
+#define __MHWORDSIZE __WORDSIZE
+#else
+#error "__WORDSIZE is not defined"
+#endif
+#endif
-#if __WORDSIZE == 32
+#if __MHWORDSIZE == 32
#ifdef _MIPS_SIM
@@ -41,15 +40,9 @@
#include <ENTER_HEADER_FILENAME_HERE-32.h>
#endif
-#elif __WORDSIZE == 64
+#elif __MHWORDSIZE == 64
#include <ENTER_HEADER_FILENAME_HERE-64.h>
#else
#error "Unknown __WORDSIZE detected"
#endif /* matches #if __WORDSIZE == 32 */
-
-#else /* __WORDSIZE is not defined */
-
-#error "__WORDSIZE is not defined"
-
-#endif
diff --git a/scripts/native-intercept/chgrp b/scripts/native-intercept/chgrp
new file mode 100755
index 0000000000..399c979f9a
--- /dev/null
+++ b/scripts/native-intercept/chgrp
@@ -0,0 +1,5 @@
+#! /bin/sh
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+echo "Intercept $0: $@ -- do nothing"
diff --git a/scripts/native-intercept/chown b/scripts/native-intercept/chown
index 4f43271c2b..399c979f9a 100755
--- a/scripts/native-intercept/chown
+++ b/scripts/native-intercept/chown
@@ -1,2 +1,5 @@
#! /bin/sh
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
echo "Intercept $0: $@ -- do nothing"
diff --git a/scripts/oe-build-perf-report b/scripts/oe-build-perf-report
new file mode 100755
index 0000000000..21bde7e156
--- /dev/null
+++ b/scripts/oe-build-perf-report
@@ -0,0 +1,607 @@
+#!/usr/bin/python3
+#
+# Examine build performance test results
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import json
+import logging
+import os
+import re
+import sys
+from collections import namedtuple, OrderedDict
+from operator import attrgetter
+from xml.etree import ElementTree as ET
+
+# Import oe libs
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(scripts_path, 'lib'))
+import scriptpath
+from build_perf import print_table
+from build_perf.report import (metadata_xml_to_json, results_xml_to_json,
+ aggregate_data, aggregate_metadata, measurement_stats,
+ AggregateTestData)
+from build_perf import html
+from buildstats import BuildStats, diff_buildstats, BSVerDiff
+
+scriptpath.add_oe_lib_path()
+
+from oeqa.utils.git import GitRepo, GitError
+import oeqa.utils.gitarchive as gitarchive
+
+
+# Setup logging
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+log = logging.getLogger('oe-build-perf-report')
+
+def list_test_revs(repo, tag_name, verbosity, **kwargs):
+ """Get list of all tested revisions"""
+ valid_kwargs = dict([(k, v) for k, v in kwargs.items() if v is not None])
+
+ fields, revs = gitarchive.get_test_runs(log, repo, tag_name, **valid_kwargs)
+ ignore_fields = ['tag_number']
+ if verbosity < 2:
+ extra_fields = ['COMMITS', 'TEST RUNS']
+ ignore_fields.extend(['commit_number', 'commit'])
+ else:
+ extra_fields = ['TEST RUNS']
+
+ print_fields = [i for i, f in enumerate(fields) if f not in ignore_fields]
+
+    # Build the table, header row first
+ rows = [[fields[i].upper() for i in print_fields] + extra_fields]
+
+ prev = [''] * len(print_fields)
+ prev_commit = None
+ commit_cnt = 0
+ commit_field = fields.index('commit')
+ for rev in revs:
+ # Only use fields that we want to print
+ cols = [rev[i] for i in print_fields]
+
+ if cols != prev:
+ commit_cnt = 1
+ test_run_cnt = 1
+ new_row = [''] * (len(print_fields) + len(extra_fields))
+
+ for i in print_fields:
+ if cols[i] != prev[i]:
+ break
+ new_row[i:-len(extra_fields)] = cols[i:]
+ rows.append(new_row)
+ else:
+ if rev[commit_field] != prev_commit:
+ commit_cnt += 1
+ test_run_cnt += 1
+
+ if verbosity < 2:
+ new_row[-2] = commit_cnt
+ new_row[-1] = test_run_cnt
+ prev = cols
+ prev_commit = rev[commit_field]
+
+ print_table(rows)
+
+def is_xml_format(repo, commit):
+ """Check if the commit contains xml (or json) data"""
+ if repo.rev_parse(commit + ':results.xml'):
+ log.debug("Detected report in xml format in %s", commit)
+ return True
+ else:
+ log.debug("No xml report in %s, assuming json formatted results", commit)
+ return False
+
+def read_results(repo, tags, xml=True):
+ """Read result files from repo"""
+
+ def parse_xml_stream(data):
+ """Parse multiple concatenated XML objects"""
+ objs = []
+ xml_d = ""
+ for line in data.splitlines():
+ if xml_d and line.startswith('<?xml version='):
+ objs.append(ET.fromstring(xml_d))
+ xml_d = line
+ else:
+ xml_d += line
+ objs.append(ET.fromstring(xml_d))
+ return objs
+
+ def parse_json_stream(data):
+ """Parse multiple concatenated JSON objects"""
+ objs = []
+ json_d = ""
+ for line in data.splitlines():
+ if line == '}{':
+ json_d += '}'
+ objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
+ json_d = '{'
+ else:
+ json_d += line
+ objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
+ return objs
+
+ num_revs = len(tags)
+
+ # Optimize by reading all data with one git command
+ log.debug("Loading raw result data from %d tags, %s...", num_revs, tags[0])
+ if xml:
+ git_objs = [tag + ':metadata.xml' for tag in tags] + [tag + ':results.xml' for tag in tags]
+ data = parse_xml_stream(repo.run_cmd(['show'] + git_objs + ['--']))
+ return ([metadata_xml_to_json(e) for e in data[0:num_revs]],
+ [results_xml_to_json(e) for e in data[num_revs:]])
+ else:
+ git_objs = [tag + ':metadata.json' for tag in tags] + [tag + ':results.json' for tag in tags]
+ data = parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--']))
+ return data[0:num_revs], data[num_revs:]
+
+
+def get_data_item(data, key):
+ """Nested getitem lookup"""
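+    # e.g. get_data_item(meta, 'layers.meta.commit') == meta['layers']['meta']['commit']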
+ for k in key.split('.'):
+ data = data[k]
+ return data
+
+
+def metadata_diff(metadata_l, metadata_r):
+ """Prepare a metadata diff for printing"""
+ keys = [('Hostname', 'hostname', 'hostname'),
+ ('Branch', 'branch', 'layers.meta.branch'),
+ ('Commit number', 'commit_num', 'layers.meta.commit_count'),
+ ('Commit', 'commit', 'layers.meta.commit'),
+ ('Number of test runs', 'testrun_count', 'testrun_count')
+ ]
+
+ def _metadata_diff(key):
+ """Diff metadata from two test reports"""
+ try:
+ val1 = get_data_item(metadata_l, key)
+ except KeyError:
+ val1 = '(N/A)'
+ try:
+ val2 = get_data_item(metadata_r, key)
+ except KeyError:
+ val2 = '(N/A)'
+ return val1, val2
+
+ metadata = OrderedDict()
+ for title, key, key_json in keys:
+ value_l, value_r = _metadata_diff(key_json)
+ metadata[key] = {'title': title,
+ 'value_old': value_l,
+ 'value': value_r}
+ return metadata
+
+
+def print_diff_report(metadata_l, data_l, metadata_r, data_r):
+ """Print differences between two data sets"""
+
+ # First, print general metadata
+ print("\nTEST METADATA:\n==============")
+ meta_diff = metadata_diff(metadata_l, metadata_r)
+ rows = []
+ row_fmt = ['{:{wid}} ', '{:<{wid}} ', '{:<{wid}}']
+ rows = [['', 'CURRENT COMMIT', 'COMPARING WITH']]
+ for key, val in meta_diff.items():
+ # Shorten commit hashes
+ if key == 'commit':
+ rows.append([val['title'] + ':', val['value'][:20], val['value_old'][:20]])
+ else:
+ rows.append([val['title'] + ':', val['value'], val['value_old']])
+ print_table(rows, row_fmt)
+
+ # Print test results
+ print("\nTEST RESULTS:\n=============")
+
+ tests = list(data_l['tests'].keys())
+ # Append tests that are only present in 'right' set
+ tests += [t for t in list(data_r['tests'].keys()) if t not in tests]
+
+ # Prepare data to be printed
+ rows = []
+ row_fmt = ['{:8}', '{:{wid}}', '{:{wid}}', ' {:>{wid}}', ' {:{wid}} ', '{:{wid}}',
+ ' {:>{wid}}', ' {:>{wid}}']
+ num_cols = len(row_fmt)
+ for test in tests:
+ test_l = data_l['tests'][test] if test in data_l['tests'] else None
+ test_r = data_r['tests'][test] if test in data_r['tests'] else None
+ pref = ' '
+ if test_l is None:
+ pref = '+'
+ elif test_r is None:
+ pref = '-'
+ descr = test_l['description'] if test_l else test_r['description']
+ heading = "{} {}: {}".format(pref, test, descr)
+
+ rows.append([heading])
+
+ # Generate the list of measurements
+ meas_l = test_l['measurements'] if test_l else {}
+ meas_r = test_r['measurements'] if test_r else {}
+ measurements = list(meas_l.keys())
+ measurements += [m for m in list(meas_r.keys()) if m not in measurements]
+
+ for meas in measurements:
+ m_pref = ' '
+ if meas in meas_l:
+ stats_l = measurement_stats(meas_l[meas], 'l.')
+ else:
+ stats_l = measurement_stats(None, 'l.')
+ m_pref = '+'
+ if meas in meas_r:
+ stats_r = measurement_stats(meas_r[meas], 'r.')
+ else:
+ stats_r = measurement_stats(None, 'r.')
+ m_pref = '-'
+ stats = stats_l.copy()
+ stats.update(stats_r)
+
+ absdiff = stats['val_cls'](stats['r.mean'] - stats['l.mean'])
+ reldiff = "{:+.1f} %".format(absdiff * 100 / stats['l.mean'])
+ if stats['r.mean'] > stats['l.mean']:
+ absdiff = '+' + str(absdiff)
+ else:
+ absdiff = str(absdiff)
+ rows.append(['', m_pref, stats['name'] + ' ' + stats['quantity'],
+ str(stats['l.mean']), '->', str(stats['r.mean']),
+ absdiff, reldiff])
+ rows.append([''] * num_cols)
+
+ print_table(rows, row_fmt)
+
+ print()
+
+
+class BSSummary(object):
+ def __init__(self, bs1, bs2):
+ self.tasks = {'count': bs2.num_tasks,
+ 'change': '{:+d}'.format(bs2.num_tasks - bs1.num_tasks)}
+ self.top_consumer = None
+ self.top_decrease = None
+ self.top_increase = None
+ self.ver_diff = OrderedDict()
+
+ tasks_diff = diff_buildstats(bs1, bs2, 'cputime')
+
+ # Get top consumers of resources
+ tasks_diff = sorted(tasks_diff, key=attrgetter('value2'))
+ self.top_consumer = tasks_diff[-5:]
+
+ # Get biggest increase and decrease in resource usage
+ tasks_diff = sorted(tasks_diff, key=attrgetter('absdiff'))
+ self.top_decrease = tasks_diff[0:5]
+ self.top_increase = tasks_diff[-5:]
+
+ # Compare recipe versions and prepare data for display
+ ver_diff = BSVerDiff(bs1, bs2)
+ if ver_diff:
+ if ver_diff.new:
+ self.ver_diff['New recipes'] = [(n, r.evr) for n, r in ver_diff.new.items()]
+ if ver_diff.dropped:
+ self.ver_diff['Dropped recipes'] = [(n, r.evr) for n, r in ver_diff.dropped.items()]
+ if ver_diff.echanged:
+ self.ver_diff['Epoch changed'] = [(n, "{} &rarr; {}".format(r.left.evr, r.right.evr)) for n, r in ver_diff.echanged.items()]
+ if ver_diff.vchanged:
+ self.ver_diff['Version changed'] = [(n, "{} &rarr; {}".format(r.left.version, r.right.version)) for n, r in ver_diff.vchanged.items()]
+ if ver_diff.rchanged:
+ self.ver_diff['Revision changed'] = [(n, "{} &rarr; {}".format(r.left.evr, r.right.evr)) for n, r in ver_diff.rchanged.items()]
+
+
+def print_html_report(data, id_comp, buildstats):
+ """Print report in html format"""
+ # Handle metadata
+ metadata = metadata_diff(data[id_comp].metadata, data[-1].metadata)
+
+ # Generate list of tests
+ tests = []
+ for test in data[-1].results['tests'].keys():
+ test_r = data[-1].results['tests'][test]
+ new_test = {'name': test_r['name'],
+ 'description': test_r['description'],
+ 'status': test_r['status'],
+ 'measurements': [],
+ 'err_type': test_r.get('err_type'),
+ }
+ # Limit length of err output shown
+ if 'message' in test_r:
+ lines = test_r['message'].splitlines()
+ if len(lines) > 20:
+ new_test['message'] = '...\n' + '\n'.join(lines[-20:])
+ else:
+ new_test['message'] = test_r['message']
+
+ # Generate the list of measurements
+ for meas in test_r['measurements'].keys():
+ meas_r = test_r['measurements'][meas]
+ meas_type = 'time' if meas_r['type'] == 'sysres' else 'size'
+ new_meas = {'name': meas_r['name'],
+ 'legend': meas_r['legend'],
+ 'description': meas_r['name'] + ' ' + meas_type,
+ }
+ samples = []
+
+ # Run through all revisions in our data
+ for meta, test_data in data:
+                if (test not in test_data['tests'] or
+                    meas not in test_data['tests'][test]['measurements']):
+ samples.append(measurement_stats(None))
+ continue
+ test_i = test_data['tests'][test]
+ meas_i = test_i['measurements'][meas]
+ commit_num = get_data_item(meta, 'layers.meta.commit_count')
+ samples.append(measurement_stats(meas_i))
+ samples[-1]['commit_num'] = commit_num
+
+ absdiff = samples[-1]['val_cls'](samples[-1]['mean'] - samples[id_comp]['mean'])
+ reldiff = absdiff * 100 / samples[id_comp]['mean']
+ new_meas['absdiff'] = absdiff
+ new_meas['absdiff_str'] = str(absdiff) if absdiff < 0 else '+' + str(absdiff)
+ new_meas['reldiff'] = reldiff
+ new_meas['reldiff_str'] = "{:+.1f} %".format(reldiff)
+ new_meas['samples'] = samples
+ new_meas['value'] = samples[-1]
+ new_meas['value_type'] = samples[-1]['val_cls']
+
+ # Compare buildstats
+ bs_key = test + '.' + meas
+ rev = str(metadata['commit_num']['value'])
+ comp_rev = str(metadata['commit_num']['value_old'])
+ if (rev in buildstats and bs_key in buildstats[rev] and
+ comp_rev in buildstats and bs_key in buildstats[comp_rev]):
+ new_meas['buildstats'] = BSSummary(buildstats[comp_rev][bs_key],
+ buildstats[rev][bs_key])
+
+ new_test['measurements'].append(new_meas)
+ tests.append(new_test)
+
+ # Chart options
+ chart_opts = {'haxis': {'min': get_data_item(data[0][0], 'layers.meta.commit_count'),
+ 'max': get_data_item(data[-1][0], 'layers.meta.commit_count')}
+ }
+
+ print(html.template.render(title="Build Perf Test Report",
+ metadata=metadata, test_data=tests,
+ chart_opts=chart_opts))
+
+
+def get_buildstats(repo, notes_ref, revs, outdir=None):
+ """Get the buildstats from git notes"""
+ full_ref = 'refs/notes/' + notes_ref
+ if not repo.rev_parse(full_ref):
+ log.error("No buildstats found, please try running "
+ "'git fetch origin %s:%s' to fetch them from the remote",
+ full_ref, full_ref)
+ return
+
+ missing = False
+ buildstats = {}
+ log.info("Parsing buildstats from 'refs/notes/%s'", notes_ref)
+ for rev in revs:
+ buildstats[rev.commit_number] = {}
+ log.debug('Dumping buildstats for %s (%s)', rev.commit_number,
+ rev.commit)
+ for tag in rev.tags:
+ log.debug(' %s', tag)
+ try:
+ bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref,
+ 'show', tag + '^0']))
+ except GitError:
+ log.warning("Buildstats not found for %s", tag)
+ bs_all = {}
+ missing = True
+
+ for measurement, bs in bs_all.items():
+ # Write out onto disk
+ if outdir:
+ tag_base, run_id = tag.rsplit('/', 1)
+ tag_base = tag_base.replace('/', '_')
+ bs_dir = os.path.join(outdir, measurement, tag_base)
+ if not os.path.exists(bs_dir):
+ os.makedirs(bs_dir)
+ with open(os.path.join(bs_dir, run_id + '.json'), 'w') as f:
+ json.dump(bs, f, indent=2)
+
+ # Read buildstats into a dict
+ _bs = BuildStats.from_json(bs)
+ if measurement not in buildstats[rev.commit_number]:
+ buildstats[rev.commit_number][measurement] = _bs
+ else:
+ buildstats[rev.commit_number][measurement].aggregate(_bs)
+
+ if missing:
+ log.info("Buildstats were missing for some test runs, please "
+ "run 'git fetch origin %s:%s' and try again",
+ full_ref, full_ref)
+
+ return buildstats
+
+
+def auto_args(repo, args):
+ """Guess arguments, if not defined by the user"""
+ # Get the latest commit in the repo
+ log.debug("Guessing arguments from the latest commit")
+ msg = repo.run_cmd(['log', '-1', '--branches', '--remotes', '--format=%b'])
+ for line in msg.splitlines():
+ split = line.split(':', 1)
+ if len(split) != 2:
+ continue
+
+ key = split[0]
+ val = split[1].strip()
+ if key == 'hostname' and not args.hostname:
+ log.debug("Using hostname %s", val)
+ args.hostname = val
+ elif key == 'branch' and not args.branch:
+ log.debug("Using branch %s", val)
+ args.branch = val
+
+
+def parse_args(argv):
+ """Parse command line arguments"""
+ description = """
+Examine build performance test results from a Git repository"""
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description=description)
+
+ parser.add_argument('--debug', '-d', action='store_true',
+ help="Verbose logging")
+ parser.add_argument('--repo', '-r', required=True,
+ help="Results repository (local git clone)")
+ parser.add_argument('--list', '-l', action='count',
+ help="List available test runs")
+ parser.add_argument('--html', action='store_true',
+ help="Generate report in html format")
+ group = parser.add_argument_group('Tag and revision')
+ group.add_argument('--tag-name', '-t',
+ default='{hostname}/{branch}/{machine}/{commit_number}-g{commit}/{tag_number}',
+ help="Tag name (pattern) for finding results")
+ group.add_argument('--hostname', '-H')
+ group.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+    group.add_argument('--branch2', help="Branch to find comparison revisions in")
+ group.add_argument('--machine', default='qemux86')
+ group.add_argument('--history-length', default=25, type=int,
+ help="Number of tested revisions to plot in html report")
+ group.add_argument('--commit',
+ help="Revision to search for")
+ group.add_argument('--commit-number',
+ help="Revision number to search for, redundant if "
+ "--commit is specified")
+ group.add_argument('--commit2',
+ help="Revision to compare with")
+ group.add_argument('--commit-number2',
+ help="Revision number to compare with, redundant if "
+ "--commit2 is specified")
+ parser.add_argument('--dump-buildstats', nargs='?', const='.',
+ help="Dump buildstats of the tests")
+
+ return parser.parse_args(argv)
+
+
+def main(argv=None):
+ """Script entry point"""
+ args = parse_args(argv)
+ if args.debug:
+ log.setLevel(logging.DEBUG)
+
+ repo = GitRepo(args.repo)
+
+ if args.list:
+ list_test_revs(repo, args.tag_name, args.list, hostname=args.hostname)
+ return 0
+
+    # Determine which hostname to use
+ if not args.hostname:
+ auto_args(repo, args)
+
+ revs = gitarchive.get_test_revs(log, repo, args.tag_name, hostname=args.hostname,
+ branch=args.branch, machine=args.machine)
+ if args.branch2:
+ revs2 = gitarchive.get_test_revs(log, repo, args.tag_name, hostname=args.hostname,
+ branch=args.branch2, machine=args.machine)
+ if not len(revs2):
+ log.error("No revisions found to compare against")
+ return 1
+ if not len(revs):
+ log.error("No revision to report on found")
+ return 1
+ else:
+ if len(revs) < 2:
+            log.error("Only %d tested revisions found, unable to generate report" % len(revs))
+ return 1
+
+ # Pick revisions
+ if args.commit:
+ if args.commit_number:
+ log.warning("Ignoring --commit-number as --commit was specified")
+ index1 = gitarchive.rev_find(revs, 'commit', args.commit)
+ elif args.commit_number:
+ index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
+ else:
+ index1 = len(revs) - 1
+
+ if args.branch2:
+ revs2.append(revs[index1])
+ index1 = len(revs2) - 1
+ revs = revs2
+
+ if args.commit2:
+ if args.commit_number2:
+ log.warning("Ignoring --commit-number2 as --commit2 was specified")
+ index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
+ elif args.commit_number2:
+ index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
+ else:
+ if index1 > 0:
+ index2 = index1 - 1
+            # Find the closest matching commit number for comparison.
+            # In future we could check that the commit is a common ancestor
+            # and continue back if not, but this is good enough for now.
+ while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
+ index2 = index2 - 1
+ else:
+ log.error("Unable to determine the other commit, use "
+ "--commit2 or --commit-number2 to specify it")
+ return 1
+
+ index_l = min(index1, index2)
+ index_r = max(index1, index2)
+
+ rev_l = revs[index_l]
+ rev_r = revs[index_r]
+ log.debug("Using 'left' revision %s (%s), %s test runs:\n %s",
+ rev_l.commit_number, rev_l.commit, len(rev_l.tags),
+ '\n '.join(rev_l.tags))
+ log.debug("Using 'right' revision %s (%s), %s test runs:\n %s",
+ rev_r.commit_number, rev_r.commit, len(rev_r.tags),
+ '\n '.join(rev_r.tags))
+
+ # Check report format used in the repo (assume all reports in the same fmt)
+ xml = is_xml_format(repo, revs[index_r].tags[-1])
+
+ if args.html:
+ index_0 = max(0, min(index_l, index_r - args.history_length))
+ rev_range = range(index_0, index_r + 1)
+ else:
+        # We do not need a range of commits for the text report (no graphs)
+ index_0 = index_l
+ rev_range = (index_l, index_r)
+
+ # Read raw data
+ log.debug("Reading %d revisions, starting from %s (%s)",
+ len(rev_range), revs[index_0].commit_number, revs[index_0].commit)
+ raw_data = [read_results(repo, revs[i].tags, xml) for i in rev_range]
+
+ data = []
+ for raw_m, raw_d in raw_data:
+ data.append(AggregateTestData(aggregate_metadata(raw_m),
+ aggregate_data(raw_d)))
+
+ # Read buildstats only when needed
+ buildstats = None
+ if args.dump_buildstats or args.html:
+ outdir = 'oe-build-perf-buildstats' if args.dump_buildstats else None
+ notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch,
+ args.machine)
+ buildstats = get_buildstats(repo, notes_ref, [rev_l, rev_r], outdir)
+
+ # Print report
+ if not args.html:
+ print_diff_report(data[0].metadata, data[0].results,
+ data[1].metadata, data[1].results)
+ else:
+ # Re-map 'left' list index to the data table where index_0 maps to 0
+ print_html_report(data, index_l - index_0, buildstats)
+
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/scripts/oe-build-perf-test b/scripts/oe-build-perf-test
index bb5c382d26..00e00b4ce9 100755
--- a/scripts/oe-build-perf-test
+++ b/scripts/oe-build-perf-test
@@ -1,37 +1,32 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
#
# Build performance test script
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-"""Build performance test script"""
+
import argparse
import errno
import fcntl
+import json
import logging
import os
+import re
import shutil
import sys
-import unittest
from datetime import datetime
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/lib')
import scriptpath
scriptpath.add_oe_lib_path()
+scriptpath.add_bitbake_lib_path()
import oeqa.buildperf
from oeqa.buildperf import (BuildPerfTestLoader, BuildPerfTestResult,
BuildPerfTestRunner, KernelDropCaches)
from oeqa.utils.commands import runCmd
-from oeqa.utils.git import GitRepo, GitError
+from oeqa.utils.metadata import metadata_from_bb, write_metadata_file
# Set-up logging
@@ -71,31 +66,6 @@ def pre_run_sanity_check():
return False
return True
-def init_git_repo(path):
- """Check/create Git repository where to store results"""
- path = os.path.abspath(path)
- if os.path.isfile(path):
- log.error("Invalid Git repo %s: path exists but is not a directory", path)
- return False
- if not os.path.isdir(path):
- try:
- os.mkdir(path)
- except (FileNotFoundError, PermissionError) as err:
- log.error("Failed to mkdir %s: %s", path, err)
- return False
- if not os.listdir(path):
- log.info("Initializing a new Git repo at %s", path)
- GitRepo.init(path)
- try:
- GitRepo(path, is_topdir=True)
- except GitError:
- log.error("No Git repository but a non-empty directory found at %s.\n"
- "Please specify a Git repository, an empty directory or "
- "a non-existing directory", path)
- return False
- return True
-
-
def setup_file_logging(log_file):
    """Set up logging to file"""
log_dir = os.path.dirname(log_file)
@@ -115,6 +85,38 @@ def archive_build_conf(out_dir):
shutil.copytree(src_dir, tgt_dir)
+def update_globalres_file(result_obj, filename, metadata):
+ """Write results to globalres csv file"""
+ # Map test names to time and size columns in globalres
+ # The tuples represent index and length of times and sizes
+ # respectively
+ gr_map = {'test1': ((0, 1), (8, 1)),
+ 'test12': ((1, 1), (None, None)),
+ 'test13': ((2, 1), (9, 1)),
+ 'test2': ((3, 1), (None, None)),
+ 'test3': ((4, 3), (None, None)),
+ 'test4': ((7, 1), (10, 2))}
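+    # e.g. 'test3': ((4, 3), (None, None)) writes three time values into
+    # columns 4-6 of the row and no size columns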
+
+ values = ['0'] * 12
+ for status, test, _ in result_obj.all_results():
+ if status in ['ERROR', 'SKIPPED']:
+ continue
+ (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
+ if t_ind is not None:
+ values[t_ind:t_ind + t_len] = test.times
+ if s_ind is not None:
+ values[s_ind:s_ind + s_len] = test.sizes
+
+ log.debug("Writing globalres log to %s", filename)
+ rev_info = metadata['layers']['meta']
+ with open(filename, 'a') as fobj:
+ fobj.write('{},{}:{},{},'.format(metadata['hostname'],
+ rev_info['branch'],
+ rev_info['commit'],
+ rev_info['commit']))
+ fobj.write(','.join(values) + '\n')
+
+
def parse_args(argv):
"""Parse command line arguments"""
parser = argparse.ArgumentParser(
@@ -131,20 +133,13 @@ def parse_args(argv):
parser.add_argument('-o', '--out-dir', default='results-{date}',
type=os.path.abspath,
help="Output directory for test results")
+ parser.add_argument('-x', '--xml', action='store_true',
+ help='Enable JUnit xml output')
parser.add_argument('--log-file',
default='{out_dir}/oe-build-perf-test.log',
help="Log file of this script")
parser.add_argument('--run-tests', nargs='+', metavar='TEST',
help="List of tests to run")
- parser.add_argument('--commit-results', metavar='GIT_DIR',
- type=os.path.abspath,
- help="Commit result data to a (local) git repository")
- parser.add_argument('--commit-results-branch', metavar='BRANCH',
- default="{git_branch}",
- help="Commit results to branch BRANCH.")
- parser.add_argument('--commit-results-tag', metavar='TAG',
- default="{git_branch}/{git_commit_count}-g{git_commit}/{tag_num}",
- help="Tag results commit with TAG.")
return parser.parse_args(argv)
@@ -167,9 +162,6 @@ def main(argv=None):
if not pre_run_sanity_check():
return 1
- if args.commit_results:
- if not init_git_repo(args.commit_results):
- return 1
# Check our capability to drop caches and ask pass if needed
KernelDropCaches.check()
@@ -181,7 +173,19 @@ def main(argv=None):
else:
suite = loader.loadTestsFromModule(oeqa.buildperf)
+ # Save test metadata
+ metadata = metadata_from_bb()
+ log.info("Testing Git revision branch:commit %s:%s (%s)",
+ metadata['layers']['meta']['branch'],
+ metadata['layers']['meta']['commit'],
+ metadata['layers']['meta']['commit_count'])
+ if args.xml:
+ write_metadata_file(os.path.join(out_dir, 'metadata.xml'), metadata)
+ else:
+ with open(os.path.join(out_dir, 'metadata.json'), 'w') as fobj:
+ json.dump(metadata, fobj, indent=2)
archive_build_conf(out_dir)
+
runner = BuildPerfTestRunner(out_dir, verbosity=2)
# Suppress logger output to stderr so that the output from unittest
@@ -194,16 +198,17 @@ def main(argv=None):
# Restore logger output to stderr
log.handlers[0].setLevel(log.level)
+ if args.xml:
+ result.write_results_xml()
+ else:
+ result.write_results_json()
+ result.write_buildstats_json()
if args.globalres_file:
- result.update_globalres_file(args.globalres_file)
- if args.commit_results:
- result.git_commit_results(args.commit_results,
- args.commit_results_branch,
- args.commit_results_tag)
+ update_globalres_file(result, args.globalres_file, metadata)
if result.wasSuccessful():
return 0
- return 1
+ return 2
if __name__ == '__main__':
diff --git a/scripts/oe-buildenv-internal b/scripts/oe-buildenv-internal
index 9fae3b4ec3..c17cf4da71 100755
--- a/scripts/oe-buildenv-internal
+++ b/scripts/oe-buildenv-internal
@@ -4,19 +4,19 @@
#
# Copyright (C) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
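+# 'return' only succeeds while being sourced, so a failing 'return' means
+# this script was executed directly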
+if ! $(return >/dev/null 2>&1) ; then
+ echo 'oe-buildenv-internal: error: this script must be sourced'
+ echo ''
+ echo 'Usage: . $OEROOT/scripts/oe-buildenv-internal &&'
+ echo ''
+ echo 'OpenEmbedded oe-buildenv-internal - an internal script that is'
+ echo 'used in oe-init-build-env to initialize oe build environment'
+ echo ''
+ exit 2
+fi
# It is assumed OEROOT is already defined when this is called
if [ -z "$OEROOT" ]; then
@@ -29,22 +29,6 @@ if [ -z "$OE_SKIP_SDK_CHECK" ] && [ -n "$OECORE_SDK_VERSION" ]; then
return 1
fi
-# Make sure we're not using python v3.x as 'python', we don't support it.
-py_v2_check=$(/usr/bin/env python --version 2>&1 | grep "Python 3")
-if [ -n "$py_v2_check" ]; then
- echo >&2 "OpenEmbedded requires 'python' to be python v2 (>= 2.7.3), not python v3."
- echo >&2 "Please set up python v2 as your default 'python' interpreter."
- return 1
-fi
-unset py_v2_check
-
-py_v27_check=$(python -c 'import sys; print sys.version_info >= (2,7,3)')
-if [ "$py_v27_check" != "True" ]; then
- echo >&2 "OpenEmbedded requires 'python' to be python v2 (>= 2.7.3), not python v3."
- echo >&2 "Please upgrade your python v2."
-fi
-unset py_v27_check
-
# We potentially have code that doesn't parse correctly with older versions
# of Python, and rather than fixing that and being eternally vigilant for
# any other new feature use, just check the version here.
@@ -90,10 +74,14 @@ unset BDIR
if [ -z "$BITBAKEDIR" ]; then
BITBAKEDIR="$OEROOT/bitbake$BBEXTRA"
+ test -d "$BITBAKEDIR" || BITBAKEDIR="$OEROOT/../bitbake$BBEXTRA"
fi
BITBAKEDIR=$(readlink -f "$BITBAKEDIR")
BUILDDIR=$(readlink -f "$BUILDDIR")
+BBPATH=$BUILDDIR
+
+export BBPATH
if [ ! -d "$BITBAKEDIR" ]; then
echo >&2 "Error: The bitbake directory ($BITBAKEDIR) does not exist! Please ensure a copy of bitbake exists at this location or specify an alternative path on the command line"
diff --git a/scripts/oe-check-sstate b/scripts/oe-check-sstate
index d06efe436a..ca249ca67b 100755
--- a/scripts/oe-check-sstate
+++ b/scripts/oe-check-sstate
@@ -5,18 +5,8 @@
# Copyright 2016 Intel Corporation
# Authored-by: Paul Eggleton <paul.eggleton@intel.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/scripts/oe-depends-dot b/scripts/oe-depends-dot
new file mode 100755
index 0000000000..5eb3e12769
--- /dev/null
+++ b/scripts/oe-depends-dot
@@ -0,0 +1,157 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2018 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import sys
+import argparse
+import logging
+import re
+
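+# Typical usage, assuming a dot file produced by 'bitbake -g <target>':
+#   oe-depends-dot recipe-depends.dot -k busybox -d   # what busybox depends on
+#   oe-depends-dot recipe-depends.dot -k openssl -w   # why openssl is built
+#   oe-depends-dot recipe-depends.dot -r              # write a reduced dot file
+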
+class Dot(object):
+ def __init__(self):
+ parser = argparse.ArgumentParser(
+ description="Analyse recipe-depends.dot generated by bitbake -g",
+ epilog="Use %(prog)s --help to get help")
+ parser.add_argument("dotfile",
+ help = "Specify the dotfile", nargs = 1, action='store', default='')
+ parser.add_argument("-k", "--key",
+ help = "Specify the key, e.g., recipe name",
+ action="store", default='')
+ parser.add_argument("-d", "--depends",
+ help = "Print the key's dependencies",
+ action="store_true", default=False)
+ parser.add_argument("-w", "--why",
+ help = "Print why the key is built",
+ action="store_true", default=False)
+ parser.add_argument("-r", "--remove",
+ help = "Remove duplicated dependencies to reduce the size of the dot files."
+ " For example, A->B, B->C, A->C, then A->C can be removed.",
+ action="store_true", default=False)
+
+ self.args = parser.parse_args()
+
+        if len(sys.argv) != 3 and len(sys.argv) < 5:
+            print('ERROR: Not enough args, see --help for usage')
+            sys.exit(1)
+
+ @staticmethod
+ def insert_dep_chain(chain, rdeps, alldeps):
+ """
+        Insert elements into chain from rdeps, according to alldeps
+ """
+        # chain must contain at least one element
+        if len(chain) == 0:
+            raise ValueError('chain must contain at least one element')
+
+ inserted_elements = []
+ for rdep in rdeps:
+ if rdep in chain:
+ continue
+ else:
+ for i in range(0, len(chain)-1):
+ if chain[i] in alldeps[rdep] and rdep in alldeps[chain[i+1]]:
+ chain.insert(i+1, rdep)
+ inserted_elements.append(rdep)
+ break
+ if chain[-1] in alldeps[rdep] and rdep not in chain:
+ chain.append(rdep)
+ inserted_elements.append(rdep)
+ return inserted_elements
+
+ @staticmethod
+ def print_dep_chains(key, rdeps, alldeps):
+ rlist = rdeps.copy()
+ chain = []
+ removed_rdeps = [] # hold rdeps removed from rlist
+
+ chain.append(key)
+ while (len(rlist) != 0):
+ # insert chain from rlist
+ inserted_elements = Dot.insert_dep_chain(chain, rlist, alldeps)
+ if not inserted_elements:
+ if chain[-1] in rlist:
+ rlist.remove(chain[-1])
+ removed_rdeps.append(chain[-1])
+ chain.pop()
+ continue
+ else:
+ # insert chain from removed_rdeps
+ Dot.insert_dep_chain(chain, removed_rdeps, alldeps)
+ print(' -> '.join(list(reversed(chain))))
+
+ def main(self):
+ # The format is {key: depends}
+ depends = {}
+ with open(self.args.dotfile[0], 'r') as f:
+ for line in f.readlines():
+ if ' -> ' not in line:
+ continue
+ line_no_quotes = line.replace('"', '')
+ m = re.match("(.*) -> (.*)", line_no_quotes)
+ if not m:
+ print('WARNING: Found unexpected line: %s' % line)
+ continue
+ key = m.group(1)
+ if key == "meta-world-pkgdata":
+ continue
+ dep = m.group(2)
+ if key in depends:
+                if dep not in depends[key]:
+                    depends[key].add(dep)
+                else:
+                    print('WARNING: Found duplicated line: %s' % line)
+ else:
+ depends[key] = set()
+ depends[key].add(dep)
+
+ if self.args.remove:
+ reduced_depends = {}
+ for k, deps in depends.items():
+ child_deps = set()
+ # Both direct and indirect depends are already in the dict, so
+ # we don't have to do this recursively.
+ for dep in deps:
+ if dep in depends:
+ child_deps |= depends[dep]
+
+ reduced_depends[k] = deps - child_deps
+            outfile = '%s-reduced%s' % (self.args.dotfile[0][:-4], self.args.dotfile[0][-4:])
+ with open(outfile, 'w') as f:
+ print('Saving reduced dot file to %s' % outfile)
+ f.write('digraph depends {\n')
+ for k, v in reduced_depends.items():
+ for dep in v:
+ f.write('"%s" -> "%s"\n' % (k, dep))
+ f.write('}\n')
+ sys.exit(0)
+
+ if self.args.key not in depends:
+ print("ERROR: Can't find key %s in %s" % (self.args.key, self.args.dotfile[0]))
+ sys.exit(1)
+
+ if self.args.depends:
+ if self.args.key in depends:
+ print('Depends: %s' % ' '.join(depends[self.args.key]))
+
+ reverse_deps = []
+ if self.args.why:
+ for k, v in depends.items():
+ if self.args.key in v and not k in reverse_deps:
+ reverse_deps.append(k)
+ print('Because: %s' % ' '.join(reverse_deps))
+ Dot.print_dep_chains(self.args.key, reverse_deps, depends)
+
+if __name__ == "__main__":
+ try:
+ dot = Dot()
+ ret = dot.main()
+    except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/scripts/oe-find-native-sysroot b/scripts/oe-find-native-sysroot
index 81d62b8882..5146bbf999 100755
--- a/scripts/oe-find-native-sysroot
+++ b/scripts/oe-find-native-sysroot
@@ -2,14 +2,14 @@
#
# Find a native sysroot to use - either from an in-tree OE build or
# from a toolchain installation. It then ensures the variable
-# $OECORE_NATIVE_SYSROOT is set to the sysroot's base directory, and sets
+# $OECORE_NATIVE_SYSROOT is set to the sysroot's base directory, and sets
# $PSEUDO to the path of the pseudo binary.
#
# This script is intended to be run within other scripts by source'ing
# it, e.g:
#
# SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot`
-# . $SYSROOT_SETUP_SCRIPT
+# . $SYSROOT_SETUP_SCRIPT <recipe>
#
# This script will terminate execution of your calling program unless
# you set a variable $SKIP_STRICT_SYSROOT_CHECK to a non-empty string
@@ -17,18 +17,40 @@
#
# Copyright (c) 2010 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+if [ "$1" = '--help' -o "$1" = '-h' -o $# -ne 1 ] ; then
+ echo 'Usage: oe-find-native-sysroot <recipe> [-h|--help]'
+ echo ''
+ echo 'OpenEmbedded find-native-sysroot - helper script to set'
+ echo 'environment variables OECORE_NATIVE_SYSROOT and PSEUDO'
+ echo 'to the path of the native sysroot directory and pseudo'
+ echo 'executable binary'
+ echo ''
+ echo 'options:'
+    echo '  recipe      the recipe whose STAGING_DIR_NATIVE is used as the native sysroot'
+ echo ' -h, --help show this help message and exit'
+ echo ''
+ exit 2
+fi
+
+# Global vars
+BITBAKE_E=""
+set_oe_native_sysroot(){
+ echo "Running bitbake -e $1"
+ BITBAKE_E="`bitbake -e $1`"
+ OECORE_NATIVE_SYSROOT=`echo "$BITBAKE_E" | grep ^STAGING_DIR_NATIVE= | cut -d '"' -f2`
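+    # 'bitbake -e' prints STAGING_DIR_NATIVE="/path", so take the second
+    # field of the quote-delimited line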
+
+ if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
+ # This indicates that there was an error running bitbake -e that
+ # the user needs to be informed of
+ echo "There was an error running bitbake to determine STAGING_DIR_NATIVE"
+ echo "Here is the output from bitbake -e $1"
+        echo "$BITBAKE_E"
+ exit 1
+ fi
+}
if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
BITBAKE=`which bitbake 2> /dev/null`
@@ -40,10 +62,10 @@ if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
exit 1
fi
touch conf/sanity.conf
- OECORE_NATIVE_SYSROOT=`bitbake -e | grep ^STAGING_DIR_NATIVE | cut -d '"' -f2`
+ set_oe_native_sysroot $1
rm -f conf/sanity.conf
else
- OECORE_NATIVE_SYSROOT=`bitbake -e | grep ^STAGING_DIR_NATIVE | cut -d '"' -f2`
+ set_oe_native_sysroot $1
fi
else
echo "Error: Unable to locate bitbake command."
@@ -55,21 +77,15 @@ if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
fi
fi
-if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
- # This indicates that there was an error running bitbake -e that
- # the user needs to be informed of
- echo "There was an error running bitbake to determine STAGING_DIR_NATIVE"
- echo "Here is the output from bitbake -e"
- bitbake -e
- exit 1
-fi
-
-# Set up pseudo command
-if [ ! -e "$OECORE_NATIVE_SYSROOT/usr/bin/pseudo" ]; then
- echo "Error: Unable to find pseudo binary in $OECORE_NATIVE_SYSROOT/usr/bin/"
+if [ ! -e "$OECORE_NATIVE_SYSROOT/" ]; then
+ echo "Error: $OECORE_NATIVE_SYSROOT doesn't exist."
if [ "x$OECORE_DISTRO_VERSION" = "x" ]; then
- echo "Have you run 'bitbake meta-ide-support'?"
+ if [[ $1 =~ .*native.* ]]; then
+        echo "Have you run 'bitbake $1 -c addto_recipe_sysroot'?"
+    else
+        echo "Have you run 'bitbake $1'?"
+ fi
else
echo "This shouldn't happen - something is wrong with your toolchain installation"
fi
@@ -78,4 +94,10 @@ if [ ! -e "$OECORE_NATIVE_SYSROOT/usr/bin/pseudo" ]; then
exit 1
fi
fi
-PSEUDO="$OECORE_NATIVE_SYSROOT/usr/bin/pseudo"
+
+# Set up pseudo command
+pseudo="$OECORE_NATIVE_SYSROOT/usr/bin/pseudo"
+if [ -e "$pseudo" ]; then
+ echo "PSEUDO=$pseudo"
+ PSEUDO="$pseudo"
+fi
diff --git a/scripts/oe-git-archive b/scripts/oe-git-archive
new file mode 100755
index 0000000000..9305ed0b0f
--- /dev/null
+++ b/scripts/oe-git-archive
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+#
+# Helper script for committing data to git and pushing upstream
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import logging
+import os
+import re
+import sys
+
+# Import oe and bitbake libs
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(scripts_path, 'lib'))
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+scriptpath.add_oe_lib_path()
+
+from oeqa.utils.git import GitRepo, GitError
+from oeqa.utils.metadata import metadata_from_bb
+import oeqa.utils.gitarchive as gitarchive
+
+# Setup logging
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+log = logging.getLogger()
+
+
+def parse_args(argv):
+ """Parse command line arguments"""
+ parser = argparse.ArgumentParser(
+ description="Commit data to git and push upstream",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+ parser.add_argument('--debug', '-D', action='store_true',
+ help="Verbose logging")
+ parser.add_argument('--git-dir', '-g', required=True,
+ help="Local git directory to use")
+ parser.add_argument('--no-create', action='store_true',
+ help="If GIT_DIR is not a valid Git repository, do not "
+ "try to create one")
+ parser.add_argument('--bare', action='store_true',
+ help="Initialize a bare repository when creating a "
+ "new one")
+ parser.add_argument('--push', '-p', nargs='?', default=False, const=True,
+ help="Push to remote")
+ parser.add_argument('--branch-name', '-b',
+ default='{hostname}/{branch}/{machine}',
+ help="Git branch name (pattern) to use")
+ parser.add_argument('--no-tag', action='store_true',
+ help="Do not create Git tag")
+ parser.add_argument('--tag-name', '-t',
+ default='{hostname}/{branch}/{machine}/{commit_count}-g{commit}/{tag_number}',
+ help="Tag name (pattern) to use")
+ parser.add_argument('--commit-msg-subject',
+ default='Results of {branch}:{commit} on {hostname}',
+ help="Subject line (pattern) to use in the commit message")
+ parser.add_argument('--commit-msg-body',
+ default='branch: {branch}\ncommit: {commit}\nhostname: {hostname}',
+ help="Commit message body (pattern)")
+ parser.add_argument('--tag-msg-subject',
+ default='Test run #{tag_number} of {branch}:{commit} on {hostname}',
+ help="Subject line (pattern) of the tag message")
+ parser.add_argument('--tag-msg-body',
+ default='',
+ help="Tag message body (pattern)")
+ parser.add_argument('--exclude', action='append', default=[],
+ help="Glob to exclude files from the commit. Relative "
+ "to DATA_DIR. May be specified multiple times")
+ parser.add_argument('--notes', nargs=2, action='append', default=[],
+ metavar=('GIT_REF', 'FILE'),
+ help="Add a file as a note under refs/notes/GIT_REF. "
+ "{branch_name} in GIT_REF will be expanded to the "
+ "actual target branch name (specified by "
+ "--branch-name). This option may be specified "
+ "multiple times.")
+ parser.add_argument('data_dir', metavar='DATA_DIR',
+ help="Data to commit")
+ return parser.parse_args(argv)
+
+def get_nested(d, list_of_keys):
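+    # e.g. get_nested(md, ['layers', 'meta', 'branch']) returns
+    # md['layers']['meta']['branch'], or '' if any key is missing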
+ try:
+ for k in list_of_keys:
+ d = d[k]
+ return d
+ except KeyError:
+ return ""
+
+def main(argv=None):
+ args = parse_args(argv)
+ if args.debug:
+ log.setLevel(logging.DEBUG)
+
+ try:
+ # Get keywords to be used in tag and branch names and messages
+ metadata = metadata_from_bb()
+ keywords = {'hostname': get_nested(metadata, ['hostname']),
+ 'branch': get_nested(metadata, ['layers', 'meta', 'branch']),
+ 'commit': get_nested(metadata, ['layers', 'meta', 'commit']),
+ 'commit_count': get_nested(metadata, ['layers', 'meta', 'commit_count']),
+ 'machine': get_nested(metadata, ['config', 'MACHINE'])}
+
+ gitarchive.gitarchive(args.data_dir, args.git_dir, args.no_create, args.bare,
+ args.commit_msg_subject.strip(), args.commit_msg_body, args.branch_name,
+ args.no_tag, args.tag_name, args.tag_msg_subject, args.tag_msg_body,
+ args.exclude, args.notes, args.push, keywords, log)
+
+ except gitarchive.ArchiveError as err:
+ log.error(str(err))
+ return 1
+
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/scripts/oe-git-proxy b/scripts/oe-git-proxy
index 0078e95450..aa9b9dc9a9 100755
--- a/scripts/oe-git-proxy
+++ b/scripts/oe-git-proxy
@@ -13,16 +13,43 @@
# ALL_PROXY=https://proxy.example.com:8080
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0-only
#
# AUTHORS
# Darren Hart <dvhart@linux.intel.com>
+# Disable pathname expansion: NO_PROXY fields may start with "*" or consist solely of it
+set -f
+
+if [ $# -lt 2 -o "$1" = '--help' -o "$1" = '-h' ] ; then
+ echo 'oe-git-proxy: error: the following arguments are required: host port'
+ echo 'Usage: oe-git-proxy host port'
+ echo ''
+ echo 'OpenEmbedded git-proxy - a simple tool to be used via GIT_PROXY_COMMAND.'
+ echo 'It uses socat to make SOCKS or HTTPS proxy connections.'
+ echo 'It uses ALL_PROXY to determine the proxy server, protocol, and port.'
+	echo 'It uses NO_PROXY to skip using the proxy for a comma-delimited list'
+ echo 'of hosts, host globs (*.example.com), IPs, or CIDR masks (192.168.1.0/24).'
+	echo 'It is known to work with both bash and dash shells.'
+ echo ''
+ echo 'arguments:'
+ echo ' host proxy host to use'
+ echo ' port proxy port to use'
+ echo ''
+ echo 'options:'
+ echo ' -h, --help show this help message and exit'
+ echo ''
+ exit 2
+fi
+
# Locate the socat binary
-SOCAT=$(which socat 2>/dev/null)
-if [ $? -ne 0 ]; then
- echo "ERROR: socat binary not in PATH" 1>&2
- exit 1
+if [ -z "$SOCAT" ]; then
+ SOCAT=$(which socat 2>/dev/null)
+ if [ $? -ne 0 ]; then
+ echo "ERROR: socat binary not in PATH" 1>&2
+ exit 1
+ fi
fi
METHOD=""
@@ -37,7 +64,7 @@ ipv4_val() {
IP="$1"
SHIFT=24
VAL=0
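+	# Build the 32-bit integer value by shifting each octet into place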
- for B in ${IP//./ }; do
+ for B in $( echo "$IP" | tr '.' ' ' ); do
VAL=$(($VAL+$(($B<<$SHIFT))))
SHIFT=$(($SHIFT-8))
done
@@ -80,7 +107,7 @@ match_host() {
HOST=$1
GLOB=$2
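+	# ${HOST%%*$GLOB} strips the longest suffix matching *$GLOB; an empty
+	# remainder means HOST matches the glob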
- if [ -z "${HOST%%$GLOB}" ]; then
+ if [ -z "${HOST%%*$GLOB}" ]; then
return 0
fi
@@ -110,7 +137,7 @@ if [ -z "$ALL_PROXY" ]; then
fi
# Connect directly to hosts in NO_PROXY
-for H in ${NO_PROXY//,/ }; do
+for H in $( echo "$NO_PROXY" | tr ',' ' ' ); do
if match_host $1 $H; then
		exec $SOCAT STDIO TCP:$1:$2
fi
diff --git a/scripts/oe-gnome-terminal-phonehome b/scripts/oe-gnome-terminal-phonehome
index e02354883a..b6b9a3867b 100755
--- a/scripts/oe-gnome-terminal-phonehome
+++ b/scripts/oe-gnome-terminal-phonehome
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Gnome terminal won't tell us which PID a given command is run as
# or allow a single instance so we can't tell when it completes.
# This allows us to figure out the PID of the target so we can tell
diff --git a/scripts/oe-pkgdata-util b/scripts/oe-pkgdata-util
index bb917b4fc4..93220e3617 100755
--- a/scripts/oe-pkgdata-util
+++ b/scripts/oe-pkgdata-util
@@ -6,18 +6,7 @@
#
# Copyright 2012-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
@@ -40,9 +29,8 @@ def tinfoil_init():
import bb.tinfoil
import logging
tinfoil = bb.tinfoil.Tinfoil()
- tinfoil.prepare(True)
-
tinfoil.logger.setLevel(logging.WARNING)
+ tinfoil.prepare(True)
return tinfoil
@@ -174,15 +162,16 @@ def read_value(args):
logger.error("No packages specified")
sys.exit(1)
- def readvar(pkgdata_file, valuename):
+ def readvar(pkgdata_file, valuename, mappedpkg):
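+        # Accept both the plain "VAR:" key and the package-suffixed
+        # "VAR_<mappedpkg>:" key form used in pkgdata files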
val = ""
with open(pkgdata_file, 'r') as f:
for line in f:
- if line.startswith(valuename + ":"):
+ if (line.startswith(valuename + ":") or
+ line.startswith(valuename + "_" + mappedpkg + ":")):
val = line.split(': ', 1)[1].rstrip()
return val
- logger.debug("read-value('%s', '%s' '%s'" % (args.pkgdata_dir, args.valuename, packages))
+ logger.debug("read-value('%s', '%s' '%s')" % (args.pkgdata_dir, args.valuename, packages))
for package in packages:
pkg_split = package.split('_')
pkg_name = pkg_split[0]
@@ -192,18 +181,21 @@ def read_value(args):
if os.path.exists(revlink):
mappedpkg = os.path.basename(os.readlink(revlink))
qvar = args.valuename
+ value = readvar(revlink, qvar, mappedpkg)
if qvar == "PKGSIZE":
- # append packagename
- qvar = "%s_%s" % (args.valuename, mappedpkg)
                # PKGSIZE is now in bytes, but we want it in KB
- pkgsize = (int(readvar(revlink, qvar)) + 1024 // 2) // 1024
+ pkgsize = (int(value) + 1024 // 2) // 1024
value = "%d" % pkgsize
- else:
- value = readvar(revlink, qvar)
+ if args.unescape:
+ import codecs
+ # escape_decode() unescapes backslash encodings in byte streams
+ value = codecs.escape_decode(bytes(value, "utf-8"))[0].decode("utf-8")
if args.prefix_name:
print('%s %s' % (pkg_name, value))
else:
print(value)
+ else:
+ logger.debug("revlink %s does not exist", revlink)
def lookup_pkglist(pkgs, pkgdata_dir, reverse):
if reverse:
@@ -249,32 +241,74 @@ def lookup_pkg(args):
print('\n'.join(items))
def lookup_recipe(args):
+ def parse_pkgdatafile(pkgdatafile):
+ with open(pkgdatafile, 'r') as f:
+ found = False
+ for line in f:
+ if line.startswith('PN:'):
+ print("%s" % line.split(':', 1)[1].strip())
+ found = True
+ break
+ if not found:
+ logger.error("Unable to find PN entry in %s" % pkgdatafile)
+ sys.exit(1)
+
# Handle both multiple arguments and multiple values within an arg (old syntax)
pkgs = []
for pkgitem in args.pkg:
pkgs.extend(pkgitem.split())
- mappings = defaultdict(list)
for pkg in pkgs:
- pkgfile = os.path.join(args.pkgdata_dir, 'runtime-reverse', pkg)
- if os.path.exists(pkgfile):
- with open(pkgfile, 'r') as f:
- for line in f:
- fields = line.rstrip().split(': ')
- if fields[0] == 'PN':
- mappings[pkg].append(fields[1])
- break
- if len(mappings) < len(pkgs):
- missing = list(set(pkgs) - set(mappings.keys()))
- logger.error("The following packages could not be found: %s" % ', '.join(missing))
- sys.exit(1)
-
- items = []
- for pkg in pkgs:
- items.extend(mappings.get(pkg, []))
- print('\n'.join(items))
+ providepkgpath = os.path.join(args.pkgdata_dir, "runtime-rprovides", pkg)
+ if os.path.exists(providepkgpath):
+ for f in os.listdir(providepkgpath):
+ if f != pkg:
+ print("%s is in the RPROVIDES of %s:" % (pkg, f))
+ pkgdatafile = os.path.join(args.pkgdata_dir, "runtime", f)
+ parse_pkgdatafile(pkgdatafile)
+ continue
+ pkgdatafile = os.path.join(args.pkgdata_dir, 'runtime-reverse', pkg)
+ if os.path.exists(pkgdatafile):
+ parse_pkgdatafile(pkgdatafile)
+ else:
+ if args.carryon:
+                print("The following package could not be found: %s" % pkg)
+            else:
+                logger.error("The following package could not be found: %s" % pkg)
+ sys.exit(1)
def package_info(args):
+ def parse_pkgdatafile(pkgdatafile):
+ vars = ['PKGV', 'PKGE', 'PKGR', 'PN', 'PV', 'PE', 'PR', 'PKGSIZE']
+ if args.extra:
+ vars += args.extra
+ with open(pkgdatafile, 'r') as f:
+ vals = dict()
+ extra = ''
+ for line in f:
+ for var in vars:
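+                    # Match both "VAR:" and any package-suffixed "VAR_<pkg>:" key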
+                    m = re.match(var + r'(?:_\S+)?:\s*(.+?)\s*$', line)
+ if m:
+ vals[var] = m.group(1)
+            pkg_version = vals.get('PKGV') or ''
+            recipe = vals.get('PN') or ''
+            recipe_version = vals.get('PV') or ''
+            pkg_size = vals.get('PKGSIZE') or ''
+ if 'PKGE' in vals:
+ pkg_version = vals['PKGE'] + ":" + pkg_version
+ if 'PKGR' in vals:
+ pkg_version = pkg_version + "-" + vals['PKGR']
+ if 'PE' in vals:
+ recipe_version = vals['PE'] + ":" + recipe_version
+ if 'PR' in vals:
+ recipe_version = recipe_version + "-" + vals['PR']
+ if args.extra:
+ for var in args.extra:
+ if var in vals:
+ val = re.sub(r'\s+', ' ', vals[var])
+ extra += ' "%s"' % val
+ print("%s %s %s %s %s%s" % (pkg, pkg_version, recipe, recipe_version, pkg_size, extra))
+
# Handle both multiple arguments and multiple values within an arg (old syntax)
packages = []
if args.file:
@@ -290,44 +324,20 @@ def package_info(args):
logger.error("No packages specified")
sys.exit(1)
- mappings = defaultdict(lambda: defaultdict(str))
- for pkg in packages:
- pkgfile = os.path.join(args.pkgdata_dir, 'runtime-reverse', pkg)
- if os.path.exists(pkgfile):
- with open(pkgfile, 'r') as f:
- for line in f:
- fields = line.rstrip().split(': ')
- if fields[0].endswith("_" + pkg):
- k = fields[0][:len(fields[0]) - len(pkg) - 1]
- else:
- k = fields[0]
- v = fields[1] if len(fields) == 2 else ""
- mappings[pkg][k] = v
-
- if len(mappings) < len(packages):
- missing = list(set(packages) - set(mappings.keys()))
- logger.error("The following packages could not be found: %s" %
- ', '.join(missing))
- sys.exit(1)
-
- items = []
for pkg in packages:
- pkg_version = mappings[pkg]['PKGV']
- if mappings[pkg]['PKGE']:
- pkg_version = mappings[pkg]['PKGE'] + ":" + pkg_version
- if mappings[pkg]['PKGR']:
- pkg_version = pkg_version + "-" + mappings[pkg]['PKGR']
- recipe = mappings[pkg]['PN']
- recipe_version = mappings[pkg]['PV']
- if mappings[pkg]['PE']:
- recipe_version = mappings[pkg]['PE'] + ":" + recipe_version
- if mappings[pkg]['PR']:
- recipe_version = recipe_version + "-" + mappings[pkg]['PR']
- pkg_size = mappings[pkg]['PKGSIZE']
-
- items.append("%s %s %s %s %s" %
- (pkg, pkg_version, recipe, recipe_version, pkg_size))
- print('\n'.join(items))
+ providepkgpath = os.path.join(args.pkgdata_dir, "runtime-rprovides", pkg)
+ if os.path.exists(providepkgpath):
+ for f in os.listdir(providepkgpath):
+ if f != pkg:
+ print("%s is in the RPROVIDES of %s:" % (pkg, f))
+ pkgdatafile = os.path.join(args.pkgdata_dir, "runtime", f)
+ parse_pkgdatafile(pkgdatafile)
+ continue
+ pkgdatafile = os.path.join(args.pkgdata_dir, "runtime-reverse", pkg)
+ if not os.path.exists(pkgdatafile):
+ logger.error("Unable to find any built runtime package named %s" % pkg)
+ sys.exit(1)
+ parse_pkgdatafile(pkgdatafile)
def get_recipe_pkgs(pkgdata_dir, recipe, unpackaged):
recipedatafile = os.path.join(pkgdata_dir, recipe)
@@ -379,21 +389,16 @@ def list_pkgs(args):
return False
return True
+ pkglist = []
if args.recipe:
packages = get_recipe_pkgs(args.pkgdata_dir, args.recipe, args.unpackaged)
if args.runtime:
- pkglist = []
runtime_pkgs = lookup_pkglist(packages, args.pkgdata_dir, False)
for rtpkgs in runtime_pkgs.values():
pkglist.extend(rtpkgs)
else:
pkglist = packages
-
- for pkg in pkglist:
- if matchpkg(pkg):
- found = True
- print("%s" % pkg)
else:
if args.runtime:
searchdir = 'runtime-reverse'
@@ -404,9 +409,13 @@ def list_pkgs(args):
for fn in files:
if fn.endswith('.packaged'):
continue
- if matchpkg(fn):
- found = True
- print("%s" % fn)
+ pkglist.append(fn)
+
+ for pkg in sorted(pkglist):
+ if matchpkg(pkg):
+ found = True
+ print("%s" % pkg)
+
if not found:
if args.pkgspec:
logger.error("Unable to find any package matching %s" % args.pkgspec)
@@ -416,6 +425,26 @@ def list_pkgs(args):
def list_pkg_files(args):
import json
+ def parse_pkgdatafile(pkgdatafile, long=False):
+ with open(pkgdatafile, 'r') as f:
+ found = False
+ for line in f:
+ if line.startswith('FILES_INFO:'):
+ found = True
+ val = line.split(':', 1)[1].strip()
+ dictval = json.loads(val)
+ if long:
+ width = max(map(len, dictval), default=0)
+ for fullpth in sorted(dictval):
+ print("\t{:{width}}\t{}".format(fullpth, dictval[fullpth], width=width))
+ else:
+ for fullpth in sorted(dictval):
+ print("\t%s" % fullpth)
+ break
+ if not found:
+ logger.error("Unable to find FILES_INFO entry in %s" % pkgdatafile)
+ sys.exit(1)
+
if args.recipe:
if args.pkg:
@@ -445,25 +474,22 @@ def list_pkg_files(args):
continue
logger.error("Unable to find any built runtime package named %s" % pkg)
sys.exit(1)
+ parse_pkgdatafile(pkgdatafile, args.long)
+
else:
+ providepkgpath = os.path.join(args.pkgdata_dir, "runtime-rprovides", pkg)
+ if os.path.exists(providepkgpath):
+ for f in os.listdir(providepkgpath):
+ if f != pkg:
+ print("%s is in the RPROVIDES of %s:" % (pkg, f))
+ pkgdatafile = os.path.join(args.pkgdata_dir, "runtime", f)
+ parse_pkgdatafile(pkgdatafile, args.long)
+ continue
pkgdatafile = os.path.join(args.pkgdata_dir, "runtime", pkg)
if not os.path.exists(pkgdatafile):
logger.error("Unable to find any built recipe-space package named %s" % pkg)
sys.exit(1)
-
- with open(pkgdatafile, 'r') as f:
- found = False
- for line in f:
- if line.startswith('FILES_INFO:'):
- found = True
- val = line.split(':', 1)[1].strip()
- dictval = json.loads(val)
- for fullpth in sorted(dictval):
- print("\t%s" % fullpth)
- break
- if not found:
- logger.error("Unable to find FILES_INFO entry in %s" % pkgdatafile)
- sys.exit(1)
+ parse_pkgdatafile(pkgdatafile, args.long)
def find_path(args):
import json
@@ -517,19 +543,22 @@ def main():
parser_list_pkg_files.add_argument('-r', '--runtime', help='Specified package(s) are runtime package names instead of recipe-space package names', action='store_true')
parser_list_pkg_files.add_argument('-p', '--recipe', help='Report on all packages produced by the specified recipe')
parser_list_pkg_files.add_argument('-u', '--unpackaged', help='Include unpackaged (i.e. empty) packages (only useful with -p/--recipe)', action='store_true')
+ parser_list_pkg_files.add_argument('-l', '--long', help='Show more information per file', action='store_true')
parser_list_pkg_files.set_defaults(func=list_pkg_files)
parser_lookup_recipe = subparsers.add_parser('lookup-recipe',
help='Find recipe producing one or more packages',
description='Looks up the specified runtime package(s) to see which recipe they were produced by')
parser_lookup_recipe.add_argument('pkg', nargs='+', help='Runtime package name to look up')
+    parser_lookup_recipe.add_argument('-c', '--continue', dest="carryon", help='Continue looking up recipes even if we cannot find one', action='store_true')
parser_lookup_recipe.set_defaults(func=lookup_recipe)
parser_package_info = subparsers.add_parser('package-info',
- help='Shows version, recipe and size information for one or more packages',
+ help='Show version, recipe and size information for one or more packages',
description='Looks up the specified runtime package(s) and display information')
parser_package_info.add_argument('pkg', nargs='*', help='Runtime package name to look up')
parser_package_info.add_argument('-f', '--file', help='Read package names from the specified file (one per line, first field only)')
+ parser_package_info.add_argument('-e', '--extra', help='Extra variables to display, e.g., LICENSE (can be specified multiple times)', action='append')
parser_package_info.set_defaults(func=package_info)
parser_find_path = subparsers.add_parser('find-path',
@@ -545,6 +574,7 @@ def main():
parser_read_value.add_argument('pkg', nargs='*', help='Runtime package name to look up')
parser_read_value.add_argument('-f', '--file', help='Read package names from the specified file (one per line, first field only)')
parser_read_value.add_argument('-n', '--prefix-name', help='Prefix output with package name', action='store_true')
+ parser_read_value.add_argument('-u', '--unescape', help='Expand escapes such as \\n', action='store_true')
parser_read_value.set_defaults(func=read_value)
parser_glob = subparsers.add_parser('glob',
@@ -570,7 +600,7 @@ def main():
logger.debug('Found bitbake path: %s' % bitbakepath)
tinfoil = tinfoil_init()
try:
- args.pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR', True)
+ args.pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
finally:
tinfoil.shutdown()
logger.debug('Value of PKGDATA_DIR is "%s"' % args.pkgdata_dir)
diff --git a/scripts/oe-publish-sdk b/scripts/oe-publish-sdk
index 4fe8974dee..4b70f436b1 100755
--- a/scripts/oe-publish-sdk
+++ b/scripts/oe-publish-sdk
@@ -1,21 +1,11 @@
#!/usr/bin/env python3
-
+#
# OpenEmbedded SDK publishing tool
-
-# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -114,9 +104,9 @@ def publish(args):
# Setting up the git repo
if not is_remote:
- cmd = 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; mv .git/hooks/post-update.sample .git/hooks/post-update; echo "*.pyc\n*.pyo" > .gitignore; fi; git add -A .; git config user.email "oe@oe.oe" && git config user.name "OE" && git commit -q -m "init repo" || true; git update-server-info' % (destination, destination)
+ cmd = 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo "*.pyc\n*.pyo\npyshtables.py" > .gitignore; fi; git add -A .; git config user.email "oe@oe.oe" && git config user.name "OE" && git commit -q -m "init repo" || true' % (destination, destination)
else:
- cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; mv .git/hooks/post-update.sample .git/hooks/post-update; echo '*.pyc\n*.pyo' > .gitignore; fi; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true; git update-server-info'" % (host, destdir, destdir)
+ cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo '*.pyc\n*.pyo\npyshtables.py' > .gitignore; fi; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true'" % (host, destdir, destdir)
ret = subprocess.call(cmd, shell=True)
if ret == 0:
logger.info('SDK published successfully')
diff --git a/scripts/oe-pylint b/scripts/oe-pylint
new file mode 100755
index 0000000000..7cc1ccb010
--- /dev/null
+++ b/scripts/oe-pylint
@@ -0,0 +1,13 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Run pylint3 against our common Python module spaces and print a report of potential issues
+#
+this_dir=$(dirname $(readlink -f $0))
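+# -E limits pylint3 to error-class messages; the -d options disable specific checks by name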
+ERRORS="-E"
+IGNORELIST="$ERRORS -d logging-too-many-args -d missing-docstring -d line-too-long -d invalid-name"
+PYTHONPATH=$this_dir/../bitbake/lib/ pylint3 $IGNORELIST bb
+PYTHONPATH=$this_dir/../bitbake/lib/:$this_dir/../meta/lib pylint3 $IGNORELIST -d undefined-variable oe
+PYTHONPATH=$this_dir/../bitbake/lib/:$this_dir/../meta/lib pylint3 $IGNORELIST oeqa
+PYTHONPATH=$this_dir/../bitbake/lib/:$this_dir/../meta/lib:$this_dir/lib pylint3 $IGNORELIST -d undefined-variable argparse_oe buildstats devtool recipetool scriptpath testcasemgmt build_perf checklayer resulttool scriptutils wic
\ No newline at end of file
diff --git a/scripts/oe-run-native b/scripts/oe-run-native
new file mode 100755
index 0000000000..bea5d696d7
--- /dev/null
+++ b/scripts/oe-run-native
@@ -0,0 +1,56 @@
+#!/bin/bash
+#
+# Copyright (c) 2016, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+
+#
+# This script runs tools from the native OE sysroot
+#
+
+if [ $# -lt 1 -o "$1" = '--help' -o "$1" = '-h' ] ; then
+    echo 'oe-run-native: error: the following arguments are required: <native recipe> <native tool>'
+ echo 'Usage: oe-run-native native-recipe tool [parameters]'
+ echo ''
+ echo 'OpenEmbedded run-native - runs native tools'
+ echo ''
+ echo 'arguments:'
+    echo '  native-recipe      The recipe which provides the tool'
+    echo '  tool               Native tool to run'
+ echo ''
+ exit 2
+fi
+
+native_recipe="$1"
+tool="$2"
+
+if [ "${native_recipe%-native}" = "$native_recipe" ]; then
+    echo "Error: $native_recipe is not a native recipe"
+    echo 'Error: Use "oe-run-native -h" for help'
+ exit 1
+fi
+
+shift
+
+SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot 2> /dev/null`
+if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
+ echo "Error: Unable to find oe-find-native-sysroot script"
+ exit 1
+fi
+. $SYSROOT_SETUP_SCRIPT $native_recipe
+
+OLD_PATH=$PATH
+
+# look for a tool only in native sysroot
+PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin$(find $OECORE_NATIVE_SYSROOT/usr/bin/*-native -maxdepth 1 -type d -printf ":%p")
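+# The find call appends each usr/bin/*-native subdirectory to PATH as ":<dir>"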
+tool_find=`/usr/bin/which $tool 2>/dev/null`
+
+if [ -n "$tool_find" ] ; then
+ # add old path to allow usage of host tools
+ PATH=$PATH:$OLD_PATH "$@"
+else
+ echo "Error: Unable to find '$tool' in $PATH"
+    echo "Error: Have you run 'bitbake $native_recipe -c addto_recipe_sysroot'?"
+ exit 1
+fi
diff --git a/scripts/oe-selftest b/scripts/oe-selftest
index d9ffd40e8c..18ac0f5869 100755
--- a/scripts/oe-selftest
+++ b/scripts/oe-selftest
@@ -1,19 +1,9 @@
#!/usr/bin/env python3
-# Copyright (c) 2013 Intel Corporation
+# Copyright (c) 2013-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# DESCRIPTION
# This script runs tests defined in meta/lib/oeqa/selftest/
@@ -25,652 +15,51 @@
# E.g.: "oe-selftest -r bblayers.BitbakeLayers" will run just the BitbakeLayers class from meta/lib/oeqa/selftest/bblayers.py
+
import os
import sys
-import unittest
-import logging
import argparse
-import subprocess
-import time as t
-import re
-import fnmatch
-import collections
-import imp
+import logging
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/lib')
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import argparse_oe
+import scriptutils
import scriptpath
-scriptpath.add_bitbake_lib_path()
scriptpath.add_oe_lib_path()
-import argparse_oe
-
-import oeqa.selftest
-import oeqa.utils.ftools as ftools
-from oeqa.utils.commands import runCmd, get_bb_var, get_test_layer
-from oeqa.selftest.base import oeSelfTest, get_available_machines
-
-try:
- import xmlrunner
- from xmlrunner.result import _XMLTestResult as TestResult
- from xmlrunner import XMLTestRunner as _TestRunner
-except ImportError:
- # use the base runner instead
- from unittest import TextTestResult as TestResult
- from unittest import TextTestRunner as _TestRunner
-
-log_prefix = "oe-selftest-" + t.strftime("%Y%m%d-%H%M%S")
-
-def logger_create():
- log_file = log_prefix + ".log"
- if os.path.exists("oe-selftest.log"): os.remove("oe-selftest.log")
- os.symlink(log_file, "oe-selftest.log")
-
- log = logging.getLogger("selftest")
- log.setLevel(logging.DEBUG)
-
- fh = logging.FileHandler(filename=log_file, mode='w')
- fh.setLevel(logging.DEBUG)
-
- ch = logging.StreamHandler(sys.stdout)
- ch.setLevel(logging.INFO)
-
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
- fh.setFormatter(formatter)
- ch.setFormatter(formatter)
-
- log.addHandler(fh)
- log.addHandler(ch)
+scriptpath.add_bitbake_lib_path()
- return log
+from oeqa.utils import load_test_components
+from oeqa.core.exception import OEQAPreRun
-log = logger_create()
+logger = scriptutils.logger_create('oe-selftest', stream=sys.stdout, keepalive=True)
-def get_args_parser():
- description = "Script that runs unit tests agains bitbake and other Yocto related tools. The goal is to validate tools functionality and metadata integrity. Refer to https://wiki.yoctoproject.org/wiki/Oe-selftest for more information."
+def main():
+    description = "Script that runs unit tests against bitbake and other Yocto-related tools. The goal is to validate tool functionality and metadata integrity. Refer to https://wiki.yoctoproject.org/wiki/Oe-selftest for more information."
parser = argparse_oe.ArgumentParser(description=description)
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument('-r', '--run-tests', required=False, action='store', nargs='*', dest="run_tests", default=None, help='Select what tests to run (modules, classes or test methods). Format should be: <module>.<class>.<test_method>')
- group.add_argument('-a', '--run-all-tests', required=False, action="store_true", dest="run_all_tests", default=False, help='Run all (unhidden) tests')
- group.add_argument('-m', '--list-modules', required=False, action="store_true", dest="list_modules", default=False, help='List all available test modules.')
- group.add_argument('--list-classes', required=False, action="store_true", dest="list_allclasses", default=False, help='List all available test classes.')
- parser.add_argument('--coverage', action="store_true", help="Run code coverage when testing")
- parser.add_argument('--coverage-source', dest="coverage_source", nargs="+", help="Specifiy the directories to take coverage from")
- parser.add_argument('--coverage-include', dest="coverage_include", nargs="+", help="Specify extra patterns to include into the coverage measurement")
- parser.add_argument('--coverage-omit', dest="coverage_omit", nargs="+", help="Specify with extra patterns to exclude from the coverage measurement")
- group.add_argument('--run-tests-by', required=False, dest='run_tests_by', default=False, nargs='*',
- help='run-tests-by <name|class|module|id|tag> <list of tests|classes|modules|ids|tags>')
- group.add_argument('--list-tests-by', required=False, dest='list_tests_by', default=False, nargs='*',
- help='list-tests-by <name|class|module|id|tag> <list of tests|classes|modules|ids|tags>')
- group.add_argument('-l', '--list-tests', required=False, action="store_true", dest="list_tests", default=False,
- help='List all available tests.')
- group.add_argument('--list-tags', required=False, dest='list_tags', default=False, action="store_true",
- help='List all tags that have been set to test cases.')
- parser.add_argument('--machine', required=False, dest='machine', choices=['random', 'all'], default=None,
- help='Run tests on different machines (random/all).')
- return parser
-
-
-def preflight_check():
-
- log.info("Checking that everything is in order before running the tests")
-
- if not os.environ.get("BUILDDIR"):
- log.error("BUILDDIR isn't set. Did you forget to source your build environment setup script?")
- return False
-
- builddir = os.environ.get("BUILDDIR")
- if os.getcwd() != builddir:
- log.info("Changing cwd to %s" % builddir)
- os.chdir(builddir)
-
- if not "meta-selftest" in get_bb_var("BBLAYERS"):
- log.error("You don't seem to have the meta-selftest layer in BBLAYERS")
- return False
-
- log.info("Running bitbake -p")
- runCmd("bitbake -p")
-
- return True
-
-def add_include():
- builddir = os.environ.get("BUILDDIR")
- if "#include added by oe-selftest.py" \
- not in ftools.read_file(os.path.join(builddir, "conf/local.conf")):
- log.info("Adding: \"include selftest.inc\" in local.conf")
- ftools.append_file(os.path.join(builddir, "conf/local.conf"), \
- "\n#include added by oe-selftest.py\ninclude machine.inc\ninclude selftest.inc")
-
- if "#include added by oe-selftest.py" \
- not in ftools.read_file(os.path.join(builddir, "conf/bblayers.conf")):
- log.info("Adding: \"include bblayers.inc\" in bblayers.conf")
- ftools.append_file(os.path.join(builddir, "conf/bblayers.conf"), \
- "\n#include added by oe-selftest.py\ninclude bblayers.inc")
-def remove_include():
- builddir = os.environ.get("BUILDDIR")
- if builddir is None:
- return
- if "#include added by oe-selftest.py" \
- in ftools.read_file(os.path.join(builddir, "conf/local.conf")):
- log.info("Removing the include from local.conf")
- ftools.remove_from_file(os.path.join(builddir, "conf/local.conf"), \
- "\n#include added by oe-selftest.py\ninclude machine.inc\ninclude selftest.inc")
-
- if "#include added by oe-selftest.py" \
- in ftools.read_file(os.path.join(builddir, "conf/bblayers.conf")):
- log.info("Removing the include from bblayers.conf")
- ftools.remove_from_file(os.path.join(builddir, "conf/bblayers.conf"), \
- "\n#include added by oe-selftest.py\ninclude bblayers.inc")
-
-def remove_inc_files():
- try:
- os.remove(os.path.join(os.environ.get("BUILDDIR"), "conf/selftest.inc"))
- for root, _, files in os.walk(get_test_layer()):
- for f in files:
- if f == 'test_recipe.inc':
- os.remove(os.path.join(root, f))
- except (AttributeError, OSError,) as e: # AttributeError may happen if BUILDDIR is not set
- pass
-
- for incl_file in ['conf/bblayers.inc', 'conf/machine.inc']:
- try:
- os.remove(os.path.join(os.environ.get("BUILDDIR"), incl_file))
- except:
- pass
-
-
-def get_tests_modules(include_hidden=False):
- modules_list = list()
- for modules_path in oeqa.selftest.__path__:
- for (p, d, f) in os.walk(modules_path):
- files = sorted([f for f in os.listdir(p) if f.endswith('.py') and not (f.startswith('_') and not include_hidden) and not f.startswith('__') and f != 'base.py'])
- for f in files:
- submodules = p.split("selftest")[-1]
- module = ""
- if submodules:
- module = 'oeqa.selftest' + submodules.replace("/",".") + "." + f.split('.py')[0]
- else:
- module = 'oeqa.selftest.' + f.split('.py')[0]
- if module not in modules_list:
- modules_list.append(module)
- return modules_list
-
-
-def get_tests(exclusive_modules=[], include_hidden=False):
- test_modules = list()
- for x in exclusive_modules:
- test_modules.append('oeqa.selftest.' + x)
- if not test_modules:
- inc_hidden = include_hidden
- test_modules = get_tests_modules(inc_hidden)
-
- return test_modules
-
-
-class Tc:
- def __init__(self, tcname, tcclass, tcmodule, tcid=None, tctag=None):
- self.tcname = tcname
- self.tcclass = tcclass
- self.tcmodule = tcmodule
- self.tcid = tcid
- # A test case can have multiple tags (as tuples) otherwise str will suffice
- self.tctag = tctag
- self.fullpath = '.'.join(['oeqa', 'selftest', tcmodule, tcclass, tcname])
-
-
-def get_tests_from_module(tmod):
- tlist = []
- prefix = 'oeqa.selftest.'
+ comp_name, comp = load_test_components(logger, 'oe-selftest').popitem()
+ comp.register_commands(logger, parser)
try:
- import importlib
- modlib = importlib.import_module(tmod)
- for mod in list(vars(modlib).values()):
- if isinstance(mod, type(oeSelfTest)) and issubclass(mod, oeSelfTest) and mod is not oeSelfTest:
- for test in dir(mod):
- if test.startswith('test_') and hasattr(vars(mod)[test], '__call__'):
- # Get test case id and feature tag
- # NOTE: if testcase decorator or feature tag not set will throw error
- try:
- tid = vars(mod)[test].test_case
- except:
- print('DEBUG: tc id missing for ' + str(test))
- tid = None
- try:
- ttag = vars(mod)[test].tag__feature
- except:
- # print('DEBUG: feature tag missing for ' + str(test))
- ttag = None
-
- # NOTE: for some reason lstrip() doesn't work for mod.__module__
- tlist.append(Tc(test, mod.__name__, mod.__module__.replace(prefix, ''), tid, ttag))
- except:
- pass
-
- return tlist
-
-
-def get_all_tests():
- # Get all the test modules (except the hidden ones)
- testlist = []
- tests_modules = get_tests_modules()
- # Get all the tests from modules
- for tmod in sorted(tests_modules):
- testlist += get_tests_from_module(tmod)
- return testlist
-
-
-def get_testsuite_by(criteria, keyword):
- # Get a testsuite based on 'keyword'
- # criteria: name, class, module, id, tag
- # keyword: a list of tests, classes, modules, ids, tags
-
- ts = []
- all_tests = get_all_tests()
-
- def get_matches(values):
- # Get an item and return the ones that match with keyword(s)
- # values: the list of items (names, modules, classes...)
- result = []
- remaining = values[:]
- for key in keyword:
- found = False
- if key in remaining:
- # Regular matching of exact item
- result.append(key)
- remaining.remove(key)
- found = True
- else:
- # Wildcard matching
- pattern = re.compile(fnmatch.translate(r"%s" % key))
- added = [x for x in remaining if pattern.match(x)]
- if added:
- result.extend(added)
- remaining = [x for x in remaining if x not in added]
- found = True
- if not found:
- log.error("Failed to find test: %s" % key)
-
- return result
-
- if criteria == 'name':
- names = get_matches([ tc.tcname for tc in all_tests ])
- ts = [ tc for tc in all_tests if tc.tcname in names ]
-
- elif criteria == 'class':
- classes = get_matches([ tc.tcclass for tc in all_tests ])
- ts = [ tc for tc in all_tests if tc.tcclass in classes ]
-
- elif criteria == 'module':
- modules = get_matches([ tc.tcmodule for tc in all_tests ])
- ts = [ tc for tc in all_tests if tc.tcmodule in modules ]
-
- elif criteria == 'id':
- ids = get_matches([ str(tc.tcid) for tc in all_tests ])
- ts = [ tc for tc in all_tests if str(tc.tcid) in ids ]
-
- elif criteria == 'tag':
- values = set()
- for tc in all_tests:
- # tc can have multiple tags (as tuple) otherwise str will suffice
- if isinstance(tc.tctag, tuple):
- values |= { str(tag) for tag in tc.tctag }
- else:
- values.add(str(tc.tctag))
-
- tags = get_matches(list(values))
-
- for tc in all_tests:
- for tag in tags:
- if isinstance(tc.tctag, tuple) and tag in tc.tctag:
- ts.append(tc)
- elif tag == tc.tctag:
- ts.append(tc)
-
- # Remove duplicates from the list
- ts = list(set(ts))
-
- return ts
-
-
-def list_testsuite_by(criteria, keyword):
- # Get a testsuite based on 'keyword'
- # criteria: name, class, module, id, tag
- # keyword: a list of tests, classes, modules, ids, tags
-
- ts = sorted([ (tc.tcid, tc.tctag, tc.tcname, tc.tcclass, tc.tcmodule) for tc in get_testsuite_by(criteria, keyword) ])
-
- print('%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % ('id', 'tag', 'name', 'class', 'module'))
- print('_' * 150)
- for t in ts:
- if isinstance(t[1], (tuple, list)):
- print('%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % (t[0], ', '.join(t[1]), t[2], t[3], t[4]))
- else:
- print('%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % t)
- print('_' * 150)
- print('Filtering by:\t %s' % criteria)
- print('Looking for:\t %s' % ', '.join(str(x) for x in keyword))
- print('Total found:\t %s' % len(ts))
-
-
-def list_tests():
- # List all available oe-selftest tests
-
- ts = get_all_tests()
-
- print('%-4s\t%-10s\t%-50s' % ('id', 'tag', 'test'))
- print('_' * 80)
- for t in ts:
- if isinstance(t.tctag, (tuple, list)):
- print('%-4s\t%-10s\t%-50s' % (t.tcid, ', '.join(t.tctag), '.'.join([t.tcmodule, t.tcclass, t.tcname])))
- else:
- print('%-4s\t%-10s\t%-50s' % (t.tcid, t.tctag, '.'.join([t.tcmodule, t.tcclass, t.tcname])))
- print('_' * 80)
- print('Total found:\t %s' % len(ts))
-
-def list_tags():
- # Get all tags set to test cases
- # This is useful when setting tags to test cases
- # The list of tags should be kept as minimal as possible
- tags = set()
- all_tests = get_all_tests()
-
- for tc in all_tests:
- if isinstance(tc.tctag, (tuple, list)):
- tags.update(set(tc.tctag))
- else:
- tags.add(tc.tctag)
-
- print('Tags:\t%s' % ', '.join(str(x) for x in tags))
-
-def coverage_setup(coverage_source, coverage_include, coverage_omit):
- """ Set up the coverage measurement for the testcases to be run """
- import datetime
- import subprocess
- builddir = os.environ.get("BUILDDIR")
- pokydir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
- curcommit= subprocess.check_output(["git", "--git-dir", os.path.join(pokydir, ".git"), "rev-parse", "HEAD"]).decode('utf-8')
- coveragerc = "%s/.coveragerc" % builddir
- data_file = "%s/.coverage." % builddir
- data_file += datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
- if os.path.isfile(data_file):
- os.remove(data_file)
- with open(coveragerc, 'w') as cps:
- cps.write("# Generated with command '%s'\n" % " ".join(sys.argv))
- cps.write("# HEAD commit %s\n" % curcommit.strip())
- cps.write("[run]\n")
- cps.write("data_file = %s\n" % data_file)
- cps.write("branch = True\n")
- # Measure just BBLAYERS, scripts and bitbake folders
- cps.write("source = \n")
- if coverage_source:
- for directory in coverage_source:
- if not os.path.isdir(directory):
- log.warn("Directory %s is not valid.", directory)
- cps.write(" %s\n" % directory)
- else:
- for layer in get_bb_var('BBLAYERS').split():
- cps.write(" %s\n" % layer)
- cps.write(" %s\n" % os.path.dirname(os.path.realpath(__file__)))
- cps.write(" %s\n" % os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),'bitbake'))
-
- if coverage_include:
- cps.write("include = \n")
- for pattern in coverage_include:
- cps.write(" %s\n" % pattern)
- if coverage_omit:
- cps.write("omit = \n")
- for pattern in coverage_omit:
- cps.write(" %s\n" % pattern)
-
- return coveragerc
-
-def coverage_report():
- """ Loads the coverage data gathered and reports it back """
- try:
- # Coverage4 uses coverage.Coverage
- from coverage import Coverage
- except:
- # Coverage under version 4 uses coverage.coverage
- from coverage import coverage as Coverage
-
- import io as StringIO
- from coverage.misc import CoverageException
-
- cov_output = StringIO.StringIO()
- # Creating the coverage data with the setting from the configuration file
- cov = Coverage(config_file = os.environ.get('COVERAGE_PROCESS_START'))
- try:
- # Load data from the data file specified in the configuration
- cov.load()
- # Store report data in a StringIO variable
- cov.report(file = cov_output, show_missing=False)
- log.info("\n%s" % cov_output.getvalue())
- except CoverageException as e:
- # Show problems with the reporting. Since Coverage4 not finding any data to report raises an exception
- log.warn("%s" % str(e))
- finally:
- cov_output.close()
-
-
-def main():
- parser = get_args_parser()
- args = parser.parse_args()
-
- # Add <layer>/lib to sys.path, so layers can add selftests
- log.info("Running bitbake -e to get BBPATH")
- bbpath = get_bb_var('BBPATH').split(':')
- layer_libdirs = [p for p in (os.path.join(l, 'lib') for l in bbpath) if os.path.exists(p)]
- sys.path.extend(layer_libdirs)
- imp.reload(oeqa.selftest)
-
- if args.run_tests_by and len(args.run_tests_by) >= 2:
- valid_options = ['name', 'class', 'module', 'id', 'tag']
- if args.run_tests_by[0] not in valid_options:
- print('--run-tests-by %s not a valid option. Choose one of <name|class|module|id|tag>.' % args.run_tests_by[0])
- return 1
- else:
- criteria = args.run_tests_by[0]
- keyword = args.run_tests_by[1:]
- ts = sorted([ tc.fullpath for tc in get_testsuite_by(criteria, keyword) ])
- if not ts:
- return 1
-
- if args.list_tests_by and len(args.list_tests_by) >= 2:
- valid_options = ['name', 'class', 'module', 'id', 'tag']
- if args.list_tests_by[0] not in valid_options:
- print('--list-tests-by %s not a valid option. Choose one of <name|class|module|id|tag>.' % args.list_tests_by[0])
- return 1
- else:
- criteria = args.list_tests_by[0]
- keyword = args.list_tests_by[1:]
- list_testsuite_by(criteria, keyword)
-
- if args.list_tests:
- list_tests()
-
- if args.list_tags:
- list_tags()
-
- if args.list_allclasses:
- args.list_modules = True
-
- if args.list_modules:
- log.info('Listing all available test modules:')
- testslist = get_tests(include_hidden=True)
- for test in testslist:
- module = test.split('oeqa.selftest.')[-1]
- info = ''
- if module.startswith('_'):
- info = ' (hidden)'
- print(module + info)
- if args.list_allclasses:
- try:
- import importlib
- modlib = importlib.import_module(test)
- for v in vars(modlib):
- t = vars(modlib)[v]
- if isinstance(t, type(oeSelfTest)) and issubclass(t, oeSelfTest) and t!=oeSelfTest:
- print(" --", v)
- for method in dir(t):
- if method.startswith("test_") and isinstance(vars(t)[method], collections.Callable):
- print(" -- --", method)
-
- except (AttributeError, ImportError) as e:
- print(e)
- pass
-
- if args.run_tests or args.run_all_tests or args.run_tests_by:
- if not preflight_check():
- return 1
-
- if args.run_tests_by:
- testslist = ts
- else:
- testslist = get_tests(exclusive_modules=(args.run_tests or []), include_hidden=False)
-
- suite = unittest.TestSuite()
- loader = unittest.TestLoader()
- loader.sortTestMethodsUsing = None
- runner = TestRunner(verbosity=2,
- resultclass=buildResultClass(args))
- # we need to do this here, otherwise just loading the tests
- # will take 2 minutes (bitbake -e calls)
- oeSelfTest.testlayer_path = get_test_layer()
- for test in testslist:
- log.info("Loading tests from: %s" % test)
- try:
- suite.addTests(loader.loadTestsFromName(test))
- except AttributeError as e:
- log.error("Failed to import %s" % test)
- log.error(e)
- return 1
- add_include()
-
- if args.machine:
- # Custom machine sets only weak default values (??=) for MACHINE in machine.inc
- # This let test cases that require a specific MACHINE to be able to override it, using (?= or =)
- log.info('Custom machine mode enabled. MACHINE set to %s' % args.machine)
- if args.machine == 'random':
- os.environ['CUSTOMMACHINE'] = 'random'
- result = runner.run(suite)
- else: # all
- machines = get_available_machines()
- for m in machines:
- log.info('Run tests with custom MACHINE set to: %s' % m)
- os.environ['CUSTOMMACHINE'] = m
- result = runner.run(suite)
- else:
- result = runner.run(suite)
-
- log.info("Finished")
-
- if result.wasSuccessful():
- return 0
- else:
- return 1
-
-def buildResultClass(args):
- """Build a Result Class to use in the testcase execution"""
- import site
-
- class StampedResult(TestResult):
- """
- Custom TestResult that prints the time when a test starts. As oe-selftest
- can take a long time (ie a few hours) to run, timestamps help us understand
- what tests are taking a long time to execute.
- If coverage is required, this class executes the coverage setup and reporting.
- """
- def startTest(self, test):
- import time
- self.stream.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + " - ")
- super(StampedResult, self).startTest(test)
-
- def startTestRun(self):
- """ Setup coverage before running any testcase """
-
- # variable holding the coverage configuration file allowing subprocess to be measured
- self.coveragepth = None
-
- # indicates the system if coverage is currently installed
- self.coverage_installed = True
-
- if args.coverage or args.coverage_source or args.coverage_include or args.coverage_omit:
- try:
- # check if user can do coverage
- import coverage
- except:
- log.warn("python coverage is not installed. More info on https://pypi.python.org/pypi/coverage")
- self.coverage_installed = False
-
- if self.coverage_installed:
- log.info("Coverage is enabled")
-
- major_version = int(coverage.version.__version__[0])
- if major_version < 4:
- log.error("python coverage %s installed. Require version 4 or greater." % coverage.version.__version__)
- self.stop()
- # In case the user has not set the variable COVERAGE_PROCESS_START,
- # create a default one and export it. The COVERAGE_PROCESS_START
- # value indicates where the coverage configuration file resides
- # More info on https://pypi.python.org/pypi/coverage
- if not os.environ.get('COVERAGE_PROCESS_START'):
- os.environ['COVERAGE_PROCESS_START'] = coverage_setup(args.coverage_source, args.coverage_include, args.coverage_omit)
-
- # Use default site.USER_SITE and write corresponding config file
- site.ENABLE_USER_SITE = True
- if not os.path.exists(site.USER_SITE):
- os.makedirs(site.USER_SITE)
- self.coveragepth = os.path.join(site.USER_SITE, "coverage.pth")
- with open(self.coveragepth, 'w') as cps:
- cps.write('import sys,site; sys.path.extend(site.getsitepackages()); import coverage; coverage.process_startup();')
-
- def stopTestRun(self):
- """ Report coverage data after the testcases are run """
-
- if args.coverage or args.coverage_source or args.coverage_include or args.coverage_omit:
- if self.coverage_installed:
- with open(os.environ['COVERAGE_PROCESS_START']) as ccf:
- log.info("Coverage configuration file (%s)" % os.environ.get('COVERAGE_PROCESS_START'))
- log.info("===========================")
- log.info("\n%s" % "".join(ccf.readlines()))
-
- log.info("Coverage Report")
- log.info("===============")
- try:
- coverage_report()
- finally:
- # remove the pth file
- try:
- os.remove(self.coveragepth)
- except OSError:
- log.warn("Expected temporal file from coverage is missing, ignoring removal.")
-
- return StampedResult
+ args = parser.parse_args()
+ results = args.func(logger, args)
+ ret = 0 if results.wasSuccessful() else 1
+ except SystemExit as err:
+ if err.code != 0:
+ raise err
+ ret = err.code
+ except OEQAPreRun as pr:
+ ret = 1
-class TestRunner(_TestRunner):
- """Test runner class aware of exporting tests."""
- def __init__(self, *args, **kwargs):
- try:
- exportdir = os.path.join(os.getcwd(), log_prefix)
- kwargsx = dict(**kwargs)
- # argument specific to XMLTestRunner, if adding a new runner then
- # also add logic to use other runner's args.
- kwargsx['output'] = exportdir
- kwargsx['descriptions'] = False
- # done for the case where telling the runner where to export
- super(TestRunner, self).__init__(*args, **kwargsx)
- except TypeError:
- log.info("test runner init'ed like unittest")
- super(TestRunner, self).__init__(*args, **kwargs)
+ return ret
-if __name__ == "__main__":
+if __name__ == '__main__':
try:
ret = main()
except Exception:
ret = 1
import traceback
traceback.print_exc()
- finally:
- remove_include()
- remove_inc_files()
sys.exit(ret)
diff --git a/scripts/oe-setup-builddir b/scripts/oe-setup-builddir
index 93722e08ad..30eaa8efbe 100755
--- a/scripts/oe-setup-builddir
+++ b/scripts/oe-setup-builddir
@@ -4,25 +4,22 @@
#
# Copyright (C) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
if [ -z "$BUILDDIR" ]; then
echo >&2 "Error: The build directory (BUILDDIR) must be set!"
exit 1
fi
+if [ "$1" = '--help' -o "$1" = '-h' ]; then
+ echo 'Usage: oe-setup-builddir'
+ echo ''
+    echo "OpenEmbedded setup-builddir - set up the build directory $BUILDDIR"
+ echo ''
+ exit 2
+fi
+
mkdir -p "$BUILDDIR/conf"
if [ ! -d "$BUILDDIR" ]; then
@@ -61,7 +58,7 @@ if [ -n "$TEMPLATECONF" ]; then
TEMPLATECONF="$OEROOT/$TEMPLATECONF"
fi
if [ ! -d "$TEMPLATECONF" ]; then
- echo >&2 "Error: '$TEMPLATECONF' must be a directory containing local.conf & bblayers.conf"
+ echo >&2 "Error: TEMPLATECONF value points to nonexistent directory '$TEMPLATECONF'"
exit 1
fi
fi
@@ -125,13 +122,6 @@ EOM
# unset SHOWYPDOC
fi
-cat <<EOM
-
-### Shell environment set up for builds. ###
-
-You can now run 'bitbake <target>'
-
-EOM
if [ -z "$OECORENOTESCONF" ]; then
OECORENOTESCONF="$OEROOT/meta/conf/conf-notes.txt"
fi
diff --git a/scripts/oe-setup-rpmrepo b/scripts/oe-setup-rpmrepo
deleted file mode 100755
index 917b98b984..0000000000
--- a/scripts/oe-setup-rpmrepo
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-#
-# This utility setup the necessary metadata for an rpm repo
-#
-# Copyright (c) 2011 Intel Corp.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-
-# Don't use TMPDIR from the external environment, it may be a distro
-# variable pointing to /tmp (e.g. within X on OpenSUSE)
-# Instead, use OE_TMPDIR for passing this in externally.
-TMPDIR="$OE_TMPDIR"
-
-function usage() {
- echo "Usage: $0 <rpm-dir>"
- echo " <rpm-dir>: default is $TMPDIR/deploy/rpm"
-}
-
-if [ $# -gt 1 ]; then
- usage
- exit 1
-fi
-
-setup_tmpdir() {
- if [ -z "$TMPDIR" ]; then
- # Try to get TMPDIR from bitbake
- type -P bitbake &>/dev/null || {
- echo "In order for this script to dynamically infer paths";
- echo "to kernels or filesystem images, you either need";
- echo "bitbake in your PATH or to source oe-init-build-env";
- echo "before running this script" >&2;
- exit 1; }
-
- # We have bitbake in PATH, get TMPDIR from bitbake
- TMPDIR=`bitbake -e | grep ^TMPDIR=\" | cut -d '=' -f2 | cut -d '"' -f2`
- if [ -z "$TMPDIR" ]; then
- echo "Error: this script needs to be run from your build directory,"
- echo "or you need to explicitly set TMPDIR in your environment"
- exit 1
- fi
- fi
-}
-
-setup_sysroot() {
- # Toolchain installs set up $OECORE_NATIVE_SYSROOT in their
- # environment script. If that variable isn't set, we're
- # either in an in-tree poky scenario or the environment
- # script wasn't source'd.
- if [ -z "$OECORE_NATIVE_SYSROOT" ]; then
- setup_tmpdir
- BUILD_ARCH=`uname -m`
- BUILD_OS=`uname | tr '[A-Z]' '[a-z]'`
- BUILD_SYS="$BUILD_ARCH-$BUILD_OS"
-
- OECORE_NATIVE_SYSROOT=$TMPDIR/sysroots/$BUILD_SYS
- fi
-}
-
-setup_tmpdir
-setup_sysroot
-
-
-if [ -n "$1" ]; then
- RPM_DIR="$1"
-else
- RPM_DIR="$TMPDIR/deploy/rpm"
-fi
-
-if [ ! -d "$RPM_DIR" ]; then
- echo "Error: rpm dir $RPM_DIR doesn't exist"
- exit 1
-fi
-
-CREATEREPO=$OECORE_NATIVE_SYSROOT/usr/bin/createrepo
-if [ ! -e "$CREATEREPO" ]; then
- echo "Error: can't find createrepo binary"
- echo "please run bitbake createrepo-native first"
- exit 1
-fi
-
-export PATH=${PATH}:${OECORE_NATIVE_SYSROOT}/usr/bin
-
-$CREATEREPO "$RPM_DIR"
-
-exit 0
diff --git a/scripts/oe-test b/scripts/oe-test
new file mode 100755
index 0000000000..55985b0b24
--- /dev/null
+++ b/scripts/oe-test
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+
+# OpenEmbedded test tool
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import sys
+import argparse
+import logging
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import argparse_oe
+import scriptutils
+
+# oe-test is also used from testexport, where the oe lib is not available,
+# so skip adding these libraries in that case (they are not used by testexport)
+try:
+ import scriptpath
+ scriptpath.add_oe_lib_path()
+except ImportError:
+ pass
+
+from oeqa.utils import load_test_components
+from oeqa.core.exception import OEQAPreRun
+
+logger = scriptutils.logger_create('oe-test', stream=sys.stdout)
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="OpenEmbedded test tool",
+ add_help=False,
+ epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+ parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
+ parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+ global_args, unparsed_args = parser.parse_known_args()
+
+ # Help is added here rather than via add_help=True, as we don't want it to
+ # be handled by parse_known_args()
+ parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
+ help='show this help message and exit')
+
+ if global_args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif global_args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ components = load_test_components(logger, 'oe-test')
+
+ subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
+ subparsers.add_subparser_group('components', 'Test components')
+ subparsers.required = True
+ for comp_name in sorted(components.keys()):
+ comp = components[comp_name]
+ comp.register_commands(logger, subparsers)
+
+ try:
+ args = parser.parse_args(unparsed_args, namespace=global_args)
+ results = args.func(logger, args)
+ ret = 0 if results.wasSuccessful() else 1
+ except SystemExit as err:
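+        # argparse exits with 0 for --help; re-raise any non-zero SystemExit unchanged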
+ if err.code != 0:
+ raise err
+ ret = err.code
+ except argparse_oe.ArgumentUsageError as ae:
+ parser.error_subcommand(ae.message, ae.subcommand)
+ except OEQAPreRun as pr:
+ ret = 1
+
+ return ret
+
+if __name__ == '__main__':
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/scripts/oe-trim-schemas b/scripts/oe-trim-schemas
index 66a1b8d81d..bf77c8cf64 100755
--- a/scripts/oe-trim-schemas
+++ b/scripts/oe-trim-schemas
@@ -1,4 +1,7 @@
#! /usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys
try:
@@ -18,6 +21,15 @@ def children (elem, name=None):
l = [e for e in l if e.tag == name]
return l
+if len(sys.argv) < 2 or sys.argv[1] in ('-h', '--help'):
+ print('oe-trim-schemas: error: the following arguments are required: schema\n'
+ 'Usage: oe-trim-schemas schema\n\n'
+ 'OpenEmbedded trim schemas - remove unneeded schema locale translations\n'
+ ' from gconf schema files\n\n'
+ 'arguments:\n'
+ ' schema gconf schema file to trim\n')
+ sys.exit(2)
+
xml = etree.parse(sys.argv[1])
for schema in child(xml.getroot(), "schemalist").getchildren():
diff --git a/scripts/oepydevshell-internal.py b/scripts/oepydevshell-internal.py
index a22bec3365..96c078ef3d 100755
--- a/scripts/oepydevshell-internal.py
+++ b/scripts/oepydevshell-internal.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import os
import sys
@@ -22,9 +25,16 @@ def cbreaknoecho(fd):
old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
termios.tcsetattr(fd, termios.TCSADRAIN, old)
-if len(sys.argv) != 3:
- print("Incorrect parameters")
- sys.exit(1)
+if len(sys.argv) != 3 or sys.argv[1] in ('-h', '--help'):
+ print('oepydevshell-internal.py: error: the following arguments are required: pty, pid\n'
+ 'Usage: oepydevshell-internal.py pty pid\n\n'
+ 'OpenEmbedded oepydevshell-internal.py - internal script called from meta/classes/devshell.bbclass\n\n'
+ 'arguments:\n'
+ ' pty pty device name\n'
+ ' pid parent process id\n\n'
+ 'options:\n'
+ ' -h, --help show this help message and exit\n')
+ sys.exit(2)
pty = open(sys.argv[1], "w+b", 0)
parent = int(sys.argv[2])
@@ -38,7 +48,7 @@ readline.parse_and_bind("tab: complete")
try:
readline.read_history_file(histfile)
except IOError:
- pass
+ pass
try:
@@ -56,7 +66,9 @@ try:
(ready, _, _) = select.select([pty, sys.stdin], writers , [], 0)
try:
if pty in ready:
- i = i + pty.read().decode('utf-8')
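+            # A non-blocking pty read may return None when no data is available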
+ readdata = pty.read()
+ if readdata:
+ i = i + readdata.decode('utf-8')
if i:
# Write a page at a time to avoid overflowing output
# d.keys() is a good way to do that
diff --git a/scripts/opkg-query-helper.py b/scripts/opkg-query-helper.py
index ce89491f60..bc3ab43823 100755
--- a/scripts/opkg-query-helper.py
+++ b/scripts/opkg-query-helper.py
@@ -6,21 +6,8 @@
#
# Copyright 2012 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-#
-
import sys
import fileinput
diff --git a/scripts/postinst-intercepts/delay_to_first_boot b/scripts/postinst-intercepts/delay_to_first_boot
new file mode 100644
index 0000000000..fa8e1caaf5
--- /dev/null
+++ b/scripts/postinst-intercepts/delay_to_first_boot
@@ -0,0 +1,6 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+
+exit 1
diff --git a/scripts/postinst-intercepts/postinst_intercept b/scripts/postinst-intercepts/postinst_intercept
index b18e806d43..b91974c885 100755
--- a/scripts/postinst-intercepts/postinst_intercept
+++ b/scripts/postinst-intercepts/postinst_intercept
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# SPDX-License-Identifier: MIT
+#
# This script is called from inside postinstall scriptlets at do_rootfs time. It
# actually adds, at the end, the list of packages for which the intercept script
# is valid. Also, if one wants to pass any variables to the intercept script from
diff --git a/scripts/postinst-intercepts/update_font_cache b/scripts/postinst-intercepts/update_font_cache
index bf65e19a41..3053c7065f 100644
--- a/scripts/postinst-intercepts/update_font_cache
+++ b/scripts/postinst-intercepts/update_font_cache
@@ -1,7 +1,9 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
-PSEUDO_UNLOAD=1 qemuwrapper -L $D -E LD_LIBRARY_PATH=$D/${libdir}:$D/${base_libdir} \
- -E ${fontconfigcacheenv} $D${bindir}/fc-cache --sysroot=$D --system-only ${fontconfigcacheparams}
+PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D -E ${fontconfigcacheenv} $D${libexecdir}/${binprefix}fc-cache --sysroot=$D --system-only ${fontconfigcacheparams}
chown -R root:root $D${fontconfigcachedir}
diff --git a/scripts/postinst-intercepts/update_gio_module_cache b/scripts/postinst-intercepts/update_gio_module_cache
index fe468092cf..c87fa85db9 100644
--- a/scripts/postinst-intercepts/update_gio_module_cache
+++ b/scripts/postinst-intercepts/update_gio_module_cache
@@ -1,7 +1,11 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
-PSEUDO_UNLOAD=1 qemuwrapper -L $D -E LD_LIBRARY_PATH=$D${libdir}:$D${base_libdir} \
- $D${libexecdir}/${binprefix}gio-querymodules $D${libdir}/gio/modules/
+PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${libexecdir}/${binprefix}gio-querymodules $D${libdir}/gio/modules/
+[ ! -e $D${libdir}/gio/modules/giomodule.cache ] ||
+ chown root:root $D${libdir}/gio/modules/giomodule.cache
diff --git a/scripts/postinst-intercepts/update_icon_cache b/scripts/postinst-intercepts/update_gtk_icon_cache
index 9cf2a72a0c..99367a2855 100644
--- a/scripts/postinst-intercepts/update_icon_cache
+++ b/scripts/postinst-intercepts/update_gtk_icon_cache
@@ -1,8 +1,12 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+# Post-install intercept for gtk-icon-cache.bbclass
set -e
-# update native pixbuf loaders
+# Update native pixbuf loaders
$STAGING_DIR_NATIVE/${libdir_native}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
for icondir in $D/usr/share/icons/*/ ; do
diff --git a/scripts/postinst-intercepts/update_gtk_immodules_cache b/scripts/postinst-intercepts/update_gtk_immodules_cache
new file mode 100644
index 0000000000..9f07ccca6b
--- /dev/null
+++ b/scripts/postinst-intercepts/update_gtk_immodules_cache
@@ -0,0 +1,19 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+
+set -e
+
+if [ -x $D${libexecdir}/${binprefix}gtk-query-immodules-2.0 ]; then
+ PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${libexecdir}/${binprefix}gtk-query-immodules-2.0 \
+ > $D${libdir}/gtk-2.0/2.10.0/immodules.cache &&
+ sed -i -e "s:$D::" $D${libdir}/gtk-2.0/2.10.0/immodules.cache
+ chown root:root $D${libdir}/gtk-2.0/2.10.0/immodules.cache
+fi
+if [ -x $D${libexecdir}/${binprefix}gtk-query-immodules-3.0 ]; then
+ PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${libexecdir}/${binprefix}gtk-query-immodules-3.0 \
+ > $D${libdir}/gtk-3.0/3.0.0/immodules.cache &&
+ sed -i -e "s:$D::" $D${libdir}/gtk-3.0/3.0.0/immodules.cache
+ chown root:root $D${libdir}/gtk-3.0/3.0.0/immodules.cache
+fi
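
The GTK, GIO and udev intercepts all follow one pattern: run the target-architecture tool under qemuwrapper against the image root $D, strip $D back out of the generated cache so it holds image paths rather than host paths, and hand ownership to root. A rough Python sketch of that pattern, with illustrative paths (the real intercepts are the shell scripts above):

import os
import subprocess

D = os.environ['D']              # image root, provided by the rootfs code
cache = os.path.join(D, 'usr/lib/gtk-3.0/3.0.0/immodules.cache')

# run the target binary under the qemu user-mode wrapper
with open(cache, 'w') as out:
    subprocess.run(['qemuwrapper', '-L', D,
                    D + '/usr/libexec/gtk-query-immodules-3.0'],
                   env=dict(os.environ, PSEUDO_UNLOAD='1'),
                   stdout=out, check=True)

# strip the host-side prefix, as sed -i -e "s:$D::" does above
with open(cache) as f:
    text = f.read().replace(D, '')
with open(cache, 'w') as f:
    f.write(text)
os.chown(cache, 0, 0)            # root:root, intercepted by pseudo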
diff --git a/scripts/postinst-intercepts/update_pixbuf_cache b/scripts/postinst-intercepts/update_pixbuf_cache
index 5d44075fb4..ea12814474 100644
--- a/scripts/postinst-intercepts/update_pixbuf_cache
+++ b/scripts/postinst-intercepts/update_pixbuf_cache
@@ -1,11 +1,13 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
export GDK_PIXBUF_MODULEDIR=$D${libdir}/gdk-pixbuf-2.0/2.10.0/loaders
export GDK_PIXBUF_FATAL_LOADER=1
-PSEUDO_UNLOAD=1 qemuwrapper -L $D -E LD_LIBRARY_PATH=$D/${libdir}:$D/${base_libdir}\
- $D${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders \
+PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders \
>$GDK_PIXBUF_MODULEDIR/../loaders.cache && \
sed -i -e "s:$D::g" $GDK_PIXBUF_MODULEDIR/../loaders.cache
diff --git a/scripts/postinst-intercepts/update_udev_hwdb b/scripts/postinst-intercepts/update_udev_hwdb
new file mode 100644
index 0000000000..c4fb2bffcb
--- /dev/null
+++ b/scripts/postinst-intercepts/update_udev_hwdb
@@ -0,0 +1,9 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+
+set -e
+
+PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${libexecdir}/${binprefix}udevadm hwdb --update --root $D
+chown root:root $D${sysconfdir}/udev/hwdb.bin
diff --git a/scripts/pybootchartgui/pybootchartgui.py b/scripts/pybootchartgui/pybootchartgui.py
index 7ce1a5be40..1c4062b42c 100755
--- a/scripts/pybootchartgui/pybootchartgui.py
+++ b/scripts/pybootchartgui/pybootchartgui.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# This file is part of pybootchartgui.
diff --git a/scripts/pybootchartgui/pybootchartgui/draw.py b/scripts/pybootchartgui/pybootchartgui/draw.py
index 8c574be50c..add5c53882 100644
--- a/scripts/pybootchartgui/pybootchartgui/draw.py
+++ b/scripts/pybootchartgui/pybootchartgui/draw.py
@@ -19,22 +19,23 @@ import math
import re
import random
import colorsys
+import functools
from operator import itemgetter
class RenderOptions:
- def __init__(self, app_options):
- # should we render a cumulative CPU time chart
- self.cumulative = True
- self.charts = True
- self.kernel_only = False
- self.app_options = app_options
+ def __init__(self, app_options):
+ # should we render a cumulative CPU time chart
+ self.cumulative = True
+ self.charts = True
+ self.kernel_only = False
+ self.app_options = app_options
- def proc_tree (self, trace):
- if self.kernel_only:
- return trace.kernel_tree
- else:
- return trace.proc_tree
+ def proc_tree (self, trace):
+ if self.kernel_only:
+ return trace.kernel_tree
+ else:
+ return trace.proc_tree
# Process tree background color.
BACK_COLOR = (1.0, 1.0, 1.0, 1.0)
@@ -133,6 +134,16 @@ TASK_COLOR_PACKAGE = (0.0, 1.00, 1.00, 1.0)
# Package Write RPM/DEB/IPK task color
TASK_COLOR_PACKAGE_WRITE = (0.0, 0.50, 0.50, 1.0)
+# Distinct colors used for different disk volumes.
+# If we have more volumes, colors get re-used.
+VOLUME_COLORS = [
+ (1.0, 1.0, 0.00, 1.0),
+ (0.0, 1.00, 0.00, 1.0),
+ (1.0, 0.00, 1.00, 1.0),
+ (0.0, 0.00, 1.00, 1.0),
+ (0.0, 1.00, 1.00, 1.0),
+]
+
# Process states
STATE_UNDEFINED = 0
STATE_RUNNING = 1
@@ -142,7 +153,7 @@ STATE_STOPPED = 4
STATE_ZOMBIE = 5
STATE_COLORS = [(0, 0, 0, 0), PROC_COLOR_R, PROC_COLOR_S, PROC_COLOR_D, \
- PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
+ PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
# CumulativeStats Types
STAT_TYPE_CPU = 0
@@ -150,80 +161,80 @@ STAT_TYPE_IO = 1
# Convert ps process state to an int
def get_proc_state(flag):
- return "RSDTZXW".find(flag) + 1
+ return "RSDTZXW".find(flag) + 1
def draw_text(ctx, text, color, x, y):
- ctx.set_source_rgba(*color)
- ctx.move_to(x, y)
- ctx.show_text(text)
+ ctx.set_source_rgba(*color)
+ ctx.move_to(x, y)
+ ctx.show_text(text)
def draw_fill_rect(ctx, color, rect):
- ctx.set_source_rgba(*color)
- ctx.rectangle(*rect)
- ctx.fill()
+ ctx.set_source_rgba(*color)
+ ctx.rectangle(*rect)
+ ctx.fill()
def draw_rect(ctx, color, rect):
- ctx.set_source_rgba(*color)
- ctx.rectangle(*rect)
- ctx.stroke()
+ ctx.set_source_rgba(*color)
+ ctx.rectangle(*rect)
+ ctx.stroke()
def draw_legend_box(ctx, label, fill_color, x, y, s):
- draw_fill_rect(ctx, fill_color, (x, y - s, s, s))
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y - s, s, s))
- draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
+ draw_fill_rect(ctx, fill_color, (x, y - s, s, s))
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y - s, s, s))
+ draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
def draw_legend_line(ctx, label, fill_color, x, y, s):
- draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
- ctx.arc(x + (s + 1)/2.0, y - (s - 3)/2.0, 2.5, 0, 2.0 * math.pi)
- ctx.fill()
- draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
+ draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
+ ctx.arc(x + (s + 1)/2.0, y - (s - 3)/2.0, 2.5, 0, 2.0 * math.pi)
+ ctx.fill()
+ draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
def draw_label_in_box(ctx, color, label, x, y, w, maxx):
- label_w = ctx.text_extents(label)[2]
- label_x = x + w / 2 - label_w / 2
- if label_w + 10 > w:
- label_x = x + w + 5
- if label_x + label_w > maxx:
- label_x = x - label_w - 5
- draw_text(ctx, label, color, label_x, y)
+ label_w = ctx.text_extents(label)[2]
+ label_x = x + w / 2 - label_w / 2
+ if label_w + 10 > w:
+ label_x = x + w + 5
+ if label_x + label_w > maxx:
+ label_x = x - label_w - 5
+ draw_text(ctx, label, color, label_x, y)
def draw_sec_labels(ctx, options, rect, sec_w, nsecs):
- ctx.set_font_size(AXIS_FONT_SIZE)
- prev_x = 0
- for i in range(0, rect[2] + 1, sec_w):
- if ((i / sec_w) % nsecs == 0) :
- if options.app_options.as_minutes :
- label = "%.1f" % (i / sec_w / 60.0)
- else :
- label = "%d" % (i / sec_w)
- label_w = ctx.text_extents(label)[2]
- x = rect[0] + i - label_w/2
- if x >= prev_x:
- draw_text(ctx, label, TEXT_COLOR, x, rect[1] - 2)
- prev_x = x + label_w
+ ctx.set_font_size(AXIS_FONT_SIZE)
+ prev_x = 0
+ for i in range(0, rect[2] + 1, sec_w):
+ if ((i / sec_w) % nsecs == 0) :
+ if options.app_options.as_minutes :
+ label = "%.1f" % (i / sec_w / 60.0)
+ else :
+ label = "%d" % (i / sec_w)
+ label_w = ctx.text_extents(label)[2]
+ x = rect[0] + i - label_w/2
+ if x >= prev_x:
+ draw_text(ctx, label, TEXT_COLOR, x, rect[1] - 2)
+ prev_x = x + label_w
def draw_box_ticks(ctx, rect, sec_w):
- draw_rect(ctx, BORDER_COLOR, tuple(rect))
-
- ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
-
- for i in range(sec_w, rect[2] + 1, sec_w):
- if ((i / sec_w) % 10 == 0) :
- ctx.set_line_width(1.5)
- elif sec_w < 5 :
- continue
- else :
- ctx.set_line_width(1.0)
- if ((i / sec_w) % 30 == 0) :
- ctx.set_source_rgba(*TICK_COLOR_BOLD)
- else :
- ctx.set_source_rgba(*TICK_COLOR)
- ctx.move_to(rect[0] + i, rect[1] + 1)
- ctx.line_to(rect[0] + i, rect[1] + rect[3] - 1)
- ctx.stroke()
- ctx.set_line_width(1.0)
-
- ctx.set_line_cap(cairo.LINE_CAP_BUTT)
+ draw_rect(ctx, BORDER_COLOR, tuple(rect))
+
+ ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
+
+ for i in range(sec_w, rect[2] + 1, sec_w):
+ if ((i / sec_w) % 10 == 0) :
+ ctx.set_line_width(1.5)
+ elif sec_w < 5 :
+ continue
+ else :
+ ctx.set_line_width(1.0)
+ if ((i / sec_w) % 30 == 0) :
+ ctx.set_source_rgba(*TICK_COLOR_BOLD)
+ else :
+ ctx.set_source_rgba(*TICK_COLOR)
+ ctx.move_to(rect[0] + i, rect[1] + 1)
+ ctx.line_to(rect[0] + i, rect[1] + rect[3] - 1)
+ ctx.stroke()
+ ctx.set_line_width(1.0)
+
+ ctx.set_line_cap(cairo.LINE_CAP_BUTT)
def draw_annotations(ctx, proc_tree, times, rect):
ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
@@ -242,51 +253,51 @@ def draw_annotations(ctx, proc_tree, times, rect):
ctx.set_dash([])
def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree, data_range):
- ctx.set_line_width(0.5)
- x_shift = proc_tree.start_time
-
- def transform_point_coords(point, x_base, y_base, \
- xscale, yscale, x_trans, y_trans):
- x = (point[0] - x_base) * xscale + x_trans
- y = (point[1] - y_base) * -yscale + y_trans + chart_bounds[3]
- return x, y
-
- max_x = max (x for (x, y) in data)
- max_y = max (y for (x, y) in data)
- # avoid divide by zero
- if max_y == 0:
- max_y = 1.0
- xscale = float (chart_bounds[2]) / max_x
- # If data_range is given, scale the chart so that the value range in
- # data_range matches the chart bounds exactly.
- # Otherwise, scale so that the actual data matches the chart bounds.
- if data_range:
- yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
- ybase = data_range[0]
- else:
- yscale = float(chart_bounds[3]) / max_y
- ybase = 0
-
- first = transform_point_coords (data[0], x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
- last = transform_point_coords (data[-1], x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
-
- ctx.set_source_rgba(*color)
- ctx.move_to(*first)
- for point in data:
- x, y = transform_point_coords (point, x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
- ctx.line_to(x, y)
- if fill:
- ctx.stroke_preserve()
- ctx.line_to(last[0], chart_bounds[1]+chart_bounds[3])
- ctx.line_to(first[0], chart_bounds[1]+chart_bounds[3])
- ctx.line_to(first[0], first[1])
- ctx.fill()
- else:
- ctx.stroke()
- ctx.set_line_width(1.0)
+ ctx.set_line_width(0.5)
+ x_shift = proc_tree.start_time
+
+ def transform_point_coords(point, x_base, y_base, \
+ xscale, yscale, x_trans, y_trans):
+ x = (point[0] - x_base) * xscale + x_trans
+ y = (point[1] - y_base) * -yscale + y_trans + chart_bounds[3]
+ return x, y
+
+ max_x = max (x for (x, y) in data)
+ max_y = max (y for (x, y) in data)
+ # avoid divide by zero
+ if max_y == 0:
+ max_y = 1.0
+ xscale = float (chart_bounds[2]) / (max_x - x_shift)
+ # If data_range is given, scale the chart so that the value range in
+ # data_range matches the chart bounds exactly.
+ # Otherwise, scale so that the actual data matches the chart bounds.
+ if data_range:
+ yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
+ ybase = data_range[0]
+ else:
+ yscale = float(chart_bounds[3]) / max_y
+ ybase = 0
+
+ first = transform_point_coords (data[0], x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+ last = transform_point_coords (data[-1], x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+
+ ctx.set_source_rgba(*color)
+ ctx.move_to(*first)
+ for point in data:
+ x, y = transform_point_coords (point, x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+ ctx.line_to(x, y)
+ if fill:
+ ctx.stroke_preserve()
+ ctx.line_to(last[0], chart_bounds[1]+chart_bounds[3])
+ ctx.line_to(first[0], chart_bounds[1]+chart_bounds[3])
+ ctx.line_to(first[0], first[1])
+ ctx.fill()
+ else:
+ ctx.stroke()
+ ctx.set_line_width(1.0)
bar_h = 55
meminfo_bar_h = 2 * bar_h
@@ -301,274 +312,344 @@ CUML_HEIGHT = 2000 # Increased value to accommodate CPU and I/O Graphs
OPTIONS = None
def extents(options, xscale, trace):
- start = min(trace.start.keys())
- end = start
-
- processes = 0
- for proc in trace.processes:
- if not options.app_options.show_all and \
- trace.processes[proc][1] - trace.processes[proc][0] < options.app_options.mintime:
- continue
-
- if trace.processes[proc][1] > end:
- end = trace.processes[proc][1]
- processes += 1
-
- if trace.min is not None and trace.max is not None:
- start = trace.min
- end = trace.max
-
- w = int ((end - start) * sec_w_base * xscale) + 2 * off_x
- h = proc_h * processes + header_h + 2 * off_y
-
- return (w, h)
+ start = min(trace.start.keys())
+ end = start
+
+ processes = 0
+ for proc in trace.processes:
+ if not options.app_options.show_all and \
+ trace.processes[proc][1] - trace.processes[proc][0] < options.app_options.mintime:
+ continue
+
+ if trace.processes[proc][1] > end:
+ end = trace.processes[proc][1]
+ processes += 1
+
+ if trace.min is not None and trace.max is not None:
+ start = trace.min
+ end = trace.max
+
+ w = int ((end - start) * sec_w_base * xscale) + 2 * off_x
+ h = proc_h * processes + header_h + 2 * off_y
+
+ if options.charts:
+ if trace.cpu_stats:
+ h += 30 + bar_h
+ if trace.disk_stats:
+ h += 30 + bar_h
+ if trace.monitor_disk:
+ h += 30 + bar_h
+ if trace.mem_stats:
+ h += meminfo_bar_h
+
+ # Allow for width of process legend and offset
+ if w < (720 + off_x):
+ w = 720 + off_x
+
+ return (w, h)
def clip_visible(clip, rect):
- xmax = max (clip[0], rect[0])
- ymax = max (clip[1], rect[1])
- xmin = min (clip[0] + clip[2], rect[0] + rect[2])
- ymin = min (clip[1] + clip[3], rect[1] + rect[3])
- return (xmin > xmax and ymin > ymax)
+ xmax = max (clip[0], rect[0])
+ ymax = max (clip[1], rect[1])
+ xmin = min (clip[0] + clip[2], rect[0] + rect[2])
+ ymin = min (clip[1] + clip[3], rect[1] + rect[3])
+ return (xmin > xmax and ymin > ymax)
def render_charts(ctx, options, clip, trace, curr_y, w, h, sec_w):
- proc_tree = options.proc_tree(trace)
-
- # render bar legend
- ctx.set_font_size(LEGEND_FONT_SIZE)
-
- draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
-
- # render I/O wait
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- draw_chart (ctx, IO_COLOR, True, chart_rect, \
- [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
- proc_tree, None)
- # render CPU load
- draw_chart (ctx, CPU_COLOR, True, chart_rect, \
- [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
- proc_tree, None)
-
- curr_y = curr_y + 30 + bar_h
-
- # render second chart
- draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+ proc_tree = options.proc_tree(trace)
+
+ # render bar legend
+ if trace.cpu_stats:
+ ctx.set_font_size(LEGEND_FONT_SIZE)
+
+ draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+
+ # render I/O wait
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, IO_COLOR, True, chart_rect, \
+ [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
+ proc_tree, None)
+ # render CPU load
+ draw_chart (ctx, CPU_COLOR, True, chart_rect, \
+ [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
+ proc_tree, None)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render second chart
+ if trace.disk_stats:
+ draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
# render I/O utilization
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- draw_chart (ctx, IO_COLOR, True, chart_rect, \
- [(sample.time, sample.util) for sample in trace.disk_stats], \
- proc_tree, None)
-
- # render disk throughput
- max_sample = max (trace.disk_stats, key = lambda s: s.tput)
- if clip_visible (clip, chart_rect):
- draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
- [(sample.time, sample.tput) for sample in trace.disk_stats], \
- proc_tree, None)
-
- pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
-
- shift_x, shift_y = -20, 20
- if (pos_x < off_x + 245):
- shift_x, shift_y = 5, 40
-
- label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
- draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
-
- curr_y = curr_y + 30 + bar_h
-
- # render mem usage
- chart_rect = (off_x, curr_y+30, w, meminfo_bar_h)
- mem_stats = trace.mem_stats
- if mem_stats and clip_visible (clip, chart_rect):
- mem_scale = max(sample.records['MemTotal'] - sample.records['MemFree'] for sample in mem_stats)
- draw_legend_box(ctx, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "Used", MEM_USED_COLOR, off_x + 240, curr_y+20, leg_s)
- draw_legend_box(ctx, "Buffers", MEM_BUFFERS_COLOR, off_x + 360, curr_y+20, leg_s)
- draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.records['SwapTotal'] - sample.records['SwapFree'])/1024 for sample in mem_stats]), \
- MEM_SWAP_COLOR, off_x + 480, curr_y+20, leg_s)
- draw_box_ticks(ctx, chart_rect, sec_w)
- draw_annotations(ctx, proc_tree, trace.times, chart_rect)
- draw_chart(ctx, MEM_BUFFERS_COLOR, True, chart_rect, \
- [(sample.time, sample.records['MemTotal'] - sample.records['MemFree']) for sample in trace.mem_stats], \
- proc_tree, [0, mem_scale])
- draw_chart(ctx, MEM_USED_COLOR, True, chart_rect, \
- [(sample.time, sample.records['MemTotal'] - sample.records['MemFree'] - sample.records['Buffers']) for sample in mem_stats], \
- proc_tree, [0, mem_scale])
- draw_chart(ctx, MEM_CACHED_COLOR, True, chart_rect, \
- [(sample.time, sample.records['Cached']) for sample in mem_stats], \
- proc_tree, [0, mem_scale])
- draw_chart(ctx, MEM_SWAP_COLOR, False, chart_rect, \
- [(sample.time, float(sample.records['SwapTotal'] - sample.records['SwapFree'])) for sample in mem_stats], \
- proc_tree, None)
-
- curr_y = curr_y + meminfo_bar_h
-
- return curr_y
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, IO_COLOR, True, chart_rect, \
+ [(sample.time, sample.util) for sample in trace.disk_stats], \
+ proc_tree, None)
+
+ # render disk throughput
+ max_sample = max (trace.disk_stats, key = lambda s: s.tput)
+ if clip_visible (clip, chart_rect):
+ draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
+ [(sample.time, sample.tput) for sample in trace.disk_stats], \
+ proc_tree, None)
+
+ pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
+
+ shift_x, shift_y = -20, 20
+ if (pos_x < off_x + 245):
+ shift_x, shift_y = 5, 40
+
+ label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
+ draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render disk space usage
+ #
+ # Draws the amount of disk space used on each volume relative to the
+ # lowest recorded amount. The graphs for each volume are stacked above
+ # each other so that total disk usage is visible.
+ if trace.monitor_disk:
+ ctx.set_font_size(LEGEND_FONT_SIZE)
+ # Determine set of volumes for which we have
+ # information and the minimal amount of used disk
+ # space for each. Currently samples are allowed to
+        # not have values for all volumes; drawing could be
+ # made more efficient if that wasn't the case.
+ volumes = set()
+ min_used = {}
+ for sample in trace.monitor_disk:
+ for volume, used in sample.records.items():
+ volumes.add(volume)
+ if volume not in min_used or min_used[volume] > used:
+ min_used[volume] = used
+ volumes = sorted(list(volumes))
+ disk_scale = 0
+ for i, volume in enumerate(volumes):
+ volume_scale = max([sample.records[volume] - min_used[volume]
+ for sample in trace.monitor_disk
+ if volume in sample.records])
+ # Does not take length of volume name into account, but fixed offset
+ # works okay in practice.
+ draw_legend_box(ctx, '%s (max: %u MiB)' % (volume, volume_scale / 1024 / 1024),
+ VOLUME_COLORS[i % len(VOLUME_COLORS)],
+ off_x + i * 250, curr_y+20, leg_s)
+ disk_scale += volume_scale
+
+ # render used amount of disk space
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ for i in range(len(volumes), 0, -1):
+ draw_chart (ctx, VOLUME_COLORS[(i - 1) % len(VOLUME_COLORS)], True, chart_rect, \
+ [(sample.time,
+ # Sum up used space of all volumes including the current one
+ # so that the graphs appear as stacked on top of each other.
+ functools.reduce(lambda x,y: x+y,
+ [sample.records[volume] - min_used[volume]
+ for volume in volumes[0:i]
+ if volume in sample.records],
+ 0))
+ for sample in trace.monitor_disk], \
+ proc_tree, [0, disk_scale])
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render mem usage
+ chart_rect = (off_x, curr_y+30, w, meminfo_bar_h)
+ mem_stats = trace.mem_stats
+ if mem_stats and clip_visible (clip, chart_rect):
+ mem_scale = max(sample.buffers for sample in mem_stats)
+ draw_legend_box(ctx, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Used", MEM_USED_COLOR, off_x + 240, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Buffers", MEM_BUFFERS_COLOR, off_x + 360, curr_y+20, leg_s)
+ draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.swap)/1024 for sample in mem_stats]), \
+ MEM_SWAP_COLOR, off_x + 480, curr_y+20, leg_s)
+ draw_box_ticks(ctx, chart_rect, sec_w)
+ draw_annotations(ctx, proc_tree, trace.times, chart_rect)
+ draw_chart(ctx, MEM_BUFFERS_COLOR, True, chart_rect, \
+ [(sample.time, sample.buffers) for sample in trace.mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_USED_COLOR, True, chart_rect, \
+ [(sample.time, sample.used) for sample in mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_CACHED_COLOR, True, chart_rect, \
+ [(sample.time, sample.cached) for sample in mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_SWAP_COLOR, False, chart_rect, \
+ [(sample.time, float(sample.swap)) for sample in mem_stats], \
+ proc_tree, None)
+
+ curr_y = curr_y + meminfo_bar_h
+
+ return curr_y
def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
- chart_rect = [off_x, curr_y+header_h, w, h - 2 * off_y - (curr_y+header_h) + proc_h]
-
- draw_legend_box (ctx, "Configure", \
- TASK_COLOR_CONFIGURE, off_x , curr_y + 45, leg_s)
- draw_legend_box (ctx, "Compile", \
- TASK_COLOR_COMPILE, off_x+120, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Install", \
- TASK_COLOR_INSTALL, off_x+240, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Populate Sysroot", \
- TASK_COLOR_SYSROOT, off_x+360, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Package", \
- TASK_COLOR_PACKAGE, off_x+480, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Package Write",
- TASK_COLOR_PACKAGE_WRITE, off_x+600, curr_y + 45, leg_s)
-
- ctx.set_font_size(PROC_TEXT_FONT_SIZE)
-
- draw_box_ticks(ctx, chart_rect, sec_w)
- draw_sec_labels(ctx, options, chart_rect, sec_w, 30)
-
- y = curr_y+header_h
-
- offset = trace.min or min(trace.start.keys())
- for s in sorted(trace.start.keys()):
- for val in sorted(trace.start[s]):
- if not options.app_options.show_all and \
- trace.processes[val][1] - s < options.app_options.mintime:
- continue
- task = val.split(":")[1]
- #print val
- #print trace.processes[val][1]
- #print s
- x = chart_rect[0] + (s - offset) * sec_w
- w = ((trace.processes[val][1] - s) * sec_w)
-
- #print "proc at %s %s %s %s" % (x, y, w, proc_h)
- col = None
- if task == "do_compile":
- col = TASK_COLOR_COMPILE
- elif task == "do_configure":
- col = TASK_COLOR_CONFIGURE
- elif task == "do_install":
- col = TASK_COLOR_INSTALL
- elif task == "do_populate_sysroot":
- col = TASK_COLOR_SYSROOT
- elif task == "do_package":
- col = TASK_COLOR_PACKAGE
- elif task == "do_package_write_rpm" or \
+ chart_rect = [off_x, curr_y+header_h, w, h - curr_y - 1 * off_y - header_h ]
+
+ draw_legend_box (ctx, "Configure", \
+ TASK_COLOR_CONFIGURE, off_x , curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Compile", \
+ TASK_COLOR_COMPILE, off_x+120, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Install", \
+ TASK_COLOR_INSTALL, off_x+240, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Populate Sysroot", \
+ TASK_COLOR_SYSROOT, off_x+360, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Package", \
+ TASK_COLOR_PACKAGE, off_x+480, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Package Write", \
+ TASK_COLOR_PACKAGE_WRITE, off_x+600, curr_y + 45, leg_s)
+
+ ctx.set_font_size(PROC_TEXT_FONT_SIZE)
+
+ draw_box_ticks(ctx, chart_rect, sec_w)
+ draw_sec_labels(ctx, options, chart_rect, sec_w, 30)
+
+ y = curr_y+header_h
+
+ offset = trace.min or min(trace.start.keys())
+ for start in sorted(trace.start.keys()):
+ for process in sorted(trace.start[start]):
+ if not options.app_options.show_all and \
+ trace.processes[process][1] - start < options.app_options.mintime:
+ continue
+ task = process.split(":")[1]
+
+ #print(process)
+ #print(trace.processes[process][1])
+ #print(s)
+
+ x = chart_rect[0] + (start - offset) * sec_w
+ w = ((trace.processes[process][1] - start) * sec_w)
+
+ #print("proc at %s %s %s %s" % (x, y, w, proc_h))
+ col = None
+ if task == "do_compile":
+ col = TASK_COLOR_COMPILE
+ elif task == "do_configure":
+ col = TASK_COLOR_CONFIGURE
+ elif task == "do_install":
+ col = TASK_COLOR_INSTALL
+ elif task == "do_populate_sysroot":
+ col = TASK_COLOR_SYSROOT
+ elif task == "do_package":
+ col = TASK_COLOR_PACKAGE
+ elif task == "do_package_write_rpm" or \
task == "do_package_write_deb" or \
task == "do_package_write_ipk":
- col = TASK_COLOR_PACKAGE_WRITE
- else:
- col = WHITE
+ col = TASK_COLOR_PACKAGE_WRITE
+ else:
+ col = WHITE
- if col:
- draw_fill_rect(ctx, col, (x, y, w, proc_h))
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
+ if col:
+ draw_fill_rect(ctx, col, (x, y, w, proc_h))
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
- draw_label_in_box(ctx, PROC_TEXT_COLOR, val, x, y + proc_h - 4, w, proc_h)
- y = y + proc_h
+ draw_label_in_box(ctx, PROC_TEXT_COLOR, process, x, y + proc_h - 4, w, proc_h)
+ y = y + proc_h
- return curr_y
+ return curr_y
#
# Render the chart.
#
def render(ctx, options, xscale, trace):
- (w, h) = extents (options, xscale, trace)
- global OPTIONS
- OPTIONS = options.app_options
+ (w, h) = extents (options, xscale, trace)
+ global OPTIONS
+ OPTIONS = options.app_options
- # x, y, w, h
- clip = ctx.clip_extents()
+ # x, y, w, h
+ clip = ctx.clip_extents()
- sec_w = int (xscale * sec_w_base)
- ctx.set_line_width(1.0)
- ctx.select_font_face(FONT_NAME)
- draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
- w -= 2*off_x
- curr_y = off_y;
+ sec_w = int (xscale * sec_w_base)
+ ctx.set_line_width(1.0)
+ ctx.select_font_face(FONT_NAME)
+ draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
+ w -= 2*off_x
+ curr_y = off_y;
- curr_y = render_processes_chart (ctx, options, trace, curr_y, w, h, sec_w)
+ if options.charts:
+ curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
- return
+ curr_y = render_processes_chart (ctx, options, trace, curr_y, w, h, sec_w)
- proc_tree = options.proc_tree (trace)
+ return
- # draw the title and headers
- if proc_tree.idle:
- duration = proc_tree.idle
- else:
- duration = proc_tree.duration
+ proc_tree = options.proc_tree (trace)
- if not options.kernel_only:
- curr_y = draw_header (ctx, trace.headers, duration)
- else:
- curr_y = off_y;
+ # draw the title and headers
+ if proc_tree.idle:
+ duration = proc_tree.idle
+ else:
+ duration = proc_tree.duration
- if options.charts:
- curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
+ if not options.kernel_only:
+ curr_y = draw_header (ctx, trace.headers, duration)
+ else:
+ curr_y = off_y;
- # draw process boxes
- proc_height = h
- if proc_tree.taskstats and options.cumulative:
- proc_height -= CUML_HEIGHT
+ # draw process boxes
+ proc_height = h
+ if proc_tree.taskstats and options.cumulative:
+ proc_height -= CUML_HEIGHT
- draw_process_bar_chart(ctx, clip, options, proc_tree, trace.times,
- curr_y, w, proc_height, sec_w)
+ draw_process_bar_chart(ctx, clip, options, proc_tree, trace.times,
+ curr_y, w, proc_height, sec_w)
- curr_y = proc_height
- ctx.set_font_size(SIG_FONT_SIZE)
- draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, proc_height - 8)
+ curr_y = proc_height
+ ctx.set_font_size(SIG_FONT_SIZE)
+ draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, proc_height - 8)
- # draw a cumulative CPU-time-per-process graph
- if proc_tree.taskstats and options.cumulative:
- cuml_rect = (off_x, curr_y + off_y, w, CUML_HEIGHT/2 - off_y * 2)
- if clip_visible (clip, cuml_rect):
- draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_CPU)
+ # draw a cumulative CPU-time-per-process graph
+ if proc_tree.taskstats and options.cumulative:
+ cuml_rect = (off_x, curr_y + off_y, w, CUML_HEIGHT/2 - off_y * 2)
+ if clip_visible (clip, cuml_rect):
+ draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_CPU)
- # draw a cumulative I/O-time-per-process graph
- if proc_tree.taskstats and options.cumulative:
- cuml_rect = (off_x, curr_y + off_y * 100, w, CUML_HEIGHT/2 - off_y * 2)
- if clip_visible (clip, cuml_rect):
- draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_IO)
+ # draw a cumulative I/O-time-per-process graph
+ if proc_tree.taskstats and options.cumulative:
+ cuml_rect = (off_x, curr_y + off_y * 100, w, CUML_HEIGHT/2 - off_y * 2)
+ if clip_visible (clip, cuml_rect):
+ draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_IO)
def draw_process_bar_chart(ctx, clip, options, proc_tree, times, curr_y, w, h, sec_w):
- header_size = 0
- if not options.kernel_only:
- draw_legend_box (ctx, "Running (%cpu)",
- PROC_COLOR_R, off_x , curr_y + 45, leg_s)
- draw_legend_box (ctx, "Unint.sleep (I/O)",
- PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Sleeping",
- PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Zombie",
- PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)
- header_size = 45
-
- chart_rect = [off_x, curr_y + header_size + 15,
- w, h - 2 * off_y - (curr_y + header_size + 15) + proc_h]
- ctx.set_font_size (PROC_TEXT_FONT_SIZE)
-
- draw_box_ticks (ctx, chart_rect, sec_w)
- if sec_w > 100:
- nsec = 1
- else:
- nsec = 5
- draw_sec_labels (ctx, options, chart_rect, sec_w, nsec)
- draw_annotations (ctx, proc_tree, times, chart_rect)
-
- y = curr_y + 60
- for root in proc_tree.process_tree:
- draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect, clip)
- y = y + proc_h * proc_tree.num_nodes([root])
+ header_size = 0
+ if not options.kernel_only:
+ draw_legend_box (ctx, "Running (%cpu)",
+ PROC_COLOR_R, off_x , curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Unint.sleep (I/O)",
+ PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Sleeping",
+ PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Zombie",
+ PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)
+ header_size = 45
+
+ chart_rect = [off_x, curr_y + header_size + 15,
+ w, h - 2 * off_y - (curr_y + header_size + 15) + proc_h]
+ ctx.set_font_size (PROC_TEXT_FONT_SIZE)
+
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ if sec_w > 100:
+ nsec = 1
+ else:
+ nsec = 5
+ draw_sec_labels (ctx, options, chart_rect, sec_w, nsec)
+ draw_annotations (ctx, proc_tree, times, chart_rect)
+
+ y = curr_y + 60
+ for root in proc_tree.process_tree:
+ draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect, clip)
+ y = y + proc_h * proc_tree.num_nodes([root])
def draw_header (ctx, headers, duration):
@@ -604,291 +685,291 @@ def draw_header (ctx, headers, duration):
return header_y
def draw_processes_recursively(ctx, proc, proc_tree, y, proc_h, rect, clip) :
- x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
- w = ((proc.duration) * rect[2] / proc_tree.duration)
-
- draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip)
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
- ipid = int(proc.pid)
- if not OPTIONS.show_all:
- cmdString = proc.cmd
- else:
- cmdString = ''
- if (OPTIONS.show_pid or OPTIONS.show_all) and ipid is not 0:
- cmdString = cmdString + " [" + str(ipid // 1000) + "]"
- if OPTIONS.show_all:
- if proc.args:
- cmdString = cmdString + " '" + "' '".join(proc.args) + "'"
- else:
- cmdString = cmdString + " " + proc.exe
-
- draw_label_in_box(ctx, PROC_TEXT_COLOR, cmdString, x, y + proc_h - 4, w, rect[0] + rect[2])
-
- next_y = y + proc_h
- for child in proc.child_list:
- if next_y > clip[1] + clip[3]:
- break
- child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect, clip)
- draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
- next_y = next_y + proc_h * proc_tree.num_nodes([child])
-
- return x, y
+ x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
+ w = ((proc.duration) * rect[2] / proc_tree.duration)
+
+ draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip)
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
+ ipid = int(proc.pid)
+ if not OPTIONS.show_all:
+ cmdString = proc.cmd
+ else:
+ cmdString = ''
+    if (OPTIONS.show_pid or OPTIONS.show_all) and ipid != 0:
+ cmdString = cmdString + " [" + str(ipid // 1000) + "]"
+ if OPTIONS.show_all:
+ if proc.args:
+ cmdString = cmdString + " '" + "' '".join(proc.args) + "'"
+ else:
+ cmdString = cmdString + " " + proc.exe
+
+ draw_label_in_box(ctx, PROC_TEXT_COLOR, cmdString, x, y + proc_h - 4, w, rect[0] + rect[2])
+
+ next_y = y + proc_h
+ for child in proc.child_list:
+ if next_y > clip[1] + clip[3]:
+ break
+ child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect, clip)
+ draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
+ next_y = next_y + proc_h * proc_tree.num_nodes([child])
+
+ return x, y
def draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip):
- if y > clip[1] + clip[3] or y + proc_h + 2 < clip[1]:
- return
+ if y > clip[1] + clip[3] or y + proc_h + 2 < clip[1]:
+ return
- draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))
+ draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))
- last_tx = -1
- for sample in proc.samples :
- tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
+ last_tx = -1
+ for sample in proc.samples :
+ tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
- # samples are sorted chronologically
- if tx < clip[0]:
- continue
- if tx > clip[0] + clip[2]:
- break
+ # samples are sorted chronologically
+ if tx < clip[0]:
+ continue
+ if tx > clip[0] + clip[2]:
+ break
- tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
- if last_tx != -1 and abs(last_tx - tx) <= tw:
- tw -= last_tx - tx
- tx = last_tx
- tw = max (tw, 1) # nice to see at least something
+ tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
+ if last_tx != -1 and abs(last_tx - tx) <= tw:
+ tw -= last_tx - tx
+ tx = last_tx
+ tw = max (tw, 1) # nice to see at least something
- last_tx = tx + tw
- state = get_proc_state( sample.state )
+ last_tx = tx + tw
+ state = get_proc_state( sample.state )
- color = STATE_COLORS[state]
- if state == STATE_RUNNING:
- alpha = min (sample.cpu_sample.user + sample.cpu_sample.sys, 1.0)
- color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
-# print "render time %d [ tx %d tw %d ], sample state %s color %s alpha %g" % (sample.time, tx, tw, state, color, alpha)
- elif state == STATE_SLEEPING:
- continue
+ color = STATE_COLORS[state]
+ if state == STATE_RUNNING:
+ alpha = min (sample.cpu_sample.user + sample.cpu_sample.sys, 1.0)
+ color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
+# print "render time %d [ tx %d tw %d ], sample state %s color %s alpha %g" % (sample.time, tx, tw, state, color, alpha)
+ elif state == STATE_SLEEPING:
+ continue
- draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
+ draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
def draw_process_connecting_lines(ctx, px, py, x, y, proc_h):
- ctx.set_source_rgba(*DEP_COLOR)
- ctx.set_dash([2, 2])
- if abs(px - x) < 3:
- dep_off_x = 3
- dep_off_y = proc_h / 4
- ctx.move_to(x, y + proc_h / 2)
- ctx.line_to(px - dep_off_x, y + proc_h / 2)
- ctx.line_to(px - dep_off_x, py - dep_off_y)
- ctx.line_to(px, py - dep_off_y)
- else:
- ctx.move_to(x, y + proc_h / 2)
- ctx.line_to(px, y + proc_h / 2)
- ctx.line_to(px, py)
- ctx.stroke()
- ctx.set_dash([])
+ ctx.set_source_rgba(*DEP_COLOR)
+ ctx.set_dash([2, 2])
+ if abs(px - x) < 3:
+ dep_off_x = 3
+ dep_off_y = proc_h / 4
+ ctx.move_to(x, y + proc_h / 2)
+ ctx.line_to(px - dep_off_x, y + proc_h / 2)
+ ctx.line_to(px - dep_off_x, py - dep_off_y)
+ ctx.line_to(px, py - dep_off_y)
+ else:
+ ctx.move_to(x, y + proc_h / 2)
+ ctx.line_to(px, y + proc_h / 2)
+ ctx.line_to(px, py)
+ ctx.stroke()
+ ctx.set_dash([])
# elide the bootchart collector - it is quite distorting
def elide_bootchart(proc):
- return proc.cmd == 'bootchartd' or proc.cmd == 'bootchart-colle'
+ return proc.cmd == 'bootchartd' or proc.cmd == 'bootchart-colle'
class CumlSample:
- def __init__(self, proc):
- self.cmd = proc.cmd
- self.samples = []
- self.merge_samples (proc)
- self.color = None
-
- def merge_samples(self, proc):
- self.samples.extend (proc.samples)
- self.samples.sort (key = lambda p: p.time)
-
- def next(self):
- global palette_idx
- palette_idx += HSV_STEP
- return palette_idx
-
- def get_color(self):
- if self.color is None:
- i = self.next() % HSV_MAX_MOD
- h = 0.0
- if i is not 0:
- h = (1.0 * i) / HSV_MAX_MOD
- s = 0.5
- v = 1.0
- c = colorsys.hsv_to_rgb (h, s, v)
- self.color = (c[0], c[1], c[2], 1.0)
- return self.color
+ def __init__(self, proc):
+ self.cmd = proc.cmd
+ self.samples = []
+ self.merge_samples (proc)
+ self.color = None
+
+ def merge_samples(self, proc):
+ self.samples.extend (proc.samples)
+ self.samples.sort (key = lambda p: p.time)
+
+ def next(self):
+ global palette_idx
+ palette_idx += HSV_STEP
+ return palette_idx
+
+ def get_color(self):
+ if self.color is None:
+ i = self.next() % HSV_MAX_MOD
+ h = 0.0
+            if i != 0:
+ h = (1.0 * i) / HSV_MAX_MOD
+ s = 0.5
+ v = 1.0
+ c = colorsys.hsv_to_rgb (h, s, v)
+ self.color = (c[0], c[1], c[2], 1.0)
+ return self.color
def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
- global palette_idx
- palette_idx = 0
-
- time_hash = {}
- total_time = 0.0
- m_proc_list = {}
-
- if stat_type is STAT_TYPE_CPU:
- sample_value = 'cpu'
- else:
- sample_value = 'io'
- for proc in proc_tree.process_list:
- if elide_bootchart(proc):
- continue
-
- for sample in proc.samples:
- total_time += getattr(sample.cpu_sample, sample_value)
- if not sample.time in time_hash:
- time_hash[sample.time] = 1
-
- # merge pids with the same cmd
- if not proc.cmd in m_proc_list:
- m_proc_list[proc.cmd] = CumlSample (proc)
- continue
- s = m_proc_list[proc.cmd]
- s.merge_samples (proc)
-
- # all the sample times
- times = sorted(time_hash)
- if len (times) < 2:
- print("degenerate boot chart")
- return
-
- pix_per_ns = chart_bounds[3] / total_time
-# print "total time: %g pix-per-ns %g" % (total_time, pix_per_ns)
-
- # FIXME: we have duplicates in the process list too [!] - why !?
-
- # Render bottom up, left to right
- below = {}
- for time in times:
- below[time] = chart_bounds[1] + chart_bounds[3]
-
- # same colors each time we render
- random.seed (0)
-
- ctx.set_line_width(1)
-
- legends = []
- labels = []
-
- # render each pid in order
- for cs in m_proc_list.values():
- row = {}
- cuml = 0.0
-
- # print "pid : %s -> %g samples %d" % (proc.cmd, cuml, len (cs.samples))
- for sample in cs.samples:
- cuml += getattr(sample.cpu_sample, sample_value)
- row[sample.time] = cuml
-
- process_total_time = cuml
-
- # hide really tiny processes
- if cuml * pix_per_ns <= 2:
- continue
-
- last_time = times[0]
- y = last_below = below[last_time]
- last_cuml = cuml = 0.0
-
- ctx.set_source_rgba(*cs.get_color())
- for time in times:
- render_seg = False
-
- # did the underlying trend increase ?
- if below[time] != last_below:
- last_below = below[last_time]
- last_cuml = cuml
- render_seg = True
-
- # did we move up a pixel increase ?
- if time in row:
- nc = round (row[time] * pix_per_ns)
- if nc != cuml:
- last_cuml = cuml
- cuml = nc
- render_seg = True
-
-# if last_cuml > cuml:
-# assert fail ... - un-sorted process samples
-
- # draw the trailing rectangle from the last time to
- # before now, at the height of the last segment.
- if render_seg:
- w = math.ceil ((time - last_time) * chart_bounds[2] / proc_tree.duration) + 1
- x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
- ctx.rectangle (x, below[last_time] - last_cuml, w, last_cuml)
- ctx.fill()
-# ctx.stroke()
- last_time = time
- y = below [time] - cuml
-
- row[time] = y
-
- # render the last segment
- x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
- y = below[last_time] - cuml
- ctx.rectangle (x, y, chart_bounds[2] - x, cuml)
- ctx.fill()
-# ctx.stroke()
-
- # render legend if it will fit
- if cuml > 8:
- label = cs.cmd
- extnts = ctx.text_extents(label)
- label_w = extnts[2]
- label_h = extnts[3]
-# print "Text extents %g by %g" % (label_w, label_h)
- labels.append((label,
- chart_bounds[0] + chart_bounds[2] - label_w - off_x * 2,
- y + (cuml + label_h) / 2))
- if cs in legends:
- print("ARGH - duplicate process in list !")
-
- legends.append ((cs, process_total_time))
-
- below = row
-
- # render grid-lines over the top
- draw_box_ticks(ctx, chart_bounds, sec_w)
-
- # render labels
- for l in labels:
- draw_text(ctx, l[0], TEXT_COLOR, l[1], l[2])
-
- # Render legends
- font_height = 20
- label_width = 300
- LEGENDS_PER_COL = 15
- LEGENDS_TOTAL = 45
- ctx.set_font_size (TITLE_FONT_SIZE)
- dur_secs = duration / 100
- cpu_secs = total_time / 1000000000
-
- # misleading - with multiple CPUs ...
-# idle = ((dur_secs - cpu_secs) / dur_secs) * 100.0
- if stat_type is STAT_TYPE_CPU:
- label = "Cumulative CPU usage, by process; total CPU: " \
- " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
- else:
- label = "Cumulative I/O usage, by process; total I/O: " \
- " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
-
- draw_text(ctx, label, TEXT_COLOR, chart_bounds[0] + off_x,
- chart_bounds[1] + font_height)
-
- i = 0
- legends = sorted(legends, key=itemgetter(1), reverse=True)
- ctx.set_font_size(TEXT_FONT_SIZE)
- for t in legends:
- cs = t[0]
- time = t[1]
- x = chart_bounds[0] + off_x + int (i/LEGENDS_PER_COL) * label_width
- y = chart_bounds[1] + font_height * ((i % LEGENDS_PER_COL) + 2)
- str = "%s - %.0f(ms) (%2.2f%%)" % (cs.cmd, time/1000000, (time/total_time) * 100.0)
- draw_legend_box(ctx, str, cs.color, x, y, leg_s)
- i = i + 1
- if i >= LEGENDS_TOTAL:
- break
+ global palette_idx
+ palette_idx = 0
+
+ time_hash = {}
+ total_time = 0.0
+ m_proc_list = {}
+
+    if stat_type == STAT_TYPE_CPU:
+ sample_value = 'cpu'
+ else:
+ sample_value = 'io'
+ for proc in proc_tree.process_list:
+ if elide_bootchart(proc):
+ continue
+
+ for sample in proc.samples:
+ total_time += getattr(sample.cpu_sample, sample_value)
+ if not sample.time in time_hash:
+ time_hash[sample.time] = 1
+
+ # merge pids with the same cmd
+ if not proc.cmd in m_proc_list:
+ m_proc_list[proc.cmd] = CumlSample (proc)
+ continue
+ s = m_proc_list[proc.cmd]
+ s.merge_samples (proc)
+
+ # all the sample times
+ times = sorted(time_hash)
+ if len (times) < 2:
+ print("degenerate boot chart")
+ return
+
+ pix_per_ns = chart_bounds[3] / total_time
+# print "total time: %g pix-per-ns %g" % (total_time, pix_per_ns)
+
+ # FIXME: we have duplicates in the process list too [!] - why !?
+
+ # Render bottom up, left to right
+ below = {}
+ for time in times:
+ below[time] = chart_bounds[1] + chart_bounds[3]
+
+ # same colors each time we render
+ random.seed (0)
+
+ ctx.set_line_width(1)
+
+ legends = []
+ labels = []
+
+ # render each pid in order
+ for cs in m_proc_list.values():
+ row = {}
+ cuml = 0.0
+
+ # print "pid : %s -> %g samples %d" % (proc.cmd, cuml, len (cs.samples))
+ for sample in cs.samples:
+ cuml += getattr(sample.cpu_sample, sample_value)
+ row[sample.time] = cuml
+
+ process_total_time = cuml
+
+ # hide really tiny processes
+ if cuml * pix_per_ns <= 2:
+ continue
+
+ last_time = times[0]
+ y = last_below = below[last_time]
+ last_cuml = cuml = 0.0
+
+ ctx.set_source_rgba(*cs.get_color())
+ for time in times:
+ render_seg = False
+
+ # did the underlying trend increase ?
+ if below[time] != last_below:
+ last_below = below[last_time]
+ last_cuml = cuml
+ render_seg = True
+
+ # did we move up a pixel increase ?
+ if time in row:
+ nc = round (row[time] * pix_per_ns)
+ if nc != cuml:
+ last_cuml = cuml
+ cuml = nc
+ render_seg = True
+
+# if last_cuml > cuml:
+# assert fail ... - un-sorted process samples
+
+ # draw the trailing rectangle from the last time to
+ # before now, at the height of the last segment.
+ if render_seg:
+ w = math.ceil ((time - last_time) * chart_bounds[2] / proc_tree.duration) + 1
+ x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
+ ctx.rectangle (x, below[last_time] - last_cuml, w, last_cuml)
+ ctx.fill()
+# ctx.stroke()
+ last_time = time
+ y = below [time] - cuml
+
+ row[time] = y
+
+ # render the last segment
+ x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
+ y = below[last_time] - cuml
+ ctx.rectangle (x, y, chart_bounds[2] - x, cuml)
+ ctx.fill()
+# ctx.stroke()
+
+ # render legend if it will fit
+ if cuml > 8:
+ label = cs.cmd
+ extnts = ctx.text_extents(label)
+ label_w = extnts[2]
+ label_h = extnts[3]
+# print "Text extents %g by %g" % (label_w, label_h)
+ labels.append((label,
+ chart_bounds[0] + chart_bounds[2] - label_w - off_x * 2,
+ y + (cuml + label_h) / 2))
+ if cs in legends:
+ print("ARGH - duplicate process in list !")
+
+ legends.append ((cs, process_total_time))
+
+ below = row
+
+ # render grid-lines over the top
+ draw_box_ticks(ctx, chart_bounds, sec_w)
+
+ # render labels
+ for l in labels:
+ draw_text(ctx, l[0], TEXT_COLOR, l[1], l[2])
+
+ # Render legends
+ font_height = 20
+ label_width = 300
+ LEGENDS_PER_COL = 15
+ LEGENDS_TOTAL = 45
+ ctx.set_font_size (TITLE_FONT_SIZE)
+ dur_secs = duration / 100
+ cpu_secs = total_time / 1000000000
+
+ # misleading - with multiple CPUs ...
+# idle = ((dur_secs - cpu_secs) / dur_secs) * 100.0
+    if stat_type == STAT_TYPE_CPU:
+ label = "Cumulative CPU usage, by process; total CPU: " \
+ " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
+ else:
+ label = "Cumulative I/O usage, by process; total I/O: " \
+ " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
+
+ draw_text(ctx, label, TEXT_COLOR, chart_bounds[0] + off_x,
+ chart_bounds[1] + font_height)
+
+ i = 0
+ legends = sorted(legends, key=itemgetter(1), reverse=True)
+ ctx.set_font_size(TEXT_FONT_SIZE)
+ for t in legends:
+ cs = t[0]
+ time = t[1]
+ x = chart_bounds[0] + off_x + int (i/LEGENDS_PER_COL) * label_width
+ y = chart_bounds[1] + font_height * ((i % LEGENDS_PER_COL) + 2)
+ str = "%s - %.0f(ms) (%2.2f%%)" % (cs.cmd, time/1000000, (time/total_time) * 100.0)
+ draw_legend_box(ctx, str, cs.color, x, y, leg_s)
+ i = i + 1
+ if i >= LEGENDS_TOTAL:
+ break
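
The stacked disk-usage rendering added to render_charts() is easiest to see in isolation: for volume i, the plotted value is the summed usage of volumes[0:i], and the series are drawn back to front so each volume appears stacked on the ones before it. A distilled sketch of that computation with invented sample data:

import functools

volumes = ['/', '/home']
min_used = {'/': 100, '/home': 50}
samples = [
    {'/': 120, '/home': 55},
    {'/': 150},                  # samples may omit a volume
    {'/': 160, '/home': 80},
]

for i in range(len(volumes), 0, -1):
    # cumulative usage of volumes[0:i] per sample, as in the reduce above
    series = [functools.reduce(lambda x, y: x + y,
                               [s[v] - min_used[v]
                                for v in volumes[0:i] if v in s],
                               0)
              for s in samples]
    print(volumes[i - 1], series)

Iterating from len(volumes) down to 1 lets each earlier, smaller cumulative series overpaint the larger one, which yields the stacked appearance without any explicit compositing.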
diff --git a/scripts/pybootchartgui/pybootchartgui/gui.py b/scripts/pybootchartgui/pybootchartgui/gui.py
index 7fedd232df..e1fe915563 100644
--- a/scripts/pybootchartgui/pybootchartgui/gui.py
+++ b/scripts/pybootchartgui/pybootchartgui/gui.py
@@ -13,64 +13,83 @@
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
-import gobject
-import gtk
-import gtk.gdk
-import gtk.keysyms
+import gi
+gi.require_version('Gtk', '3.0')
+from gi.repository import Gtk as gtk
+from gi.repository import Gtk
+from gi.repository import Gdk
+from gi.repository import GObject as gobject
+from gi.repository import GObject
+
from . import draw
from .draw import RenderOptions
-class PyBootchartWidget(gtk.DrawingArea):
+class PyBootchartWidget(gtk.DrawingArea, gtk.Scrollable):
__gsignals__ = {
- 'expose-event': 'override',
- 'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, gtk.gdk.Event)),
+ 'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, Gdk.Event)),
'position-changed' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT, gobject.TYPE_INT)),
'set-scroll-adjustments' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gtk.Adjustment, gtk.Adjustment))
}
+ hadjustment = GObject.property(type=Gtk.Adjustment,
+ default=Gtk.Adjustment(),
+ flags=GObject.PARAM_READWRITE)
+ hscroll_policy = GObject.property(type=Gtk.ScrollablePolicy,
+ default=Gtk.ScrollablePolicy.MINIMUM,
+ flags=GObject.PARAM_READWRITE)
+ vadjustment = GObject.property(type=Gtk.Adjustment,
+ default=Gtk.Adjustment(),
+ flags=GObject.PARAM_READWRITE)
+ vscroll_policy = GObject.property(type=Gtk.ScrollablePolicy,
+ default=Gtk.ScrollablePolicy.MINIMUM,
+ flags=GObject.PARAM_READWRITE)
+
def __init__(self, trace, options, xscale):
gtk.DrawingArea.__init__(self)
self.trace = trace
self.options = options
- self.set_flags(gtk.CAN_FOCUS)
+ self.set_can_focus(True)
- self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
+ self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK)
self.connect("button-press-event", self.on_area_button_press)
self.connect("button-release-event", self.on_area_button_release)
- self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
+ self.add_events(Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.POINTER_MOTION_HINT_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK)
self.connect("motion-notify-event", self.on_area_motion_notify)
self.connect("scroll-event", self.on_area_scroll_event)
self.connect('key-press-event', self.on_key_press_event)
- self.connect('set-scroll-adjustments', self.on_set_scroll_adjustments)
self.connect("size-allocate", self.on_allocation_size_changed)
self.connect("position-changed", self.on_position_changed)
+ self.connect("draw", self.on_draw)
+
self.zoom_ratio = 1.0
self.xscale = xscale
self.x, self.y = 0.0, 0.0
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
- self.hadj = None
- self.vadj = None
- self.hadj_changed_signal_id = None
- self.vadj_changed_signal_id = None
-
- def do_expose_event(self, event):
- cr = self.window.cairo_create()
-
- # set a clip region for the expose event
- cr.rectangle(
- event.area.x, event.area.y,
- event.area.width, event.area.height
- )
- cr.clip()
- self.draw(cr, self.get_allocation())
- return False
-
- def draw(self, cr, rect):
+ self.our_width, self.our_height = self.chart_width, self.chart_height
+
+ self.hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+ self.vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+ self.vadj.connect('value-changed', self.on_adjustments_changed)
+ self.hadj.connect('value-changed', self.on_adjustments_changed)
+
+ def bound_vals(self):
+ self.x = max(0, self.x)
+ self.y = max(0, self.y)
+ self.x = min(self.chart_width - self.our_width, self.x)
+ self.y = min(self.chart_height - self.our_height, self.y)
+
+ def on_draw(self, darea, cr):
+ # set a clip region
+ #cr.rectangle(
+ # self.x, self.y,
+ # self.chart_width, self.chart_height
+ #)
+ #cr.clip()
cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
cr.paint()
cr.scale(self.zoom_ratio, self.zoom_ratio)
@@ -84,7 +103,7 @@ class PyBootchartWidget(gtk.DrawingArea):
def zoom_image (self, zoom_ratio):
self.zoom_ratio = zoom_ratio
- self._set_scroll_adjustments (self.hadj, self.vadj)
+ self._set_scroll_adjustments()
self.queue_draw()
def zoom_to_rect (self, rect):
@@ -122,126 +141,101 @@ class PyBootchartWidget(gtk.DrawingArea):
def show_toggled(self, button):
self.options.app_options.show_all = button.get_property ('active')
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
- self._set_scroll_adjustments(self.hadj, self.vadj)
+ self._set_scroll_adjustments()
self.queue_draw()
POS_INCREMENT = 100
def on_key_press_event(self, widget, event):
- if event.keyval == gtk.keysyms.Left:
+ if event.keyval == Gdk.keyval_from_name("Left"):
self.x -= self.POS_INCREMENT/self.zoom_ratio
- elif event.keyval == gtk.keysyms.Right:
+ elif event.keyval == Gdk.keyval_from_name("Right"):
self.x += self.POS_INCREMENT/self.zoom_ratio
- elif event.keyval == gtk.keysyms.Up:
+ elif event.keyval == Gdk.keyval_from_name("Up"):
self.y -= self.POS_INCREMENT/self.zoom_ratio
- elif event.keyval == gtk.keysyms.Down:
+ elif event.keyval == Gdk.keyval_from_name("Down"):
self.y += self.POS_INCREMENT/self.zoom_ratio
else:
return False
+ self.bound_vals()
self.queue_draw()
self.position_changed()
return True
def on_area_button_press(self, area, event):
if event.button == 2 or event.button == 1:
- area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
+ window = self.get_window()
+ window.set_cursor(Gdk.Cursor(Gdk.CursorType.FLEUR))
self.prevmousex = event.x
self.prevmousey = event.y
- if event.type not in (gtk.gdk.BUTTON_PRESS, gtk.gdk.BUTTON_RELEASE):
+ if event.type not in (Gdk.EventType.BUTTON_PRESS, Gdk.EventType.BUTTON_RELEASE):
return False
return False
def on_area_button_release(self, area, event):
if event.button == 2 or event.button == 1:
- area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
+ window = self.get_window()
+ window.set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))
self.prevmousex = None
self.prevmousey = None
return True
return False
def on_area_scroll_event(self, area, event):
- if event.state & gtk.gdk.CONTROL_MASK:
- if event.direction == gtk.gdk.SCROLL_UP:
+ if event.state & Gdk.ModifierType.CONTROL_MASK:
+ if event.direction == Gdk.ScrollDirection.UP:
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
return True
- if event.direction == gtk.gdk.SCROLL_DOWN:
+ if event.direction == Gdk.ScrollDirection.DOWN:
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
return True
return False
def on_area_motion_notify(self, area, event):
state = event.state
- if state & gtk.gdk.BUTTON2_MASK or state & gtk.gdk.BUTTON1_MASK:
+ if state & Gdk.ModifierType.BUTTON2_MASK or state & Gdk.ModifierType.BUTTON1_MASK:
x, y = int(event.x), int(event.y)
# pan the image
self.x += (self.prevmousex - x)/self.zoom_ratio
self.y += (self.prevmousey - y)/self.zoom_ratio
+ self.bound_vals()
self.queue_draw()
self.prevmousex = x
self.prevmousey = y
self.position_changed()
return True
- def on_set_scroll_adjustments(self, area, hadj, vadj):
- self._set_scroll_adjustments (hadj, vadj)
-
def on_allocation_size_changed(self, widget, allocation):
self.hadj.page_size = allocation.width
self.hadj.page_increment = allocation.width * 0.9
self.vadj.page_size = allocation.height
self.vadj.page_increment = allocation.height * 0.9
+ self.our_width = allocation.width
+ if self.chart_width < self.our_width:
+ self.our_width = self.chart_width
+ self.our_height = allocation.height
+ if self.chart_height < self.our_height:
+ self.our_height = self.chart_height
+ self._set_scroll_adjustments()
def _set_adj_upper(self, adj, upper):
- changed = False
- value_changed = False
-
- if adj.upper != upper:
- adj.upper = upper
- changed = True
-
- max_value = max(0.0, upper - adj.page_size)
- if adj.value > max_value:
- adj.value = max_value
- value_changed = True
-
- if changed:
- adj.changed()
- if value_changed:
- adj.value_changed()
-
- def _set_scroll_adjustments(self, hadj, vadj):
- if hadj == None:
- hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
- if vadj == None:
- vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
-
- if self.hadj_changed_signal_id != None and \
- self.hadj != None and hadj != self.hadj:
- self.hadj.disconnect (self.hadj_changed_signal_id)
- if self.vadj_changed_signal_id != None and \
- self.vadj != None and vadj != self.vadj:
- self.vadj.disconnect (self.vadj_changed_signal_id)
-
- if hadj != None:
- self.hadj = hadj
- self._set_adj_upper (self.hadj, self.zoom_ratio * self.chart_width)
- self.hadj_changed_signal_id = self.hadj.connect('value-changed', self.on_adjustments_changed)
-
- if vadj != None:
- self.vadj = vadj
- self._set_adj_upper (self.vadj, self.zoom_ratio * self.chart_height)
- self.vadj_changed_signal_id = self.vadj.connect('value-changed', self.on_adjustments_changed)
+
+ if adj.get_upper() != upper:
+ adj.set_upper(upper)
+
+ def _set_scroll_adjustments(self):
+ self._set_adj_upper (self.hadj, self.zoom_ratio * (self.chart_width - self.our_width))
+ self._set_adj_upper (self.vadj, self.zoom_ratio * (self.chart_height - self.our_height))
def on_adjustments_changed(self, adj):
- self.x = self.hadj.value / self.zoom_ratio
- self.y = self.vadj.value / self.zoom_ratio
+ self.x = self.hadj.get_value() / self.zoom_ratio
+ self.y = self.vadj.get_value() / self.zoom_ratio
self.queue_draw()
def on_position_changed(self, widget, x, y):
- self.hadj.value = x * self.zoom_ratio
- self.vadj.value = y * self.zoom_ratio
-
-PyBootchartWidget.set_set_scroll_adjustments_signal('set-scroll-adjustments')
+ self.hadj.set_value(x * self.zoom_ratio)
+ #self.hadj.value_changed()
+ self.vadj.set_value(y * self.zoom_ratio)
class PyBootchartShell(gtk.VBox):
ui = '''
@@ -260,7 +254,7 @@ class PyBootchartShell(gtk.VBox):
def __init__(self, window, trace, options, xscale):
gtk.VBox.__init__(self)
- self.widget = PyBootchartWidget(trace, options, xscale)
+ self.widget2 = PyBootchartWidget(trace, options, xscale)
# Create a UIManager instance
uimanager = self.uimanager = gtk.UIManager()
@@ -275,12 +269,12 @@ class PyBootchartShell(gtk.VBox):
# Create actions
actiongroup.add_actions((
- ('Expand', gtk.STOCK_ADD, None, None, None, self.widget.on_expand),
- ('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget.on_contract),
- ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
- ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
- ('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget.on_zoom_fit),
- ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
+ ('Expand', gtk.STOCK_ADD, None, None, None, self.widget2.on_expand),
+ ('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget2.on_contract),
+ ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget2.on_zoom_in),
+ ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget2.on_zoom_out),
+ ('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget2.on_zoom_fit),
+ ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget2.on_zoom_100),
))
# Add the actiongroup to the uimanager
@@ -290,29 +284,33 @@ class PyBootchartShell(gtk.VBox):
uimanager.add_ui_from_string(self.ui)
# Scrolled window
- scrolled = gtk.ScrolledWindow()
- scrolled.add(self.widget)
+ scrolled = gtk.ScrolledWindow(self.widget2.hadj, self.widget2.vadj)
+ scrolled.add(self.widget2)
+
+ #scrolled.set_hadjustment()
+ #scrolled.set_vadjustment(self.widget2.vadj)
+ scrolled.set_policy(gtk.PolicyType.ALWAYS, gtk.PolicyType.ALWAYS)
# toolbar / h-box
hbox = gtk.HBox(False, 8)
# Create a Toolbar
toolbar = uimanager.get_widget('/ToolBar')
- hbox.pack_start(toolbar, True, True)
+ hbox.pack_start(toolbar, True, True, 0)
if not options.kernel_only:
# Misc. options
button = gtk.CheckButton("Show more")
- button.connect ('toggled', self.widget.show_toggled)
+ button.connect ('toggled', self.widget2.show_toggled)
button.set_active(options.app_options.show_all)
- hbox.pack_start (button, False, True)
+ hbox.pack_start (button, False, True, 0)
- self.pack_start(hbox, False)
- self.pack_start(scrolled)
+ self.pack_start(hbox, False, True, 0)
+ self.pack_start(scrolled, True, True, 0)
self.show_all()
def grab_focus(self, window):
- window.set_focus(self.widget)
+ window.set_focus(self.widget2)
class PyBootchartWindow(gtk.Window):
diff --git a/scripts/pybootchartgui/pybootchartgui/parsing.py b/scripts/pybootchartgui/pybootchartgui/parsing.py
index a3a0b0b339..ef2d3d309c 100644
--- a/scripts/pybootchartgui/pybootchartgui/parsing.py
+++ b/scripts/pybootchartgui/pybootchartgui/parsing.py
@@ -38,16 +38,18 @@ class Trace:
self.min = None
self.max = None
self.headers = None
- self.disk_stats = None
+ self.disk_stats = []
self.ps_stats = None
self.taskstats = None
- self.cpu_stats = None
+ self.cpu_stats = []
self.cmdline = None
self.kernel = None
self.kernel_tree = None
self.filename = None
self.parent_map = None
- self.mem_stats = None
+ self.mem_stats = []
+ self.monitor_disk = None
+ self.times = [] # Always empty, but expected by draw.py when drawing system charts.
if len(paths):
parse_paths (writer, self, paths)
@@ -58,6 +60,19 @@ class Trace:
self.min = min(self.start.keys())
self.max = max(self.end.keys())
+
+ # Rendering system charts depends on start and end
+ # time. Provide them where the original drawing code expects
+ # them, i.e. in proc_tree.
+ class BitbakeProcessTree:
+ def __init__(self, start_time, end_time):
+ self.start_time = start_time
+ self.end_time = end_time
+ self.duration = self.end_time - self.start_time
+ self.proc_tree = BitbakeProcessTree(min(self.start.keys()),
+ max(self.end.keys()))
+
+
return
# Turn that parsed information into something more useful
@@ -252,7 +267,7 @@ def _parse_headers(file):
value = line.strip()
headers[last] += value
return headers, last
- return reduce(parse, file.read().decode('utf-8').split('\n'), (defaultdict(str),''))[0]
+ return reduce(parse, file.read().split('\n'), (defaultdict(str),''))[0]
def _parse_timed_blocks(file):
"""Parses (ie., splits) a file into so-called timed-blocks. A
@@ -266,7 +281,7 @@ def _parse_timed_blocks(file):
return (int(lines[0]), lines[1:])
except ValueError:
raise ParseError("expected a timed-block, but timestamp '%s' is not an integer" % lines[0])
- blocks = file.read().decode('utf-8').split('\n\n')
+ blocks = file.read().split('\n\n')
return [parse(block) for block in blocks if block.strip() and not block.endswith(' not running\n')]
def _parse_proc_ps_log(writer, file):
@@ -427,7 +442,13 @@ def _parse_proc_stat_log(file):
# skip the rest of statistics lines
return samples
-def _parse_proc_disk_stat_log(file, numCpu):
+def _parse_reduced_log(file, sample_class):
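+ # Reduced logs carry one line of space-separated floats per timed
+ # block; sample_class (e.g. CPUSample or DiskSample) is constructed
+ # as sample_class(time, *floats).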
+ samples = []
+ for time, lines in _parse_timed_blocks(file):
+ samples.append(sample_class(time, *[float(x) for x in lines[0].split()]))
+ return samples
+
+def _parse_proc_disk_stat_log(file):
"""
Parse file for disk stats, but only look at the whole device, eg. sda,
not sda1, sda2 etc. The format of relevant lines should be:
@@ -462,12 +483,31 @@ def _parse_proc_disk_stat_log(file, numCpu):
sums = [ a - b for a, b in zip(sample1.diskdata, sample2.diskdata) ]
readTput = sums[0] / 2.0 * 100.0 / interval
writeTput = sums[1] / 2.0 * 100.0 / interval
- util = float( sums[2] ) / 10 / interval / numCpu
+ util = float( sums[2] ) / 10 / interval
util = max(0.0, min(1.0, util))
disk_stats.append(DiskSample(sample2.time, readTput, writeTput, util))
return disk_stats
+def _parse_reduced_proc_meminfo_log(file):
+ """
+ Parse file for global memory statistics with
+ 'MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree' values
+ (in that order) directly stored on one line.
+ """
+ used_values = ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree',)
+
+ mem_stats = []
+ for time, lines in _parse_timed_blocks(file):
+ sample = MemSample(time)
+ for name, value in zip(used_values, lines[0].split()):
+ sample.add_value(name, int(value))
+
+ if sample.valid():
+ mem_stats.append(DrawMemSample(sample))
+
+ return mem_stats
+
def _parse_proc_meminfo_log(file):
"""
Parse file for global memory statistics.
@@ -484,14 +524,37 @@ def _parse_proc_meminfo_log(file):
for line in lines:
match = meminfo_re.match(line)
if not match:
- raise ParseError("Invalid meminfo line \"%s\"" % match.groups(0))
+ raise ParseError("Invalid meminfo line \"%s\"" % line)
sample.add_value(match.group(1), int(match.group(2)))
if sample.valid():
- mem_stats.append(sample)
+ mem_stats.append(DrawMemSample(sample))
return mem_stats
+def _parse_monitor_disk_log(file):
+ """
+ Parse file with information about the amount of disk space used.
+ The format of relevant lines should be: ^volume path: number-of-bytes
+ """
+ disk_stats = []
+ diskinfo_re = re.compile(r'^(.+):\s*(\d+)$')
+
+ for time, lines in _parse_timed_blocks(file):
+ sample = DiskSpaceSample(time)
+
+ for line in lines:
+ match = diskinfo_re.match(line)
+ if not match:
+ raise ParseError("Invalid monitor_disk line \"%s\"" % line)
+ sample.add_value(match.group(1), int(match.group(2)))
+
+ if sample.valid():
+ disk_stats.append(sample)
+
+ return disk_stats
+
+
# if we boot the kernel with: initcall_debug printk.time=1 we can
# get all manner of interesting data from the dmesg output
# We turn this into a pseudo-process tree: each event is
@@ -514,7 +577,7 @@ def _parse_dmesg(writer, file):
processMap['k-boot'] = kernel
base_ts = False
max_ts = 0
- for line in file.read().decode('utf-8').split('\n'):
+ for line in file.read().split('\n'):
t = timestamp_re.match (line)
if t is None:
# print "duff timestamp " + line
@@ -602,7 +665,7 @@ def _parse_pacct(writer, file):
def _parse_paternity_log(writer, file):
parent_map = {}
parent_map[0] = 0
- for line in file.read().decode('utf-8').split('\n'):
+ for line in file.read().split('\n'):
if not line:
continue
elems = line.split(' ') # <Child> <Parent>
@@ -615,7 +678,7 @@ def _parse_paternity_log(writer, file):
def _parse_cmdline_log(writer, file):
cmdLines = {}
- for block in file.read().decode('utf-8').split('\n\n'):
+ for block in file.read().split('\n\n'):
lines = block.split('\n')
if len (lines) >= 3:
# print "Lines '%s'" % (lines[0])
@@ -628,6 +691,20 @@ def _parse_cmdline_log(writer, file):
cmdLines[pid] = values
return cmdLines
+def _parse_bitbake_buildstats(writer, state, filename, file):
+ paths = filename.split("/")
+ task = paths[-1]
+ pn = paths[-2]
+ start = None
+ end = None
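+ # Buildstats task files live at .../<pn>/<task> and contain lines
+ # such as (hypothetical) "Started: 1550000000.00" / "Ended: 1550000012.34".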
+ for line in file:
+ if line.startswith("Started:"):
+ start = int(float(line.split()[-1]))
+ elif line.startswith("Ended:"):
+ end = int(float(line.split()[-1]))
+ if start and end:
+ state.add_process(pn + ":" + task, start, end)
+
def get_num_cpus(headers):
"""Get the number of CPUs from the system.cpu header property. As the
CPU utilization graphs are relative, the number of CPUs currently makes
@@ -647,18 +724,25 @@ def get_num_cpus(headers):
def _do_parse(writer, state, filename, file):
writer.info("parsing '%s'" % filename)
t1 = clock()
- paths = filename.split("/")
- task = paths[-1]
- pn = paths[-2]
- start = None
- end = None
- for line in file:
- if line.startswith("Started:"):
- start = int(float(line.split()[-1]))
- elif line.startswith("Ended:"):
- end = int(float(line.split()[-1]))
- if start and end:
- state.add_process(pn + ":" + task, start, end)
+ name = os.path.basename(filename)
+ if name == "proc_diskstats.log":
+ state.disk_stats = _parse_proc_disk_stat_log(file)
+ elif name == "reduced_proc_diskstats.log":
+ state.disk_stats = _parse_reduced_log(file, DiskSample)
+ elif name == "proc_stat.log":
+ state.cpu_stats = _parse_proc_stat_log(file)
+ elif name == "reduced_proc_stat.log":
+ state.cpu_stats = _parse_reduced_log(file, CPUSample)
+ elif name == "proc_meminfo.log":
+ state.mem_stats = _parse_proc_meminfo_log(file)
+ elif name == "reduced_proc_meminfo.log":
+ state.mem_stats = _parse_reduced_proc_meminfo_log(file)
+ elif name == "cmdline2.log":
+ state.cmdline = _parse_cmdline_log(writer, file)
+ elif name == "monitor_disk.log":
+ state.monitor_disk = _parse_monitor_disk_log(file)
+ elif not filename.endswith('.log'):
+ _parse_bitbake_buildstats(writer, state, filename, file)
t2 = clock()
writer.info(" %s seconds" % str(t2-t1))
return state
@@ -667,7 +751,7 @@ def parse_file(writer, state, filename):
if state.filename is None:
state.filename = filename
basename = os.path.basename(filename)
- with open(filename, "rb") as file:
+ with open(filename, "r") as file:
return _do_parse(writer, state, filename, file)
def parse_paths(writer, state, paths):
diff --git a/scripts/pybootchartgui/pybootchartgui/samples.py b/scripts/pybootchartgui/pybootchartgui/samples.py
index 015d743aa0..9fc309b3ab 100644
--- a/scripts/pybootchartgui/pybootchartgui/samples.py
+++ b/scripts/pybootchartgui/pybootchartgui/samples.py
@@ -53,6 +53,33 @@ class MemSample:
# discard incomplete samples
return [v for v in MemSample.used_values if v not in keys] == []
+class DrawMemSample:
+ """
+ Condensed version of a MemSample with exactly the values used by the drawing code.
+ Initialized either from a valid MemSample or
+ a tuple/list of buffer/used/cached/swap values.
+ """
+ def __init__(self, mem_sample):
+ self.time = mem_sample.time
+ if isinstance(mem_sample, MemSample):
+ self.buffers = mem_sample.records['MemTotal'] - mem_sample.records['MemFree']
+ self.used = mem_sample.records['MemTotal'] - mem_sample.records['MemFree'] - mem_sample.records['Buffers']
+ self.cached = mem_sample.records['Cached']
+ self.swap = mem_sample.records['SwapTotal'] - mem_sample.records['SwapFree']
+ else:
+ self.buffers, self.used, self.cached, self.swap = mem_sample
+
+class DiskSpaceSample:
+ def __init__(self, time):
+ self.time = time
+ self.records = {}
+
+ def add_value(self, name, value):
+ self.records[name] = value
+
+ def valid(self):
+ return bool(self.records)
+
class ProcessSample:
def __init__(self, time, state, cpu_sample):
self.time = time
diff --git a/scripts/pythondeps b/scripts/pythondeps
index 590b9769e7..be21dd84eb 100755
--- a/scripts/pythondeps
+++ b/scripts/pythondeps
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Determine dependencies of python scripts or available python modules in a search path.
#
# Given the -d argument and a filename/filenames, returns the modules imported by those files.
@@ -9,7 +11,8 @@
import argparse
import ast
-import imp
+import importlib
+from importlib import machinery
import logging
import os.path
import sys
@@ -17,10 +20,7 @@ import sys
logger = logging.getLogger('pythondeps')
-suffixes = []
-for triple in imp.get_suffixes():
- suffixes.append(triple[0])
-
+suffixes = importlib.machinery.all_suffixes()
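+# all_suffixes() covers source, bytecode and extension modules,
+# e.g. ['.py', '.pyc', '.so'] (the exact list varies by interpreter).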
class PythonDepError(Exception):
pass
diff --git a/scripts/recipetool b/scripts/recipetool
index 1052cd2b22..e2d585d2c5 100755
--- a/scripts/recipetool
+++ b/scripts/recipetool
@@ -4,18 +4,8 @@
#
# Copyright (C) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -36,8 +26,8 @@ def tinfoil_init(parserecipes):
import bb.tinfoil
import logging
tinfoil = bb.tinfoil.Tinfoil(tracking=True)
- tinfoil.prepare(not parserecipes)
tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(not parserecipes)
return tinfoil
def main():
@@ -73,13 +63,14 @@ def main():
logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
sys.exit(1)
logger.debug('Found bitbake path: %s' % bitbakepath)
+ scriptpath.add_oe_lib_path()
scriptutils.logger_setup_color(logger, global_args.color)
tinfoil = tinfoil_init(False)
try:
- for path in ([scripts_path] +
- tinfoil.config_data.getVar('BBPATH', True).split(':')):
+ for path in (tinfoil.config_data.getVar('BBPATH').split(':')
+ + [scripts_path]):
pluginpath = os.path.join(path, 'lib', 'recipetool')
scriptutils.load_plugins(logger, plugins, pluginpath)
diff --git a/scripts/relocate_sdk.py b/scripts/relocate_sdk.py
index e47b4d916e..8c0fdb986a 100755
--- a/scripts/relocate_sdk.py
+++ b/scripts/relocate_sdk.py
@@ -2,18 +2,7 @@
#
# Copyright (c) 2012 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This script is called by the SDK installer script. It replaces the dynamic
@@ -103,6 +92,8 @@ def change_interpreter(elf_file_name):
fname.startswith(b("/lib32/")) or fname.startswith(b("/usr/lib32/")) or \
fname.startswith(b("/usr/lib32/")) or fname.startswith(b("/usr/lib64/")):
break
+ if p_filesz == 0:
+ break
if (len(new_dl_path) >= p_filesz):
print("ERROR: could not relocate %s, interp size = %i and %i is needed." \
% (elf_file_name, p_memsz, len(new_dl_path) + 1))
diff --git a/scripts/resulttool b/scripts/resulttool
new file mode 100755
index 0000000000..fc282bda6c
--- /dev/null
+++ b/scripts/resulttool
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+#
+# test results tool - tool for manipulating OEQA test result json files
+# (merge results, summarise results, regression analysis, generate manual test results file)
+#
+# To see help information, execute the below
+# $ resulttool
+#
+# To store test results from oeqa automated tests, execute the below
+# $ resulttool store <source_dir> <git_branch>
+#
+# To merge test results, execute the below
+# $ resulttool merge <base_result_file> <target_result_file>
+#
+# To generate a test report, execute the below
+# $ resulttool report <source_dir>
+#
+# To perform regression file analysis, execute the below
+# $ resulttool regression-file <base_result_file> <target_result_file>
+#
+# To execute manual test cases, execute the below
+# $ resulttool manualexecution <manualjsonfile>
+#
+# By default, testresults.json for manualexecution is stored in <build>/tmp/log/manual/
+#
+# Copyright (c) 2019, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import sys
+import argparse
+import logging
+script_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = script_path + '/lib'
+sys.path = sys.path + [lib_path]
+import argparse_oe
+import scriptutils
+import resulttool.merge
+import resulttool.store
+import resulttool.regression
+import resulttool.report
+import resulttool.manualexecution
+import resulttool.log
+logger = scriptutils.logger_create('resulttool')
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="OEQA test result manipulation tool.",
+ epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+ parser.add_argument('-d', '--debug', help='enable debug output', action='store_true')
+ parser.add_argument('-q', '--quiet', help='print only errors', action='store_true')
+ subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
+ subparsers.add_subparser_group('manualexecution', 'manual testcases', 300)
+ resulttool.manualexecution.register_commands(subparsers)
+ subparsers.add_subparser_group('setup', 'setup', 200)
+ resulttool.merge.register_commands(subparsers)
+ resulttool.store.register_commands(subparsers)
+ subparsers.add_subparser_group('analysis', 'analysis', 100)
+ resulttool.regression.register_commands(subparsers)
+ resulttool.report.register_commands(subparsers)
+ resulttool.log.register_commands(subparsers)
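+ # The numeric argument to add_subparser_group() above is a sort key
+ # argparse_oe uses to order the command groups in --help output.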
+
+ args = parser.parse_args()
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ try:
+ ret = args.func(args, logger)
+ except argparse_oe.ArgumentUsageError as ae:
+ parser.error_subcommand(ae.message, ae.subcommand)
+ return ret
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/scripts/rpm2cpio.sh b/scripts/rpm2cpio.sh
index 5df8c0f705..876c53c5d9 100755
--- a/scripts/rpm2cpio.sh
+++ b/scripts/rpm2cpio.sh
@@ -1,53 +1,54 @@
-#!/bin/sh
-
-# This comes from the RPM5 5.4.0 distribution.
-
-pkg=$1
-if [ "$pkg" = "" -o ! -e "$pkg" ]; then
- echo "no package supplied" 1>&2
- exit 1
-fi
-
-leadsize=96
-o=`expr $leadsize + 8`
-set `od -j $o -N 8 -t u1 $pkg`
-il=`expr 256 \* \( 256 \* \( 256 \* $2 + $3 \) + $4 \) + $5`
-dl=`expr 256 \* \( 256 \* \( 256 \* $6 + $7 \) + $8 \) + $9`
-# echo "sig il: $il dl: $dl"
-
-sigsize=`expr 8 + 16 \* $il + $dl`
-o=`expr $o + $sigsize + \( 8 - \( $sigsize \% 8 \) \) \% 8 + 8`
-set `od -j $o -N 8 -t u1 $pkg`
-il=`expr 256 \* \( 256 \* \( 256 \* $2 + $3 \) + $4 \) + $5`
-dl=`expr 256 \* \( 256 \* \( 256 \* $6 + $7 \) + $8 \) + $9`
-# echo "hdr il: $il dl: $dl"
-
-hdrsize=`expr 8 + 16 \* $il + $dl`
-o=`expr $o + $hdrsize`
-EXTRACTOR="dd if=$pkg ibs=$o skip=1"
-
-COMPRESSION=`($EXTRACTOR |file -) 2>/dev/null`
-if echo $COMPRESSION |grep -iq gzip; then
- DECOMPRESSOR=gunzip
-elif echo $COMPRESSION |grep -iq bzip2; then
- DECOMPRESSOR=bunzip2
-elif echo $COMPRESSION |grep -iq xz; then
- DECOMPRESSOR=unxz
-elif echo $COMPRESSION |grep -iq cpio; then
- DECOMPRESSOR=cat
-else
- # Most versions of file don't support LZMA, therefore we assume
- # anything not detected is LZMA
- DECOMPRESSOR=`which unlzma 2>/dev/null`
- case "$DECOMPRESSOR" in
- /* ) ;;
- * ) DECOMPRESSOR=`which lzmash 2>/dev/null`
- case "$DECOMPRESSOR" in
- /* ) DECOMPRESSOR="lzmash -d -c" ;;
- * ) DECOMPRESSOR=cat ;;
- esac
- ;;
- esac
-fi
-
-$EXTRACTOR 2>/dev/null | $DECOMPRESSOR
+#!/bin/sh -efu
+# This file comes from rpm 4.x distribution
+
+fatal() {
+ echo "$*" >&2
+ exit 1
+}
+
+pkg="$1"
+[ -n "$pkg" -a -e "$pkg" ] ||
+ fatal "No package supplied"
+
+_dd() {
+ local o="$1"; shift
+ dd if="$pkg" skip="$o" iflag=skip_bytes status=none $*
+}
+
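+# Each rpm header structure is an 8-byte magic/version/reserved preamble,
+# then a 4-byte big-endian count of 16-byte index entries (il) and a
+# 4-byte data-section size (dl). calcsize() reads il and dl byte by byte
+# and advances $offset past the whole structure, i.e. 8 + 16 * il + dl
+# bytes beyond the preamble.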
+calcsize() {
+ offset=$(($1 + 8))
+
+ local i b b0 b1 b2 b3 b4 b5 b6 b7
+
+ i=0
+ while [ $i -lt 8 ]; do
+ b="$(_dd $(($offset + $i)) bs=1 count=1)"
+ [ -z "$b" ] &&
+ b="0" ||
+ b="$(exec printf '%u\n' "'$b")"
+ eval "b$i=\$b"
+ i=$(($i + 1))
+ done
+
+ rsize=$((8 + ((($b0 << 24) + ($b1 << 16) + ($b2 << 8) + $b3) << 4) + ($b4 << 24) + ($b5 << 16) + ($b6 << 8) + $b7))
+ offset=$(($offset + $rsize))
+}
+
+case "$(_dd 0 bs=8 count=1)" in
+ "$(printf '\355\253\356\333')"*) ;; # '\xed\xab\xee\xdb'
+ *) fatal "File doesn't look like rpm: $pkg" ;;
+esac
+
+calcsize 96
+sigsize=$rsize
+
+calcsize $(($offset + (8 - ($sigsize % 8)) % 8))
+hdrsize=$rsize
+
+case "$(_dd $offset bs=3 count=1)" in
+ "$(printf '\102\132')"*) _dd $offset | bunzip2 ;; # '\x42\x5a'
+ "$(printf '\037\213')"*) _dd $offset | gunzip ;; # '\x1f\x8b'
+ "$(printf '\375\067')"*) _dd $offset | xzcat ;; # '\xfd\x37'
+ "$(printf '\135\000')"*) _dd $offset | unlzma ;; # '\x5d\x00'
+ *) fatal "Unrecognized rpm file: $pkg" ;;
+esac
diff --git a/scripts/runqemu b/scripts/runqemu
index 45bcad7a80..ef454d67ff 100755
--- a/scripts/runqemu
+++ b/scripts/runqemu
@@ -5,18 +5,8 @@
# Copyright (C) 2006-2011 Linux Foundation
# Copyright (c) 2016 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
@@ -27,15 +17,20 @@ import fcntl
import shutil
import glob
import configparser
+import signal
+
+class RunQemuError(Exception):
+ """Custom exception to raise on known errors."""
+ pass
-class OEPathError(Exception):
+class OEPathError(RunQemuError):
"""Custom Exception to give better guidance on missing binaries"""
def __init__(self, message):
- self.message = "In order for this script to dynamically infer paths\n \
+ super().__init__("In order for this script to dynamically infer paths\n \
kernels or filesystem images, you either need bitbake in your PATH\n \
or to source oe-init-build-env before running this script.\n\n \
Dynamic path inference can be avoided by passing a *.qemuboot.conf to\n \
-runqemu, i.e. `runqemu /path/to/my-image-name.qemuboot.conf`\n\n %s" % message
+runqemu, i.e. `runqemu /path/to/my-image-name.qemuboot.conf`\n\n %s" % message)
def create_logger():
@@ -44,7 +39,7 @@ def create_logger():
# create console handler and set level to debug
ch = logging.StreamHandler()
- ch.setLevel(logging.INFO)
+ ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
@@ -64,78 +59,56 @@ def print_usage():
Usage: you can run this script with any valid combination
of the following environment variables (in any order):
KERNEL - the kernel image file to use
+ BIOS - the bios image file to use
ROOTFS - the rootfs image file or nfsroot directory to use
+ DEVICE_TREE - the device tree blob to use
MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified)
Simplified QEMU command-line options can be passed with:
nographic - disable video console
+ sdl - choose the SDL UI frontend
+ gtk - choose the Gtk UI frontend
+ gl - enable virgl-based GL acceleration (also needs gtk or sdl options)
+ gl-es - enable virgl-based GL acceleration, using OpenGL ES (also needs gtk or sdl options)
+ egl-headless - enable headless EGL output; use vnc (via publicvnc option) or spice to see it
serial - enable a serial console on /dev/ttyS0
+ serialstdio - enable a serial console on the console (regardless of graphics mode)
slirp - enable user networking; no root privileges are required
+ snapshot - don't write changes back to images
kvm - enable KVM when running x86/x86_64 (VT-capable CPU required)
kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required)
publicvnc - enable a VNC server open to all hosts
audio - enable audio
+ [*/]ovmf* - OVMF firmware file or base name for booting with UEFI
tcpserial=<port> - specify tcp serial port number
- biosdir=<dir> - specify custom bios dir
- biosfilename=<filename> - specify bios filename
qemuparams=<xyz> - specify custom parameters to QEMU
bootparams=<xyz> - specify custom kernel parameters during boot
- help: print this text
+ help, -h, --help: print this text
+ -d, --debug: Enable debug output
+ -q, --quiet: Hide most output except error messages
Examples:
+ runqemu
runqemu qemuarm
runqemu tmp/deploy/images/qemuarm
- runqemu tmp/deploy/images/qemux86/.qemuboot.conf
+ runqemu tmp/deploy/images/qemux86/<qemuboot.conf>
runqemu qemux86-64 core-image-sato ext4
runqemu qemux86-64 wic-image-minimal wic
runqemu path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial
- runqemu qemux86 iso/hddimg/vmdk/qcow2/vdi/ramfs/cpio.gz...
+ runqemu qemux86 iso/hddimg/wic.vmdk/wic.qcow2/wic.vdi/ramfs/cpio.gz...
runqemu qemux86 qemuparams="-m 256"
runqemu qemux86 bootparams="psplash=false"
- runqemu path/to/<image>-<machine>.vmdk
runqemu path/to/<image>-<machine>.wic
+ runqemu path/to/<image>-<machine>.wic.vmdk
""")
def check_tun():
- """Check /dev/net/run"""
+ """Check /dev/net/tun"""
dev_tun = '/dev/net/tun'
if not os.path.exists(dev_tun):
- raise Exception("TUN control device %s is unavailable; you may need to enable TUN (e.g. sudo modprobe tun)" % dev_tun)
+ raise RunQemuError("TUN control device %s is unavailable; you may need to enable TUN (e.g. sudo modprobe tun)" % dev_tun)
if not os.access(dev_tun, os.W_OK):
- raise Exception("TUN control device %s is not writable, please fix (e.g. sudo chmod 666 %s)" % (dev_tun, dev_tun))
-
-def check_libgl(qemu_bin):
- cmd = 'ldd %s' % qemu_bin
- logger.info('Running %s...' % cmd)
- need_gl = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
- if re.search('libGLU', need_gl):
- # We can't run without a libGL.so
- libgl = False
- check_files = (('/usr/lib/libGL.so', '/usr/lib/libGLU.so'), \
- ('/usr/lib64/libGL.so', '/usr/lib64/libGLU.so'), \
- ('/usr/lib/*-linux-gnu/libGL.so', '/usr/lib/*-linux-gnu/libGLU.so'))
-
- for (f1, f2) in check_files:
- if re.search('\*', f1):
- for g1 in glob.glob(f1):
- if libgl:
- break
- if os.path.exists(g1):
- for g2 in glob.glob(f2):
- if os.path.exists(g2):
- libgl = True
- break
- if libgl:
- break
- else:
- if os.path.exists(f1) and os.path.exists(f2):
- libgl = True
- break
- if not libgl:
- logger.error("You need libGL.so and libGLU.so to exist in your library path to run the QEMU emulator.")
- logger.error("Ubuntu package names are: libgl1-mesa-dev and libglu1-mesa-dev.")
- logger.error("Fedora package names are: mesa-libGL-devel mesa-libGLU-devel.")
- raise Exception('%s requires libGLU, but not found' % qemu_bin)
+ raise RunQemuError("TUN control device %s is not writable, please fix (e.g. sudo chmod 666 %s)" % (dev_tun, dev_tun))
def get_first_file(cmds):
"""Return first file found in wildcard cmds"""
@@ -149,24 +122,43 @@ def get_first_file(cmds):
class BaseConfig(object):
def __init__(self):
- # Vars can be merged with .qemuboot.conf, use a dict to manage them.
- self.d = {
- 'MACHINE': '',
- 'DEPLOY_DIR_IMAGE': '',
- 'QB_KERNEL_ROOT': '/dev/vda',
- }
+ # self.d holds the vars saved via self.set(); some of them come from qemuboot.conf
+ self.d = {'QB_KERNEL_ROOT': '/dev/vda'}
+
+ # Supported env vars; add a var here if it can be taken from the
+ # environment, and don't call os.getenv directly elsewhere in the code.
+ self.env_vars = ('MACHINE',
+ 'ROOTFS',
+ 'KERNEL',
+ 'BIOS',
+ 'DEVICE_TREE',
+ 'DEPLOY_DIR_IMAGE',
+ 'OE_TMPDIR',
+ 'OECORE_NATIVE_SYSROOT',
+ )
self.qemu_opt = ''
self.qemu_opt_script = ''
- self.nfs_dir = ''
+ self.qemuparams = ''
self.clean_nfs_dir = False
self.nfs_server = ''
self.rootfs = ''
+ # File name(s) of an OVMF firmware file or variable store,
+ # to be added with -drive if=pflash.
+ # Found in the same places as the rootfs, with or without one of
+ # these suffixes: qcow2, bin.
+ self.ovmf_bios = []
+ # When enrolling default Secure Boot keys, the hypervisor
+ # must provide the Platform Key and the first Key Exchange Key
+ # certificate in the Type 11 SMBIOS table.
+ self.ovmf_secboot_pkkek1 = ''
self.qemuboot = ''
self.qbconfload = False
self.kernel = ''
+ self.bios = ''
self.kernel_cmdline = ''
self.kernel_cmdline_script = ''
+ self.bootparams = ''
self.dtb = ''
self.fstype = ''
self.kvm_enabled = False
@@ -174,39 +166,114 @@ class BaseConfig(object):
self.slirp_enabled = False
self.nfs_instance = 0
self.nfs_running = False
+ self.serialconsole = False
self.serialstdio = False
self.cleantap = False
self.saved_stty = ''
self.audio_enabled = False
self.tcpserial_portnum = ''
- self.custombiosdir = ''
- self.lock = ''
- self.lock_descriptor = ''
+ self.taplock = ''
+ self.taplock_descriptor = None
+ self.portlocks = {}
self.bitbake_e = ''
self.snapshot = False
- self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs', 'cpio.gz', 'cpio', 'ramfs')
- self.vmtypes = ('hddimg', 'hdddirect', 'wic', 'vmdk', 'qcow2', 'vdi', 'iso')
+ self.wictypes = ('wic', 'wic.vmdk', 'wic.qcow2', 'wic.vdi')
+ self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs',
+ 'cpio.gz', 'cpio', 'ramfs', 'tar.bz2', 'tar.gz')
+ self.vmtypes = ('hddimg', 'iso')
+ self.fsinfo = {}
+ self.network_device = "-device e1000,netdev=net0,mac=@MAC@"
+ # Use a different MAC prefix for tap and slirp to avoid
+ # conflicts, e.g., when one instance is running with tap and
+ # another with slirp.
+ # The last octet is dynamic, to avoid conflicts when multiple
+ # qemus are running, e.g., multiple tap or slirp instances.
+ self.mac_tap = "52:54:00:12:34:"
+ self.mac_slirp = "52:54:00:12:35:"
+ # pid of the actual qemu process
+ self.qemupid = None
+ # avoid cleanup twice
+ self.cleaned = False
+
+ def acquire_taplock(self, error=True):
+ logger.debug("Acquiring lockfile %s..." % self.taplock)
+ try:
+ self.taplock_descriptor = open(self.taplock, 'w')
+ fcntl.flock(self.taplock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except Exception as e:
+ msg = "Acquiring lockfile %s failed: %s" % (self.taplock, e)
+ if error:
+ logger.error(msg)
+ else:
+ logger.info(msg)
+ if self.taplock_descriptor:
+ self.taplock_descriptor.close()
+ self.taplock_descriptor = None
+ return False
+ return True
+
+ def release_taplock(self):
+ if self.taplock_descriptor:
+ logger.debug("Releasing lockfile for tap device '%s'" % self.tap)
+ fcntl.flock(self.taplock_descriptor, fcntl.LOCK_UN)
+ self.taplock_descriptor.close()
+ os.remove(self.taplock)
+ self.taplock_descriptor = None
+
+ def check_free_port(self, host, port, lockdir):
+ """ Check whether the port is free or not """
+ import socket
+ from contextlib import closing
+
+ lockfile = os.path.join(lockdir, str(port) + '.lock')
+ if self.acquire_portlock(lockfile):
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
+ if sock.connect_ex((host, port)) == 0:
+ # Port is open, so not free
+ self.release_portlock(lockfile)
+ return False
+ else:
+ # Port is not open, so free
+ return True
+ else:
+ return False
- def acquire_lock(self):
- logger.info("Acquiring lockfile %s..." % self.lock)
+ def acquire_portlock(self, lockfile):
+ logger.debug("Acquiring lockfile %s..." % lockfile)
try:
- self.lock_descriptor = open(self.lock, 'w')
- fcntl.flock(self.lock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ portlock_descriptor = open(lockfile, 'w')
+ self.portlocks.update({lockfile: portlock_descriptor})
+ fcntl.flock(self.portlocks[lockfile], fcntl.LOCK_EX|fcntl.LOCK_NB)
except Exception as e:
- logger.info("Acquiring lockfile %s failed: %s" % (self.lock, e))
- if self.lock_descriptor:
- self.lock_descriptor.close()
+ msg = "Acquiring lockfile %s failed: %s" % (lockfile, e)
+ logger.info(msg)
+ if lockfile in self.portlocks.keys() and self.portlocks[lockfile]:
+ self.portlocks[lockfile].close()
+ del self.portlocks[lockfile]
return False
return True
- def release_lock(self):
- fcntl.flock(self.lock_descriptor, fcntl.LOCK_UN)
- self.lock_descriptor.close()
- os.remove(self.lock)
+ def release_portlock(self, lockfile=None):
+ if lockfile != None:
+ logger.debug("Releasing lockfile '%s'" % lockfile)
+ fcntl.flock(self.portlocks[lockfile], fcntl.LOCK_UN)
+ self.portlocks[lockfile].close()
+ os.remove(lockfile)
+ del self.portlocks[lockfile]
+ elif len(self.portlocks):
+ for lockfile, descriptor in self.portlocks.items():
+ logger.debug("Releasing lockfile '%s'" % lockfile)
+ fcntl.flock(descriptor, fcntl.LOCK_UN)
+ descriptor.close()
+ os.remove(lockfile)
+ self.portlocks = {}
def get(self, key):
if key in self.d:
return self.d.get(key)
+ elif os.getenv(key):
+ return os.getenv(key)
else:
return ''
@@ -216,10 +283,10 @@ class BaseConfig(object):
def is_deploy_dir_image(self, p):
if os.path.isdir(p):
if not re.search('.qemuboot.conf$', '\n'.join(os.listdir(p)), re.M):
- logger.info("Can't find required *.qemuboot.conf in %s" % p)
+ logger.debug("Can't find required *.qemuboot.conf in %s" % p)
return False
- if not re.search('-image-', '\n'.join(os.listdir(p))):
- logger.info("Can't find *-image-* in %s" % p)
+ if not any(map(lambda name: '-image-' in name, os.listdir(p))):
+ logger.debug("Can't find *-image-* in %s" % p)
return False
return True
else:
@@ -227,30 +294,31 @@ class BaseConfig(object):
def check_arg_fstype(self, fst):
"""Check and set FSTYPE"""
- if fst not in self.fstypes + self.vmtypes:
- logger.warn("Maybe unsupported FSTYPE: %s" % fst)
+ if fst not in self.fstypes + self.vmtypes + self.wictypes:
+ logger.warning("Maybe unsupported FSTYPE: %s" % fst)
if not self.fstype or self.fstype == fst:
if fst == 'ramfs':
fst = 'cpio.gz'
+ if fst in ('tar.bz2', 'tar.gz'):
+ fst = 'nfs'
self.fstype = fst
else:
- raise Exception("Conflicting: FSTYPE %s and %s" % (self.fstype, fst))
+ raise RunQemuError("Conflicting: FSTYPE %s and %s" % (self.fstype, fst))
def set_machine_deploy_dir(self, machine, deploy_dir_image):
"""Set MACHINE and DEPLOY_DIR_IMAGE"""
- logger.info('MACHINE: %s' % machine)
+ logger.debug('MACHINE: %s' % machine)
self.set("MACHINE", machine)
- logger.info('DEPLOY_DIR_IMAGE: %s' % deploy_dir_image)
+ logger.debug('DEPLOY_DIR_IMAGE: %s' % deploy_dir_image)
self.set("DEPLOY_DIR_IMAGE", deploy_dir_image)
def check_arg_nfs(self, p):
if os.path.isdir(p):
- self.nfs_dir = p
+ self.rootfs = p
else:
m = re.match('(.*):(.*)', p)
self.nfs_server = m.group(1)
- self.nfs_dir = m.group(2)
- self.rootfs = ""
+ self.rootfs = m.group(2)
self.check_arg_fstype('nfs')
def check_arg_path(self, p):
@@ -259,6 +327,7 @@ class BaseConfig(object):
- Check whether is a kernel file
- Check whether is a image file
- Check whether it is a nfs dir
+ - Check whether it is a OVMF flash file
"""
if p.endswith('.qemuboot.conf'):
self.qemuboot = p
@@ -267,38 +336,53 @@ class BaseConfig(object):
re.search('zImage', p) or re.search('vmlinux', p) or \
re.search('fitImage', p) or re.search('uImage', p):
self.kernel = p
- elif os.path.exists(p) and (not os.path.isdir(p)) and re.search('-image-', os.path.basename(p)):
+ elif os.path.exists(p) and (not os.path.isdir(p)) and '-image-' in os.path.basename(p):
self.rootfs = p
- dirpath = os.path.dirname(p)
- m = re.search('(.*)\.(.*)$', p)
- if m:
- qb = '%s%s' % (re.sub('\.rootfs$', '', m.group(1)), '.qemuboot.conf')
+ # Check the filename against self.fstypes so we can handle <file>.cpio.gz;
+ # otherwise its type would be detected as "gz", which is incorrect.
+ fst = ""
+ for t in self.fstypes:
+ if p.endswith(t):
+ fst = t
+ break
+ if not fst:
+ m = re.search('.*\.(.*)$', self.rootfs)
+ if m:
+ fst = m.group(1)
+ if fst:
+ self.check_arg_fstype(fst)
+ qb = re.sub('\.' + fst + "$", '', self.rootfs)
+ qb = '%s%s' % (re.sub('\.rootfs$', '', qb), '.qemuboot.conf')
if os.path.exists(qb):
self.qemuboot = qb
self.qbconfload = True
else:
- logger.warn("%s doesn't exist" % qb)
- fst = m.group(2)
- self.check_arg_fstype(fst)
+ logger.warning("%s doesn't exist" % qb)
else:
- raise Exception("Can't find FSTYPE from: %s" % p)
- elif os.path.isdir(p) or re.search(':', arg) and re.search('/', arg):
+ raise RunQemuError("Can't find FSTYPE from: %s" % p)
+
+ elif os.path.isdir(p) or re.search(':', p) and re.search('/', p):
if self.is_deploy_dir_image(p):
- logger.info('DEPLOY_DIR_IMAGE: %s' % p)
+ logger.debug('DEPLOY_DIR_IMAGE: %s' % p)
self.set("DEPLOY_DIR_IMAGE", p)
else:
- logger.info("Assuming %s is an nfs rootfs" % p)
+ logger.debug("Assuming %s is an nfs rootfs" % p)
self.check_arg_nfs(p)
+ elif os.path.basename(p).startswith('ovmf'):
+ self.ovmf_bios.append(p)
else:
- raise Exception("Unknown path arg %s" % p)
+ raise RunQemuError("Unknown path arg %s" % p)
def check_arg_machine(self, arg):
"""Check whether it is a machine"""
- if self.get('MACHINE') and self.get('MACHINE') != arg or re.search('/', arg):
- raise Exception("Unknown arg: %s" % arg)
- elif self.get('MACHINE') == arg:
+ if self.get('MACHINE') == arg:
return
- logger.info('Assuming MACHINE = %s' % arg)
+ elif self.get('MACHINE') and self.get('MACHINE') != arg:
+ raise RunQemuError("Maybe conflicted MACHINE: %s vs %s" % (self.get('MACHINE'), arg))
+ elif re.search('/', arg):
+ raise RunQemuError("Unknown arg: %s" % arg)
+
+ logger.debug('Assuming MACHINE = %s' % arg)
# if we're running under testimage, or similarly as a child
# of an existing bitbake invocation, we can't invoke bitbake
@@ -306,20 +390,20 @@ class BaseConfig(object):
# FIXME: testimage.bbclass exports these two variables into env,
# are there other scenarios in which we need to support being
# invoked by bitbake?
- deploy = os.environ.get('DEPLOY_DIR_IMAGE')
- bbchild = deploy and os.environ.get('OE_TMPDIR')
+ deploy = self.get('DEPLOY_DIR_IMAGE')
+ bbchild = deploy and self.get('OE_TMPDIR')
if bbchild:
self.set_machine_deploy_dir(arg, deploy)
return
# also check whether we're running under a sourced toolchain
# environment file
- if os.environ.get('OECORE_NATIVE_SYSROOT'):
+ if self.get('OECORE_NATIVE_SYSROOT'):
self.set("MACHINE", arg)
return
cmd = 'MACHINE=%s bitbake -e' % arg
logger.info('Running %s...' % cmd)
- self.bitbake_e = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ self.bitbake_e = subprocess.check_output(cmd, shell=True).decode('utf-8')
# bitbake -e doesn't report invalid MACHINE as an error, so
# let's check DEPLOY_DIR_IMAGE to make sure that it is a valid
# MACHINE.
@@ -327,7 +411,7 @@ class BaseConfig(object):
if s:
deploy_dir_image = s.group(1)
else:
- raise Exception("bitbake -e %s" % self.bitbake_e)
+ raise RunQemuError("bitbake -e %s" % self.bitbake_e)
if self.is_deploy_dir_image(deploy_dir_image):
self.set_machine_deploy_dir(arg, deploy_dir_image)
else:
@@ -335,15 +419,62 @@ class BaseConfig(object):
self.set("MACHINE", arg)
def check_args(self):
+ for debug in ("-d", "--debug"):
+ if debug in sys.argv:
+ logger.setLevel(logging.DEBUG)
+ sys.argv.remove(debug)
+
+ for quiet in ("-q", "--quiet"):
+ if quiet in sys.argv:
+ logger.setLevel(logging.ERROR)
+ sys.argv.remove(quiet)
+
unknown_arg = ""
for arg in sys.argv[1:]:
- if arg in self.fstypes + self.vmtypes:
+ if arg in self.fstypes + self.vmtypes + self.wictypes:
self.check_arg_fstype(arg)
elif arg == 'nographic':
self.qemu_opt_script += ' -nographic'
self.kernel_cmdline_script += ' console=ttyS0'
+ elif arg == 'sdl':
+ if 'gl' in sys.argv[1:]:
+ self.qemu_opt_script += ' -vga virtio -display sdl,gl=on'
+ elif 'gl-es' in sys.argv[1:]:
+ self.qemu_opt_script += ' -vga virtio -display sdl,gl=es'
+ else:
+ self.qemu_opt_script += ' -display sdl'
+ elif arg == 'gtk':
+ if 'gl' in sys.argv[1:]:
+ self.qemu_opt_script += ' -vga virtio -display gtk,gl=on'
+ elif 'gl-es' in sys.argv[1:]:
+ self.qemu_opt_script += ' -vga virtio -display gtk,gl=es'
+ else:
+ self.qemu_opt_script += ' -display gtk'
+ elif arg == 'gl' or arg == 'gl-es':
+ # These args are handled inside sdl or gtk blocks above
+ pass
+ elif arg == 'egl-headless':
+ self.qemu_opt_script += ' -vga virtio -display egl-headless'
+ # As runqemu can be run within bitbake (when using testimage, for example),
+ # we need to ensure that we run host pkg-config, and that it does not
+ # get mis-directed to native build paths set by bitbake.
+ try:
+ del os.environ['PKG_CONFIG_PATH']
+ del os.environ['PKG_CONFIG_DIR']
+ del os.environ['PKG_CONFIG_LIBDIR']
+ del os.environ['PKG_CONFIG_SYSROOT_DIR']
+ except KeyError:
+ pass
+ try:
+ dripath = subprocess.check_output("PATH=/bin:/usr/bin:$PATH pkg-config --variable=dridriverdir dri", shell=True)
+ except subprocess.CalledProcessError as e:
+ raise RunQemuError("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.")
+ os.environ['LIBGL_DRIVERS_PATH'] = dripath.decode('utf-8').strip()
elif arg == 'serial':
self.kernel_cmdline_script += ' console=ttyS0'
+ self.serialconsole = True
+ elif arg == "serialstdio":
+ self.kernel_cmdline_script += ' console=ttyS0'
self.serialstdio = True
elif arg == 'audio':
logger.info("Enabling audio in qemu")
@@ -360,43 +491,41 @@ class BaseConfig(object):
elif arg == 'publicvnc':
self.qemu_opt_script += ' -vnc :0'
elif arg.startswith('tcpserial='):
- self.tcpserial_portnum = arg[len('tcpserial='):]
- elif arg.startswith('biosdir='):
- self.custombiosdir = arg[len('biosdir='):]
- elif arg.startswith('biosfilename='):
- self.qemu_opt_script += ' -bios %s' % arg[len('biosfilename='):]
+ self.tcpserial_portnum = '%s' % arg[len('tcpserial='):]
elif arg.startswith('qemuparams='):
- self.qemu_opt_script += ' %s' % arg[len('qemuparams='):]
+ self.qemuparams = ' %s' % arg[len('qemuparams='):]
elif arg.startswith('bootparams='):
- self.kernel_cmdline_script += ' %s' % arg[len('bootparams='):]
+ self.bootparams = arg[len('bootparams='):]
elif os.path.exists(arg) or (re.search(':', arg) and re.search('/', arg)):
self.check_arg_path(os.path.abspath(arg))
- elif re.search('-image-', arg):
+ elif re.search(r'-image-|-image$', arg):
# Lazy rootfs
self.rootfs = arg
+ elif arg.startswith('ovmf'):
+ self.ovmf_bios.append(arg)
else:
- # At last, assume is it the MACHINE
+ # At last, assume it is the MACHINE
if (not unknown_arg) or unknown_arg == arg:
unknown_arg = arg
else:
- raise Exception("Can't handle two unknown args: %s %s" % (unknown_arg, arg))
+ raise RunQemuError("Can't handle two unknown args: %s %s\n"
+ "Try 'runqemu help' on how to use it" % \
+ (unknown_arg, arg))
# Check to make sure it is a valid machine
- if unknown_arg:
- if self.get('MACHINE') == unknown_arg:
- return
- if not self.get('DEPLOY_DIR_IMAGE'):
- # Trying to get DEPLOY_DIR_IMAGE from env.
- p = os.getenv('DEPLOY_DIR_IMAGE')
- if p and self.is_deploy_dir_image(p):
- machine = os.path.basename(p)
- if unknown_arg == machine:
- self.set_machine_deploy_dir(machine, p)
- return
- else:
- logger.info('DEPLOY_DIR_IMAGE: %s' % p)
- self.set("DEPLOY_DIR_IMAGE", p)
+ if unknown_arg and self.get('MACHINE') != unknown_arg:
+ if self.get('DEPLOY_DIR_IMAGE'):
+ machine = os.path.basename(self.get('DEPLOY_DIR_IMAGE'))
+ if unknown_arg == machine:
+ self.set("MACHINE", machine)
+
self.check_arg_machine(unknown_arg)
+ if not (self.get('DEPLOY_DIR_IMAGE') or self.qbconfload):
+ self.load_bitbake_env()
+ s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M)
+ if s:
+ self.set("DEPLOY_DIR_IMAGE", s.group(1))
+
def check_kvm(self):
"""Check kvm and kvm-host"""
if not (self.kvm_enabled or self.vhost_enabled):
@@ -404,42 +533,48 @@ class BaseConfig(object):
return
if not self.get('QB_CPU_KVM'):
- raise Exception("QB_CPU_KVM is NULL, this board doesn't support kvm")
+ raise RunQemuError("QB_CPU_KVM is NULL, this board doesn't support kvm")
self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU_KVM'))
yocto_kvm_wiki = "https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu"
yocto_paravirt_kvm_wiki = "https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM"
dev_kvm = '/dev/kvm'
dev_vhost = '/dev/vhost-net'
- with open('/proc/cpuinfo', 'r') as f:
- kvm_cap = re.search('vmx|svm', "".join(f.readlines()))
- if not kvm_cap:
- logger.error("You are trying to enable KVM on a cpu without VT support.")
- logger.error("Remove kvm from the command-line, or refer:")
- raise Exception(yocto_kvm_wiki)
+ if self.qemu_system.endswith(('i386', 'x86_64')):
+ with open('/proc/cpuinfo', 'r') as f:
+ kvm_cap = re.search('vmx|svm', "".join(f.readlines()))
+ if not kvm_cap:
+ logger.error("You are trying to enable KVM on a cpu without VT support.")
+ logger.error("Remove kvm from the command-line, or refer:")
+ raise RunQemuError(yocto_kvm_wiki)
if not os.path.exists(dev_kvm):
logger.error("Missing KVM device. Have you inserted kvm modules?")
logger.error("For further help see:")
- raise Exception(yocto_kvm_wiki)
+ raise RunQemuError(yocto_kvm_wiki)
if os.access(dev_kvm, os.W_OK|os.R_OK):
self.qemu_opt_script += ' -enable-kvm'
+ if self.get('MACHINE') == "qemux86":
+ # Workaround for broken APIC window on pre 4.15 host kernels which causes boot hangs
+ # See YOCTO #12301
+ # On 64 bit we use x2apic
+ self.kernel_cmdline_script += " clocksource=kvm-clock hpet=disable noapic nolapic"
else:
logger.error("You have no read or write permission on /dev/kvm.")
logger.error("Please change the ownership of this file as described at:")
- raise Exception(yocto_kvm_wiki)
+ raise RunQemuError(yocto_kvm_wiki)
if self.vhost_enabled:
if not os.path.exists(dev_vhost):
logger.error("Missing virtio net device. Have you inserted vhost-net module?")
logger.error("For further help see:")
- raise Exception(yocto_paravirt_kvm_wiki)
+ raise RunQemuError(yocto_paravirt_kvm_wiki)
if not os.access(dev_kvm, os.W_OK|os.R_OK):
logger.error("You have no read or write permission on /dev/vhost-net.")
logger.error("Please change the ownership of this file as described at:")
- raise Exception(yocto_kvm_wiki)
+ raise RunQemuError(yocto_kvm_wiki)
def check_fstype(self):
"""Check and setup FSTYPE"""
@@ -448,11 +583,54 @@ class BaseConfig(object):
if fstype:
self.fstype = fstype
else:
- raise Exception("FSTYPE is NULL!")
+ raise RunQemuError("FSTYPE is NULL!")
+
+ # parse QB_FSINFO into dict, e.g. { 'wic': ['no-kernel-in-fs', 'a-flag'], 'ext4': ['another-flag']}
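+ # e.g. QB_FSINFO = "wic:no-kernel-in-fs" (space-separated fstype:flag pairs)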
+ wic_fs = False
+ qb_fsinfo = self.get('QB_FSINFO')
+ if qb_fsinfo:
+ qb_fsinfo = qb_fsinfo.split()
+ for fsinfo in qb_fsinfo:
+ try:
+ fstype, fsflag = fsinfo.split(':')
+
+ if fstype == 'wic':
+ if fsflag == 'no-kernel-in-fs':
+ wic_fs = True
+ elif fsflag == 'kernel-in-fs':
+ wic_fs = False
+ else:
+ logger.warning('Unknown flag "%s:%s" in QB_FSINFO', fstype, fsflag)
+ continue
+ else:
+ logger.warning('QB_FSINFO is not supported for image type "%s"', fstype)
+ continue
+
+ if fstype in self.fsinfo:
+ self.fsinfo[fstype].append(fsflag)
+ else:
+ self.fsinfo[fstype] = [fsflag]
+ except Exception:
+ logger.error('Invalid parameter "%s" in QB_FSINFO', fsinfo)
+
+ # treat wic images as vmimages (with kernel) or as fsimages (rootfs only)
+ if wic_fs:
+ self.fstypes = self.fstypes + self.wictypes
+ else:
+ self.vmtypes = self.vmtypes + self.wictypes
def check_rootfs(self):
"""Check and set rootfs"""
+ if self.fstype == "none":
+ return
+
+ if self.get('ROOTFS'):
+ if not self.rootfs:
+ self.rootfs = self.get('ROOTFS')
+ elif self.get('ROOTFS') != self.rootfs:
+ raise RunQemuError("Maybe conflicted ROOTFS: %s vs %s" % (self.get('ROOTFS'), self.rootfs))
+
if self.fstype == 'nfs':
return
@@ -467,99 +645,186 @@ class BaseConfig(object):
cmds = (cmd_name, cmd_link)
self.rootfs = get_first_file(cmds)
if not self.rootfs:
- raise Exception("Failed to find rootfs: %s or %s" % cmds)
+ raise RunQemuError("Failed to find rootfs: %s or %s" % cmds)
if not os.path.exists(self.rootfs):
- raise Exception("Can't find rootfs: %s" % self.rootfs)
+ raise RunQemuError("Can't find rootfs: %s" % self.rootfs)
+
+ def setup_pkkek1(self):
+ """
+ Extract from PEM certificate the Platform Key and first Key
+ Exchange Key certificate string. The hypervisor needs to provide
+ it in the Type 11 SMBIOS table
+ """
+ pemcert = '%s/%s' % (self.get('DEPLOY_DIR_IMAGE'), 'OvmfPkKek1.pem')
+ try:
+ with open(pemcert, 'r') as pemfile:
+ key = pemfile.read().replace('\n', ''). \
+ replace('-----BEGIN CERTIFICATE-----', ''). \
+ replace('-----END CERTIFICATE-----', '')
+ self.ovmf_secboot_pkkek1 = key
+
+ except FileNotFoundError:
+ raise RunQemuError("Can't open PEM certificate %s " % pemcert)
+
+ def check_ovmf(self):
+ """Check and set full path for OVMF firmware and variable file(s)."""
+
+ for index, ovmf in enumerate(self.ovmf_bios):
+ if os.path.exists(ovmf):
+ continue
+ for suffix in ('qcow2', 'bin'):
+ path = '%s/%s.%s' % (self.get('DEPLOY_DIR_IMAGE'), ovmf, suffix)
+ if os.path.exists(path):
+ self.ovmf_bios[index] = path
+ if ovmf.endswith('secboot'):
+ self.setup_pkkek1()
+ break
+ else:
+ raise RunQemuError("Can't find OVMF firmware: %s" % ovmf)
def check_kernel(self):
- """Check and set kernel, dtb"""
+ """Check and set kernel"""
# The vm image doesn't need a kernel
if self.fstype in self.vmtypes:
return
+ # See if the user supplied a KERNEL option
+ if self.get('KERNEL'):
+ self.kernel = self.get('KERNEL')
+
+ # QB_DEFAULT_KERNEL is always a full file path
+ kernel_name = os.path.basename(self.get('QB_DEFAULT_KERNEL'))
+
+ # The user didn't want a kernel to be loaded
+ if kernel_name == "none" and not self.kernel:
+ return
+
deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
if not self.kernel:
- kernel_match_name = "%s/%s" % (deploy_dir_image, self.get('QB_DEFAULT_KERNEL'))
+ kernel_match_name = "%s/%s" % (deploy_dir_image, kernel_name)
kernel_match_link = "%s/%s" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
kernel_startswith = "%s/%s*" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
cmds = (kernel_match_name, kernel_match_link, kernel_startswith)
self.kernel = get_first_file(cmds)
if not self.kernel:
- raise Exception('KERNEL not found: %s, %s or %s' % cmds)
+ raise RunQemuError('KERNEL not found: %s, %s or %s' % cmds)
if not os.path.exists(self.kernel):
- raise Exception("KERNEL %s not found" % self.kernel)
+ raise RunQemuError("KERNEL %s not found" % self.kernel)
+
+ def check_dtb(self):
+ """Check and set dtb"""
+ # Did the user specify a device tree?
+ if self.get('DEVICE_TREE'):
+ self.dtb = self.get('DEVICE_TREE')
+ if not os.path.exists(self.dtb):
+ raise RunQemuError('Specified DTB not found: %s' % self.dtb)
+ return
dtb = self.get('QB_DTB')
if dtb:
+ deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
cmd_match = "%s/%s" % (deploy_dir_image, dtb)
cmd_startswith = "%s/%s*" % (deploy_dir_image, dtb)
cmd_wild = "%s/*.dtb" % deploy_dir_image
cmds = (cmd_match, cmd_startswith, cmd_wild)
self.dtb = get_first_file(cmds)
if not os.path.exists(self.dtb):
- raise Exception('DTB not found: %s, %s or %s' % cmds)
+ raise RunQemuError('DTB not found: %s, %s or %s' % cmds)
+
+ def check_bios(self):
+ """Check and set bios"""
- def check_biosdir(self):
- """Check custombiosdir"""
- if not self.custombiosdir:
+ # See if the user supplied a BIOS option
+ if self.get('BIOS'):
+ self.bios = self.get('BIOS')
+
+ # QB_DEFAULT_BIOS is always a full file path
+ bios_name = os.path.basename(self.get('QB_DEFAULT_BIOS'))
+
+ # The user didn't want a bios to be loaded
+ if (bios_name == "" or bios_name == "none") and not self.bios:
return
- biosdir = ""
- biosdir_native = "%s/%s" % (self.get('STAGING_DIR_NATIVE'), self.custombiosdir)
- biosdir_host = "%s/%s" % (self.get('STAGING_DIR_HOST'), self.custombiosdir)
- for i in (self.custombiosdir, biosdir_native, biosdir_host):
- if os.path.isdir(i):
- biosdir = i
- break
+ if not self.bios:
+ deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
+ self.bios = "%s/%s" % (deploy_dir_image, bios_name)
+
+ if not self.bios:
+ raise RunQemuError('BIOS not found: %s' % bios_name)
+
+ if not os.path.exists(self.bios):
+ raise RunQemuError("KERNEL %s not found" % self.bios)
- if biosdir:
- logger.info("Assuming biosdir is: %s" % biosdir)
- self.qemu_opt_script += ' -L %s' % biosdir
- else:
- logger.error("Custom BIOS directory not found. Tried: %s, %s, and %s" % (self.custombiosdir, biosdir_native, biosdir_host))
- raise Exception("Invalid custombiosdir: %s" % self.custombiosdir)
def check_mem(self):
- s = re.search('-m +([0-9]+)', self.qemu_opt_script)
+ """
+ Both qemu and the kernel need memory settings, so check QB_MEM and
+ set it for both.
+ """
+ s = re.search('-m +([0-9]+)', self.qemuparams)
if s:
self.set('QB_MEM', '-m %s' % s.group(1))
elif not self.get('QB_MEM'):
- logger.info('QB_MEM is not set, use 512M by default')
- self.set('QB_MEM', '-m 512')
+ logger.info('QB_MEM is not set, using 256M by default')
+ self.set('QB_MEM', '-m 256')
+
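+ # Normalize QB_MEM so that e.g. '512M' and '512' both end up as '-m 512'.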
+ # Check and remove M or m suffix
+ qb_mem = self.get('QB_MEM')
+ if qb_mem.endswith('M') or qb_mem.endswith('m'):
+ qb_mem = qb_mem[:-1]
+
+ # Add the -m prefix if not present
+ if not qb_mem.startswith('-m'):
+ qb_mem = '-m %s' % qb_mem
+
+ self.set('QB_MEM', qb_mem)
+
+ mach = self.get('MACHINE')
+ if not mach.startswith('qemumips'):
+ self.kernel_cmdline_script += ' mem=%s' % self.get('QB_MEM').replace('-m','').strip() + 'M'
- self.kernel_cmdline_script += ' mem=%s' % self.get('QB_MEM').replace('-m','').strip() + 'M'
self.qemu_opt_script += ' %s' % self.get('QB_MEM')
def check_tcpserial(self):
if self.tcpserial_portnum:
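+ # tcpserial_portnum may hold several colon-separated ports,
+ # e.g. '4444:4445'; the first one replaces @PORT@ below, the
+ # rest get plain -serial options appended.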
+ ports = self.tcpserial_portnum.split(':')
+ port = ports[0]
if self.get('QB_TCPSERIAL_OPT'):
- self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', self.tcpserial_portnum)
+ self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', port)
else:
- self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % self.tcpserial_portnum
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % port
+
+ if len(ports) > 1:
+ for port in ports[1:]:
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % port
def check_and_set(self):
"""Check configs sanity and set when needed"""
self.validate_paths()
- check_tun()
+ if not self.slirp_enabled:
+ check_tun()
# Check audio
if self.audio_enabled:
if not self.get('QB_AUDIO_DRV'):
- raise Exception("QB_AUDIO_DRV is NULL, this board doesn't support audio")
+ raise RunQemuError("QB_AUDIO_DRV is NULL, this board doesn't support audio")
if not self.get('QB_AUDIO_OPT'):
- logger.warn('QB_AUDIO_OPT is NULL, you may need define it to make audio work')
+ logger.warning('QB_AUDIO_OPT is NULL, you may need to define it to make audio work')
else:
self.qemu_opt_script += ' %s' % self.get('QB_AUDIO_OPT')
os.putenv('QEMU_AUDIO_DRV', self.get('QB_AUDIO_DRV'))
else:
os.putenv('QEMU_AUDIO_DRV', 'none')
+ self.check_qemu_system()
self.check_kvm()
self.check_fstype()
self.check_rootfs()
+ self.check_ovmf()
self.check_kernel()
- self.check_biosdir()
+ self.check_dtb()
+ self.check_bios()
self.check_mem()
self.check_tcpserial()
@@ -567,10 +832,8 @@ class BaseConfig(object):
if not self.qemuboot:
if self.get('DEPLOY_DIR_IMAGE'):
deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
- elif os.getenv('DEPLOY_DIR_IMAGE'):
- deploy_dir_image = os.getenv('DEPLOY_DIR_IMAGE')
else:
- logger.info("Can't find qemuboot conf file, DEPLOY_DIR_IMAGE is NULL!")
+ logger.warning("Can't find qemuboot conf file, DEPLOY_DIR_IMAGE is NULL!")
return
if self.rootfs and not os.path.exists(self.rootfs):
@@ -582,10 +845,21 @@ class BaseConfig(object):
self.rootfs, machine)
else:
cmd = 'ls -t %s/*.qemuboot.conf' % deploy_dir_image
- logger.info('Running %s...' % cmd)
- qbs = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ logger.debug('Running %s...' % cmd)
+ try:
+ qbs = subprocess.check_output(cmd, shell=True).decode('utf-8')
+ except subprocess.CalledProcessError as err:
+ raise RunQemuError(err)
if qbs:
- self.qemuboot = qbs.split()[0]
+ for qb in qbs.split():
+ # Skip initramfs images when other choices exist, unless fstype is cpio.gz
+ if '-initramfs-' in os.path.basename(qb) and self.fstype != 'cpio.gz':
+ continue
+ self.qemuboot = qb
+ break
+ if not self.qemuboot:
+ # Fall back to the first one when there is no other choice
+ self.qemuboot = qbs.split()[0]
self.qbconfload = True
if not self.qemuboot:
@@ -594,14 +868,18 @@ class BaseConfig(object):
return
if not os.path.exists(self.qemuboot):
- raise Exception("Failed to find <image>.qemuboot.conf!")
+ raise RunQemuError("Failed to find %s (wrong image name or BSP does not support running under qemu?)." % self.qemuboot)
- logger.info('CONFFILE: %s' % self.qemuboot)
+ logger.debug('CONFFILE: %s' % self.qemuboot)
cf = configparser.ConfigParser()
cf.read(self.qemuboot)
for k, v in cf.items('config_bsp'):
k_upper = k.upper()
+ if v.startswith("../"):
+ v = os.path.abspath(os.path.dirname(self.qemuboot) + "/" + v)
+ elif v == ".":
+ v = os.path.dirname(self.qemuboot)
self.set(k_upper, v)
def validate_paths(self):
@@ -610,8 +888,8 @@ class BaseConfig(object):
# artefacts are relative to that file, rather than in whatever
# directory DEPLOY_DIR_IMAGE in the conf file points to.
if self.qbconfload:
- imgdir = os.path.dirname(self.qemuboot)
- if imgdir != self.get('DEPLOY_DIR_IMAGE'):
+ imgdir = os.path.realpath(os.path.dirname(self.qemuboot))
+ if imgdir != os.path.realpath(self.get('DEPLOY_DIR_IMAGE')):
logger.info('Setting DEPLOY_DIR_IMAGE to folder containing %s (%s)' % (self.qemuboot, imgdir))
self.set('DEPLOY_DIR_IMAGE', imgdir)
@@ -626,7 +904,7 @@ class BaseConfig(object):
self.load_bitbake_env()
if self.bitbake_e:
- native_vars = ['STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE']
+ native_vars = ['STAGING_DIR_NATIVE']
for nv in native_vars:
s = re.search('^%s="(.*)"' % nv, self.bitbake_e, re.M)
if s and s.group(1) != self.get(nv):
@@ -637,8 +915,8 @@ class BaseConfig(object):
# be able to call `bitbake -e`, then try:
# - get OE_TMPDIR from environment and guess paths based on it
# - get OECORE_NATIVE_SYSROOT from environment (for sdk)
- tmpdir = os.environ.get('OE_TMPDIR', None)
- oecore_native_sysroot = os.environ.get('OECORE_NATIVE_SYSROOT', None)
+ tmpdir = self.get('OE_TMPDIR')
+ oecore_native_sysroot = self.get('OECORE_NATIVE_SYSROOT')
if tmpdir:
logger.info('Setting STAGING_DIR_NATIVE and STAGING_BINDIR_NATIVE relative to OE_TMPDIR (%s)' % tmpdir)
hostos, _, _, _, machine = os.uname()
@@ -655,19 +933,30 @@ class BaseConfig(object):
self.set('STAGING_BINDIR_NATIVE', '%s/usr/bin' % self.get('STAGING_DIR_NATIVE'))
def print_config(self):
- logger.info('Continuing with the following parameters:\n')
+ logoutput = ['Continuing with the following parameters:']
if not self.fstype in self.vmtypes:
- print('KERNEL: [%s]' % self.kernel)
+ logoutput.append('KERNEL: [%s]' % self.kernel)
+ if self.bios:
+ logoutput.append('BIOS: [%s]' % self.bios)
if self.dtb:
- print('DTB: [%s]' % self.dtb)
- print('MACHINE: [%s]' % self.get('MACHINE'))
- print('FSTYPE: [%s]' % self.fstype)
+ logoutput.append('DTB: [%s]' % self.dtb)
+ logoutput.append('MACHINE: [%s]' % self.get('MACHINE'))
+ try:
+ fstype_flags = ' (' + ', '.join(self.fsinfo[self.fstype]) + ')'
+ except KeyError:
+ fstype_flags = ''
+ logoutput.append('FSTYPE: [%s%s]' % (self.fstype, fstype_flags))
if self.fstype == 'nfs':
- print('NFS_DIR: [%s]' % self.nfs_dir)
+ logoutput.append('NFS_DIR: [%s]' % self.rootfs)
else:
- print('ROOTFS: [%s]' % self.rootfs)
- print('CONFFILE: [%s]' % self.qemuboot)
- print('')
+ logoutput.append('ROOTFS: [%s]' % self.rootfs)
+ if self.ovmf_bios:
+ logoutput.append('OVMF: %s' % self.ovmf_bios)
+ if self.ovmf_secboot_pkkek1:
+ logoutput.append('SECBOOT PKKEK1: [%s...]' % self.ovmf_secboot_pkkek1[0:100])
+ logoutput.append('CONFFILE: [%s]' % self.qemuboot)
+ logoutput.append('')
+ logger.info('\n'.join(logoutput))
def setup_nfs(self):
if not self.nfs_server:
@@ -676,25 +965,36 @@ class BaseConfig(object):
else:
self.nfs_server = '192.168.7.1'
- nfs_instance = int(self.nfs_instance)
-
- mountd_rpcport = 21111 + nfs_instance
- nfsd_rpcport = 11111 + nfs_instance
- nfsd_port = 3049 + 2 * nfs_instance
- mountd_port = 3048 + 2 * nfs_instance
- unfs_opts="nfsvers=3,port=%s,mountprog=%s,nfsprog=%s,udp,mountport=%s" % (nfsd_port, mountd_rpcport, nfsd_rpcport, mountd_port)
- self.unfs_opts = unfs_opts
+ # Figure out a new nfs_instance to allow multiple qemus running.
+ ps = subprocess.check_output(("ps", "auxww")).decode('utf-8')
+ pattern = r'/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) '
+ all_instances = re.findall(pattern, ps, re.M)
+ if all_instances:
+ all_instances.sort(key=int)
+ self.nfs_instance = int(all_instances.pop()) + 1
+
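+ # Derive unique UDP ports from the instance number so that several
+ # NFS servers can run side by side, e.g. instance 1 uses 3051/3050.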
+ nfsd_port = 3049 + 2 * self.nfs_instance
+ mountd_port = 3048 + 2 * self.nfs_instance
+
+ # Export vars for runqemu-export-rootfs
+ export_dict = {
+ 'NFS_INSTANCE': self.nfs_instance,
+ 'NFSD_PORT': nfsd_port,
+ 'MOUNTD_PORT': mountd_port,
+ }
+ for k, v in export_dict.items():
+ # Use '%s' since they are integers
+ os.putenv(k, '%s' % v)
- p = '%s/.runqemu-sdk/pseudo' % os.getenv('HOME')
- os.putenv('PSEUDO_LOCALSTATEDIR', p)
+ self.unfs_opts="nfsvers=3,port=%s,udp,mountport=%s" % (nfsd_port, mountd_port)
- # Extract .tar.bz2 or .tar.bz if no self.nfs_dir
- if not self.nfs_dir:
+ # Extract .tar.bz2 or .tar.gz if there is no NFS dir yet
+ if not (self.rootfs and os.path.isdir(self.rootfs)):
src_prefix = '%s/%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'))
dest = "%s-nfsroot" % src_prefix
if os.path.exists('%s.pseudo_state' % dest):
logger.info('Use %s as NFS_DIR' % dest)
- self.nfs_dir = dest
+ self.rootfs = dest
else:
src = ""
src1 = '%s.tar.bz2' % src_prefix
@@ -704,29 +1004,67 @@ class BaseConfig(object):
elif os.path.exists(src2):
src = src2
if not src:
- raise Exception("No NFS_DIR is set, and can't find %s or %s to extract" % (src1, src2))
+ raise RunQemuError("No NFS_DIR is set, and can't find %s or %s to extract" % (src1, src2))
logger.info('NFS_DIR not found, extracting %s to %s' % (src, dest))
- cmd = 'runqemu-extract-sdk %s %s' % (src, dest)
- logger.info('Running %s...' % cmd)
- if subprocess.call(cmd, shell=True) != 0:
- raise Exception('Failed to run %s' % cmd)
+ cmd = ('runqemu-extract-sdk', src, dest)
+ logger.info('Running %s...' % str(cmd))
+ if subprocess.call(cmd) != 0:
+ raise RunQemuError('Failed to run %s' % cmd)
self.clean_nfs_dir = True
- self.nfs_dir = dest
+ self.rootfs = dest
# Start the userspace NFS server
- cmd = 'runqemu-export-rootfs restart %s' % self.nfs_dir
- logger.info('Running %s...' % cmd)
- if subprocess.call(cmd, shell=True) != 0:
- raise Exception('Failed to run %s' % cmd)
+ cmd = ('runqemu-export-rootfs', 'start', self.rootfs)
+ logger.info('Running %s...' % str(cmd))
+ if subprocess.call(cmd) != 0:
+ raise RunQemuError('Failed to run %s' % cmd)
self.nfs_running = True
-
def setup_slirp(self):
+ """Setup user networking"""
+
if self.fstype == 'nfs':
self.setup_nfs()
self.kernel_cmdline_script += ' ip=dhcp'
- self.set('NETWORK_CMD', self.get('QB_SLIRP_OPT'))
+ # Port mapping
+ hostfwd = ",hostfwd=tcp::2222-:22,hostfwd=tcp::2323-:23"
+ qb_slirp_opt_default = "-netdev user,id=net0%s,tftp=%s" % (hostfwd, self.get('DEPLOY_DIR_IMAGE'))
+ qb_slirp_opt = self.get('QB_SLIRP_OPT') or qb_slirp_opt_default
+ # Figure out the port
+ ports = re.findall('hostfwd=[^-]*:([0-9]+)-[^,-]*', qb_slirp_opt)
+ ports = [int(i) for i in ports]
+ mac = 2
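+ # mac is bumped together with the port probes below so that each
+ # concurrent slirp instance ends up with a distinct guest MAC address.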
+
+ lockdir = "/tmp/qemu-port-locks"
+ if not os.path.exists(lockdir):
+ # There might be a race when multiple runqemu processes are
+ # running at the same time.
+ try:
+ os.mkdir(lockdir)
+ os.chmod(lockdir, 0o777)
+ except FileExistsError:
+ pass
+
+ # Find a free port to avoid conflicts
+ for p in ports[:]:
+ p_new = p
+ while not self.check_free_port('localhost', p_new, lockdir):
+ p_new += 1
+ mac += 1
+ while p_new in ports:
+ p_new += 1
+ mac += 1
+ if p != p_new:
+ ports.append(p_new)
+ qb_slirp_opt = re.sub(':%s-' % p, ':%s-' % p_new, qb_slirp_opt)
+ logger.info("Port forward changed: %s -> %s" % (p, p_new))
+ mac = "%s%02x" % (self.mac_slirp, mac)
+ self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qb_slirp_opt))
+ # Print out port forwards
+ hostfwd = re.findall('(hostfwd=[^,]*)', qb_slirp_opt)
+ if hostfwd:
+ logger.info('Port forward: %s' % ' '.join(hostfwd))
def setup_tap(self):
"""Setup tap"""
@@ -741,6 +1079,9 @@ class BaseConfig(object):
lockdir = "/tmp/qemu-tap-locks"
if not (self.qemuifup and self.qemuifdown and ip):
+ logger.error("runqemu-ifup: %s" % self.qemuifup)
+ logger.error("runqemu-ifdown: %s" % self.qemuifdown)
+ logger.error("ip: %s" % ip)
raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found")
if not os.path.exists(lockdir):
@@ -748,22 +1089,23 @@ class BaseConfig(object):
# running at the same time.
try:
os.mkdir(lockdir)
+ os.chmod(lockdir, 0o777)
except FileExistsError:
pass
- cmd = '%s link' % ip
- logger.info('Running %s...' % cmd)
- ip_link = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ cmd = (ip, 'link')
+ logger.debug('Running %s...' % str(cmd))
+ ip_link = subprocess.check_output(cmd).decode('utf-8')
# Matches line like: 6: tap0: <foo>
- possibles = re.findall('^[1-9]+: +(tap[0-9]+): <.*', ip_link, re.M)
+ possibles = re.findall('^[0-9]+: +(tap[0-9]+): <.*', ip_link, re.M)
tap = ""
for p in possibles:
lockfile = os.path.join(lockdir, p)
if os.path.exists('%s.skip' % lockfile):
logger.info('Found %s.skip, skipping %s' % (lockfile, p))
continue
- self.lock = lockfile + '.lock'
- if self.acquire_lock():
+ self.taplock = lockfile + '.lock'
+ if self.acquire_taplock(error=False):
tap = p
logger.info("Using preconfigured tap device %s" % tap)
logger.info("If this is not intended, touch %s.skip to make runqemu skip %s." %(lockfile, tap))
@@ -773,83 +1115,108 @@ class BaseConfig(object):
if os.path.exists(nosudo_flag):
logger.error("Error: There are no available tap devices to use for networking,")
logger.error("and I see %s exists, so I am not going to try creating" % nosudo_flag)
- raise Exception("a new one with sudo.")
+ raise RunQemuError("a new one with sudo.")
gid = os.getgid()
uid = os.getuid()
logger.info("Setting up tap interface under sudo")
- cmd = 'sudo %s %s %s %s' % (self.qemuifup, uid, gid, self.get('STAGING_DIR_NATIVE'))
- tap = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8').rstrip('\n')
+ cmd = ('sudo', self.qemuifup, str(uid), str(gid), self.bindir_native)
+ try:
+ tap = subprocess.check_output(cmd).decode('utf-8').strip()
+ except subprocess.CalledProcessError as e:
+ logger.error('Setting up tap device failed:\n%s\nRun runqemu-gen-tapdevs to manually create one.' % str(e))
+ sys.exit(1)
lockfile = os.path.join(lockdir, tap)
- self.lock = lockfile + '.lock'
- self.acquire_lock()
+ self.taplock = lockfile + '.lock'
+ self.acquire_taplock()
self.cleantap = True
- logger.info('Created tap: %s' % tap)
+ logger.debug('Created tap: %s' % tap)
if not tap:
logger.error("Failed to setup tap device. Run runqemu-gen-tapdevs to manually create.")
- return 1
+ sys.exit(1)
self.tap = tap
- n0 = tap[3:]
- n1 = int(n0) * 2 + 1
- n2 = n1 + 1
- self.nfs_instance = n0
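+ # tapN maps to an address pair in 192.168.7.0/24: the gateway is
+ # .(2N+1) on the host side and the client .(2N+2) in the guest,
+ # e.g. tap0 -> 192.168.7.1 / 192.168.7.2.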
+ tapnum = int(tap[3:])
+ gateway = tapnum * 2 + 1
+ client = gateway + 1
if self.fstype == 'nfs':
self.setup_nfs()
- self.kernel_cmdline_script += " ip=192.168.7.%s::192.168.7.%s:255.255.255.0" % (n2, n1)
- mac = "52:54:00:12:34:%02x" % n2
+ netconf = "192.168.7.%s::192.168.7.%s:255.255.255.0" % (client, gateway)
+ logger.info("Network configuration: %s", netconf)
+ self.kernel_cmdline_script += " ip=%s" % netconf
+ mac = "%s%02x" % (self.mac_tap, client)
qb_tap_opt = self.get('QB_TAP_OPT')
if qb_tap_opt:
- qemu_tap_opt = qb_tap_opt.replace('@TAP@', tap).replace('@MAC@', mac)
+ qemu_tap_opt = qb_tap_opt.replace('@TAP@', tap)
else:
- qemu_tap_opt = "-device virtio-net-pci,netdev=net0,mac=%s -netdev tap,id=net0,ifname=%s,script=no,downscript=no" % (mac, self.tap)
+ qemu_tap_opt = "-netdev tap,id=net0,ifname=%s,script=no,downscript=no" % (self.tap)
if self.vhost_enabled:
qemu_tap_opt += ',vhost=on'
- self.set('NETWORK_CMD', qemu_tap_opt)
+ self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qemu_tap_opt))
def setup_network(self):
- cmd = "stty -g"
- self.saved_stty = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ if self.get('QB_NET') == 'none':
+ return
+ if sys.stdin.isatty():
+ self.saved_stty = subprocess.check_output(("stty", "-g")).decode('utf-8').strip()
+ self.network_device = self.get('QB_NETWORK_DEVICE') or self.network_device
if self.slirp_enabled:
self.setup_slirp()
else:
self.setup_tap()
+ def setup_rootfs(self):
+ if self.get('QB_ROOTFS') == 'none':
+ return
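+ # A 'wic.<fmt>' fstype is a wic image in the given format; strip
+ # the prefix so the format selection below sees e.g. 'qcow2'.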
+ if 'wic.' in self.fstype:
+ self.fstype = self.fstype[4:]
+ rootfs_format = self.fstype if self.fstype in ('vmdk', 'qcow2', 'vdi') else 'raw'
+
qb_rootfs_opt = self.get('QB_ROOTFS_OPT')
if qb_rootfs_opt:
self.rootfs_options = qb_rootfs_opt.replace('@ROOTFS@', self.rootfs)
else:
- self.rootfs_options = '-drive file=%s,if=virtio,format=raw' % self.rootfs
+ self.rootfs_options = '-drive file=%s,if=virtio,format=%s' % (self.rootfs, rootfs_format)
if self.fstype in ('cpio.gz', 'cpio'):
self.kernel_cmdline = 'root=/dev/ram0 rw debugshell'
self.rootfs_options = '-initrd %s' % self.rootfs
else:
+ vm_drive = ''
if self.fstype in self.vmtypes:
if self.fstype == 'iso':
- vm_drive = '-cdrom %s' % self.rootfs
- else:
- cmd1 = "grep -q 'root=/dev/sd' %s" % self.rootfs
- cmd2 = "grep -q 'root=/dev/hd' %s" % self.rootfs
- if subprocess.call(cmd1, shell=True) == 0:
+ vm_drive = '-drive file=%s,if=virtio,media=cdrom' % self.rootfs
+ elif self.get('QB_DRIVE_TYPE'):
+ drive_type = self.get('QB_DRIVE_TYPE')
+ if drive_type.startswith("/dev/sd"):
logger.info('Using scsi drive')
- vm_drive = '-drive if=none,id=hd,file=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd' % self.rootfs
- elif subprocess.call(cmd2, shell=True) == 0:
- logger.info('Using scsi drive')
- vm_drive = self.rootfs
+ vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd' \
+ % (self.rootfs, rootfs_format)
+ elif drive_type.startswith("/dev/hd"):
+ logger.info('Using ide drive')
+ vm_drive = "-drive file=%s,format=%s" % (self.rootfs, rootfs_format)
else:
- logger.warn("Can't detect drive type %s" % self.rootfs)
- logger.warn('Tring to use virtio block drive')
- vm_drive = '-drive if=virtio,file=%s' % self.rootfs
+ # virtio might have been selected explicitly (just use it), or
+ # is used as fallback (then warn about that).
+ if not drive_type.startswith("/dev/vd"):
+ logger.warning("Unknown QB_DRIVE_TYPE: %s" % drive_type)
+ logger.warning("Failed to figure out drive type, consider define or fix QB_DRIVE_TYPE")
+ logger.warning('Trying to use virtio block drive')
+ vm_drive = '-drive if=virtio,file=%s,format=%s' % (self.rootfs, rootfs_format)
+
+ # All branches above set vm_drive.
self.rootfs_options = '%s -no-reboot' % vm_drive
- self.kernel_cmdline = 'root=%s rw highres=off' % (self.get('QB_KERNEL_ROOT'))
+ self.kernel_cmdline = 'root=%s rw' % (self.get('QB_KERNEL_ROOT'))
if self.fstype == 'nfs':
self.rootfs_options = ''
- k_root = '/dev/nfs nfsroot=%s:%s,%s' % (self.nfs_server, self.nfs_dir, self.unfs_opts)
- self.kernel_cmdline = 'root=%s rw highres=off' % k_root
+ k_root = '/dev/nfs nfsroot=%s:%s,%s' % (self.nfs_server, os.path.abspath(self.rootfs), self.unfs_opts)
+ self.kernel_cmdline = 'root=%s rw' % k_root
+
+ if self.fstype == 'none':
+ self.rootfs_options = ''
self.set('ROOTFS_OPTIONS', self.rootfs_options)
@@ -857,7 +1224,7 @@ class BaseConfig(object):
"""attempt to determine the appropriate qemu-system binary"""
mach = self.get('MACHINE')
if not mach:
- search = '.*(qemux86-64|qemux86|qemuarm64|qemuarm|qemumips64|qemumips|qemuppc).*'
+ search = '.*(qemux86-64|qemux86|qemuarm64|qemuarm|qemumips64|qemumips64el|qemumipsel|qemumips|qemuppc).*'
if self.rootfs:
match = re.match(search, self.rootfs)
if match:
@@ -884,31 +1251,76 @@ class BaseConfig(object):
qbsys = 'mips'
elif mach == 'qemumips64':
qbsys = 'mips64'
+ elif mach == 'qemumipsel':
+ qbsys = 'mipsel'
+ elif mach == 'qemumips64el':
+ qbsys = 'mips64el'
+ elif mach == 'qemuriscv64':
+ qbsys = 'riscv64'
+ elif mach == 'qemuriscv32':
+ qbsys = 'riscv32'
+ else:
+ logger.error("Unable to determine QEMU PC System emulator for %s machine." % mach)
+ logger.error("As %s is not among valid QEMU machines such as," % mach)
+ logger.error("qemux86-64, qemux86, qemuarm64, qemuarm, qemumips64, qemumips64el, qemumipsel, qemumips, qemuppc")
+ raise RunQemuError("Set qb_system_name with suitable QEMU PC System emulator in .*qemuboot.conf.")
return 'qemu-system-%s' % qbsys
- def setup_final(self):
+ def check_qemu_system(self):
qemu_system = self.get('QB_SYSTEM_NAME')
if not qemu_system:
qemu_system = self.guess_qb_system()
if not qemu_system:
- raise Exception("Failed to boot, QB_SYSTEM_NAME is NULL!")
+ raise RunQemuError("Failed to boot, QB_SYSTEM_NAME is NULL!")
+ self.qemu_system = qemu_system
+
+ def setup_final(self):
+ qemu_bin = os.path.join(self.bindir_native, self.qemu_system)
+
+ # qemu-native may be in ASSUME_PROVIDED, in which case QEMU is not
+ # found in the sysroot and the host's qemu has to be used instead.
+ if not os.path.exists(qemu_bin):
+ logger.info("QEMU binary not found in %s, trying host's QEMU" % qemu_bin)
+ for path in os.environ.get('PATH', '').split(':'):
+ qemu_bin_tmp = os.path.join(path, self.qemu_system)
+ logger.info("Trying: %s" % qemu_bin_tmp)
+ if os.path.exists(qemu_bin_tmp):
+ qemu_bin = qemu_bin_tmp
+ if not os.path.isabs(qemu_bin):
+ qemu_bin = os.path.abspath(qemu_bin)
+ logger.info("Using host's QEMU: %s" % qemu_bin)
+ break
- qemu_bin = '%s/%s' % (self.get('STAGING_BINDIR_NATIVE'), qemu_system)
if not os.access(qemu_bin, os.X_OK):
raise OEPathError("No QEMU binary '%s' could be found" % qemu_bin)
- check_libgl(qemu_bin)
+ self.qemu_opt = "%s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND'))
- self.qemu_opt = "%s %s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.qemu_opt_script, self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND'))
+ for ovmf in self.ovmf_bios:
+ format = ovmf.rsplit('.', 1)[-1]
+ self.qemu_opt += ' -drive if=pflash,format=%s,file=%s' % (format, ovmf)
+
+ self.qemu_opt += ' ' + self.qemu_opt_script
+
+ if self.ovmf_secboot_pkkek1:
+ # Provide the Platform Key and first Key Exchange Key certificate as an
+ # OEM string in the SMBIOS Type 11 table. Prepend the certificate string
+ # with "application prefix" of the EnrollDefaultKeys.efi application
+ self.qemu_opt += ' -smbios type=11,value=4e32566d-8e9e-4f52-81d3-5bb9715f9727:' \
+ + self.ovmf_secboot_pkkek1
+
+ # Append qemuparams to override previous settings
+ if self.qemuparams:
+ self.qemu_opt += ' ' + self.qemuparams
if self.snapshot:
self.qemu_opt += " -snapshot"
- if self.serialstdio:
- logger.info("Interrupt character is '^]'")
- cmd = "stty intr ^]"
- subprocess.call(cmd, shell=True)
+ if self.serialconsole:
+ if sys.stdin.isatty():
+ subprocess.check_call(("stty", "intr", "^]"))
+ logger.info("Interrupt character is '^]'")
first_serial = ""
if not re.search("-nographic", self.qemu_opt):
@@ -926,41 +1338,77 @@ class BaseConfig(object):
elif serial_num == 1:
self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT")
+ # We always want ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES);
+ # if no serial or serialtcp option was specified, only ttyS0 is created
+ # and sysvinit shows an error trying to enable ttyS1:
+ # INIT: Id "S1" respawning too fast: disabled for 5 minutes
+ serial_num = len(re.findall("-serial", self.qemu_opt))
+ if serial_num == 0:
+ if re.search("-nographic", self.qemu_opt) or self.serialstdio:
+ self.qemu_opt += " -serial mon:stdio -serial null"
+ else:
+ self.qemu_opt += " -serial mon:vc -serial null"
+
def start_qemu(self):
+ import shlex
if self.kernel:
- kernel_opts = "-kernel %s -append '%s %s %s'" % (self.kernel, self.kernel_cmdline, self.kernel_cmdline_script, self.get('QB_KERNEL_CMDLINE_APPEND'))
+ kernel_opts = "-kernel %s -append '%s %s %s %s'" % (self.kernel, self.kernel_cmdline,
+ self.kernel_cmdline_script, self.get('QB_KERNEL_CMDLINE_APPEND'),
+ self.bootparams)
+ if self.bios:
+ kernel_opts += " -bios %s" % self.bios
if self.dtb:
kernel_opts += " -dtb %s" % self.dtb
else:
kernel_opts = ""
cmd = "%s %s" % (self.qemu_opt, kernel_opts)
- logger.info('Running %s' % cmd)
- if subprocess.call(cmd, shell=True) != 0:
- raise Exception('Failed to run %s' % cmd)
+ cmds = shlex.split(cmd)
+ logger.info('Running %s\n' % cmd)
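+ # Hand the tap/port lock file descriptors over to QEMU so the
+ # locks remain held for as long as the emulator itself runs.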
+ pass_fds = []
+ if self.taplock_descriptor:
+ pass_fds = [self.taplock_descriptor.fileno()]
+ if len(self.portlocks):
+ for descriptor in self.portlocks.values():
+ pass_fds.append(descriptor.fileno())
+ process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds)
+ self.qemupid = process.pid
+ retcode = process.wait()
+ if retcode:
+ if retcode == -signal.SIGTERM:
+ logger.info("Qemu terminated by SIGTERM")
+ else:
+ logger.error("Failed to run qemu: %s", process.stderr.read().decode())
def cleanup(self):
+ if self.cleaned:
+ return
+
+ # avoid dealing with SIGTERM when cleanup function is running
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+
+ logger.info("Cleaning up")
if self.cleantap:
- cmd = 'sudo %s %s %s' % (self.qemuifdown, self.tap, self.get('STAGING_DIR_NATIVE'))
- logger.info('Running %s' % cmd)
- subprocess.call(cmd, shell=True)
- if self.lock_descriptor:
- logger.info("Releasing lockfile for tap device '%s'" % self.tap)
- self.release_lock()
+ cmd = ('sudo', self.qemuifdown, self.tap, self.bindir_native)
+ logger.debug('Running %s' % str(cmd))
+ subprocess.check_call(cmd)
+ self.release_taplock()
+ self.release_portlock()
if self.nfs_running:
logger.info("Shutting down the userspace NFS server...")
- cmd = "runqemu-export-rootfs stop %s" % self.nfs_dir
- logger.info('Running %s' % cmd)
- subprocess.call(cmd, shell=True)
+ cmd = ("runqemu-export-rootfs", "stop", self.rootfs)
+ logger.debug('Running %s' % str(cmd))
+ subprocess.check_call(cmd)
if self.saved_stty:
- cmd = "stty %s" % self.saved_stty
- subprocess.call(cmd, shell=True)
+ subprocess.check_call(("stty", self.saved_stty))
if self.clean_nfs_dir:
- logger.info('Removing %s' % self.nfs_dir)
- shutil.rmtree(self.nfs_dir)
- shutil.rmtree('%s.pseudo_state' % self.nfs_dir)
+ logger.info('Removing %s' % self.rootfs)
+ shutil.rmtree(self.rootfs)
+ shutil.rmtree('%s.pseudo_state' % self.rootfs)
+
+ self.cleaned = True
def load_bitbake_env(self, mach=None):
if self.bitbake_e:
@@ -983,38 +1431,69 @@ class BaseConfig(object):
self.bitbake_e = subprocess.check_output(cmd, shell=True).decode('utf-8')
except subprocess.CalledProcessError as err:
self.bitbake_e = ''
- logger.warn("Couldn't run 'bitbake -e' to gather environment information:\n%s" % err.output.decode('utf-8'))
+ logger.warning("Couldn't run 'bitbake -e' to gather environment information:\n%s" % err.output.decode('utf-8'))
+
+ def validate_combos(self):
+ if (self.fstype in self.vmtypes) and self.kernel:
+ raise RunQemuError("%s doesn't need kernel %s!" % (self.fstype, self.kernel))
+
+ @property
+ def bindir_native(self):
+ result = self.get('STAGING_BINDIR_NATIVE')
+ if result and os.path.exists(result):
+ return result
+
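+ # No usable cached value; ask bitbake for qemu-helper-native's
+ # STAGING_BINDIR_NATIVE instead.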
+ cmd = ('bitbake', 'qemu-helper-native', '-e')
+ logger.info('Running %s...' % str(cmd))
+ out = subprocess.check_output(cmd).decode('utf-8')
+
+ match = re.search('^STAGING_BINDIR_NATIVE="(.*)"', out, re.M)
+ if match:
+ result = match.group(1)
+ if os.path.exists(result):
+ self.set('STAGING_BINDIR_NATIVE', result)
+ return result
+ raise RunQemuError("Native sysroot directory %s doesn't exist" % result)
+ else:
+ raise RunQemuError("Can't find STAGING_BINDIR_NATIVE in '%s' output" % cmd)
+
def main():
- if len(sys.argv) == 1 or "help" in sys.argv:
+ if "help" in sys.argv or '-h' in sys.argv or '--help' in sys.argv:
print_usage()
return 0
- config = BaseConfig()
try:
+ config = BaseConfig()
+
+ def sigterm_handler(signum, frame):
+ logger.info("SIGTERM received")
+ os.kill(config.qemupid, signal.SIGTERM)
+ config.cleanup()
+ # Deliberately ignore the return code of 'tput smam'.
+ subprocess.call(["tput", "smam"])
+ signal.signal(signal.SIGTERM, sigterm_handler)
+
config.check_args()
- except Exception as esc:
- logger.error(esc)
- logger.error("Try 'runqemu help' on how to use it")
- return 1
- config.read_qemuboot()
- config.check_and_set()
- config.print_config()
- try:
+ config.read_qemuboot()
+ config.check_and_set()
+ # Check whether the combos are valid
+ config.validate_combos()
+ config.print_config()
config.setup_network()
+ config.setup_rootfs()
config.setup_final()
config.start_qemu()
+ except RunQemuError as err:
+ logger.error(err)
+ return 1
+ except Exception as err:
+ import traceback
+ traceback.print_exc()
+ return 1
finally:
config.cleanup()
- return 0
+ # Deliberately ignore the return code of 'tput smam'.
+ subprocess.call(["tput", "smam"])
if __name__ == "__main__":
- try:
- ret = main()
- except OEPathError as err:
- ret = 1
- logger.error(err.message)
- except Exception as esc:
- ret = 1
- import traceback
- traceback.print_exc()
- sys.exit(ret)
+ sys.exit(main())
diff --git a/scripts/runqemu-addptable2image b/scripts/runqemu-addptable2image
index f0195ad8a3..ca29427258 100755
--- a/scripts/runqemu-addptable2image
+++ b/scripts/runqemu-addptable2image
@@ -4,20 +4,8 @@
#
# Copyright (C) 2006-2007 OpenedHand Ltd.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
IMAGE=$1
IMAGEOUT=$2
diff --git a/scripts/runqemu-export-rootfs b/scripts/runqemu-export-rootfs
index 3dee131166..384c091713 100755
--- a/scripts/runqemu-export-rootfs
+++ b/scripts/runqemu-export-rootfs
@@ -2,18 +2,8 @@
#
# Copyright (c) 2005-2009 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
usage() {
echo "Usage: $0 {start|stop|restart} <nfs-export-dir>"
@@ -44,7 +34,7 @@ if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
echo "Did you forget to source your build environment setup script?"
exit 1
fi
-. $SYSROOT_SETUP_SCRIPT
+. $SYSROOT_SETUP_SCRIPT meta-ide-support
if [ ! -e "$OECORE_NATIVE_SYSROOT/usr/bin/unfsd" ]; then
echo "Error: Unable to find unfsd binary in $OECORE_NATIVE_SYSROOT/usr/bin/"
@@ -77,24 +67,14 @@ if [ ! -d "$PSEUDO_LOCALSTATEDIR" ]; then
exit 1
fi
-# rpc.mountd RPC port
-NFS_MOUNTPROG=$[ 21111 + $NFS_INSTANCE ]
-# rpc.nfsd RPC port
-NFS_NFSPROG=$[ 11111 + $NFS_INSTANCE ]
-# NFS port number
-NFS_PORT=$[ 3049 + 2 * $NFS_INSTANCE ]
+# NFS server port number
+NFSD_PORT=${NFSD_PORT:=$[ 3049 + 2 * $NFS_INSTANCE ]}
# mountd port number
-MOUNT_PORT=$[ 3048 + 2 * $NFS_INSTANCE ]
+MOUNTD_PORT=${MOUNTD_PORT:=$[ 3048 + 2 * $NFS_INSTANCE ]}
## For debugging you would additionally add
## --debug all
-UNFSD_OPTS="-p -N -i $NFSPID -e $EXPORTS -x $NFS_NFSPROG -n $NFS_PORT -y $NFS_MOUNTPROG -m $MOUNT_PORT"
-
-# Setup the exports file
-if [ "$1" = "start" ]; then
- echo "Creating exports file..."
- echo "$NFS_EXPORT_DIR (rw,async,no_root_squash,no_all_squash,insecure)" > $EXPORTS
-fi
+UNFSD_OPTS="-p -N -i $NFSPID -e $EXPORTS -n $NFSD_PORT -m $MOUNTD_PORT"
# See how we were called.
case "$1" in
@@ -114,6 +94,9 @@ case "$1" in
exit 1
fi
+ echo "Creating exports file..."
+ echo "$NFS_EXPORT_DIR (rw,no_root_squash,no_all_squash,insecure)" > $EXPORTS
+
echo "Starting User Mode nfsd"
echo " $PSEUDO $PSEUDO_OPTS $OECORE_NATIVE_SYSROOT/usr/bin/unfsd $UNFSD_OPTS"
$PSEUDO $PSEUDO_OPTS $OECORE_NATIVE_SYSROOT/usr/bin/unfsd $UNFSD_OPTS
@@ -133,7 +116,7 @@ case "$1" in
fi
echo " "
echo "On your target please remember to add the following options for NFS"
- echo "nfsroot=IP_ADDRESS:$NFS_EXPORT_DIR,nfsvers=3,port=$NFSD_PORT,mountprog=$MOUNTD_RPCPORT,nfsprog=$NFSD_RPCPORT,udp,mountport=$MOUNTD_PORT"
+ echo "nfsroot=IP_ADDRESS:$NFS_EXPORT_DIR,nfsvers=3,port=$NFSD_PORT,udp,mountport=$MOUNTD_PORT"
;;
stop)
if [ -f "$NFSPID" ]; then
diff --git a/scripts/runqemu-extract-sdk b/scripts/runqemu-extract-sdk
index 32ddd485b6..8a4ee90a1d 100755
--- a/scripts/runqemu-extract-sdk
+++ b/scripts/runqemu-extract-sdk
@@ -7,18 +7,8 @@
#
# Copyright (c) 2010 Intel Corp.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
function usage() {
echo "Usage: $0 <image-tarball> <extract-dir>"
@@ -31,20 +21,20 @@ fi
SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot 2> /dev/null`
if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
- echo "Error: Unable to find the oe-find-native-sysroot script"
- echo "Did you forget to source your build system environment setup script?"
- exit 1
+ echo "Error: Unable to find the oe-find-native-sysroot script"
+ echo "Did you forget to source your build system environment setup script?"
+ exit 1
fi
-. $SYSROOT_SETUP_SCRIPT
+. $SYSROOT_SETUP_SCRIPT meta-ide-support
PSEUDO_OPTS="-P $OECORE_NATIVE_SYSROOT/usr"
ROOTFS_TARBALL=$1
SDK_ROOTFS_DIR=$2
if [ ! -e "$ROOTFS_TARBALL" ]; then
- echo "Error: sdk tarball '$ROOTFS_TARBALL' does not exist"
- usage
- exit 1
+ echo "Error: sdk tarball '$ROOTFS_TARBALL' does not exist"
+ usage
+ exit 1
fi
# Convert SDK_ROOTFS_DIR to a full pathname
@@ -53,6 +43,9 @@ if [[ ${SDK_ROOTFS_DIR:0:1} != "/" ]]; then
fi
TAR_OPTS=""
+if [[ "$ROOTFS_TARBALL" =~ tar\.xz$ ]]; then
+ TAR_OPTS="--numeric-owner -xJf"
+fi
if [[ "$ROOTFS_TARBALL" =~ tar\.bz2$ ]]; then
TAR_OPTS="--numeric-owner -xjf"
fi
@@ -64,7 +57,7 @@ if [[ "$ROOTFS_TARBALL" =~ \.tar$ ]]; then
fi
if [ -z "$TAR_OPTS" ]; then
echo "Error: Unable to determine sdk tarball format"
- echo "Accepted types: .tar / .tar.gz / .tar.bz2"
+ echo "Accepted types: .tar / .tar.gz / .tar.bz2 / .tar.xz"
exit 1
fi
@@ -76,10 +69,12 @@ fi
pseudo_state_dir="$SDK_ROOTFS_DIR/../$(basename "$SDK_ROOTFS_DIR").pseudo_state"
pseudo_state_dir="$(readlink -f $pseudo_state_dir)"
-if [ -e "$pseudo_state_dir" ]; then
+debug_image="`echo $ROOTFS_TARBALL | grep '\-dbg\.tar\.'`"
+
+if [ -e "$pseudo_state_dir" -a -z "$debug_image" ]; then
echo "Error: $pseudo_state_dir already exists!"
echo "Please delete the rootfs tree and pseudo directory manually"
- echo "if this is really what you want."
+ echo "if this is really what you want."
exit 1
fi
diff --git a/scripts/runqemu-gen-tapdevs b/scripts/runqemu-gen-tapdevs
index bfb60f44ae..a6ee4517da 100755
--- a/scripts/runqemu-gen-tapdevs
+++ b/scripts/runqemu-gen-tapdevs
@@ -9,33 +9,31 @@
#
# Copyright (C) 2010 Intel Corp.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+uid=`id -u`
+gid=`id -g`
+if [ -n "$SUDO_UID" ]; then
+ uid=$SUDO_UID
+fi
+if [ -n "$SUDO_GID" ]; then
+ gid=$SUDO_GID
+fi
usage() {
- echo "Usage: sudo $0 <uid> <gid> <num> <native-sysroot-basedir>"
- echo "Where <uid> is the numeric user id the tap devices will be owned by"
+ echo "Usage: sudo $0 <uid> <gid> <num> <staging_bindir_native>"
+ echo "Where <uid> is the numeric user id the tap devices will be owned by"
echo "Where <gid> is the numeric group id the tap devices will be owned by"
echo "<num> is the number of tap devices to create (0 to remove all)"
echo "<native-sysroot-basedir> is the path to the build system's native sysroot"
+ echo "For example:"
+ echo "$ bitbake qemu-helper-native"
+ echo "$ sudo $0 $uid $gid 4 tmp/sysroots-components/x86_64/qemu-helper-native/usr/bin"
+ echo ""
exit 1
}
-if [ $EUID -ne 0 ]; then
- echo "Error: This script must be run with root privileges"
- exit
-fi
-
if [ $# -ne 4 ]; then
echo "Error: Incorrect number of arguments"
usage
@@ -44,14 +42,19 @@ fi
TUID=$1
GID=$2
COUNT=$3
-SYSROOT=$4
+STAGING_BINDIR_NATIVE=$4
-TUNCTL=$SYSROOT/usr/bin/tunctl
+TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
if [[ ! -x "$TUNCTL" || -d "$TUNCTL" ]]; then
echo "Error: $TUNCTL is not an executable"
usage
fi
+if [ $EUID -ne 0 ]; then
+ echo "Error: This script must be run with root privileges"
+ exit
+fi
+
SCRIPT_DIR=`dirname $0`
RUNQEMU_IFUP="$SCRIPT_DIR/runqemu-ifup"
if [ ! -x "$RUNQEMU_IFUP" ]; then
@@ -85,7 +88,7 @@ if [ $COUNT -gt 0 ]; then
echo "Creating $COUNT tap devices for UID: $TUID GID: $GID..."
for ((index=0; index < $COUNT; index++)); do
echo "Creating tap$index"
- ifup=`$RUNQEMU_IFUP $TUID $GID $SYSROOT 2>&1`
+ ifup=`$RUNQEMU_IFUP $TUID $GID $STAGING_BINDIR_NATIVE 2>&1`
if [ $? -ne 0 ]; then
echo "Error running tunctl: $ifup"
exit 1
diff --git a/scripts/runqemu-ifdown b/scripts/runqemu-ifdown
index 8f66cfa2a9..a104c37bf8 100755
--- a/scripts/runqemu-ifdown
+++ b/scripts/runqemu-ifdown
@@ -13,18 +13,8 @@
#
# Copyright (c) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
usage() {
echo "sudo $(basename $0) <tap-dev> <native-sysroot-basedir>"
@@ -41,16 +31,26 @@ if [ $# -ne 2 ]; then
fi
TAP=$1
-NATIVE_SYSROOT_DIR=$2
+STAGING_BINDIR_NATIVE=$2
-TUNCTL=$NATIVE_SYSROOT_DIR/usr/bin/tunctl
+TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
if [ ! -e "$TUNCTL" ]; then
- echo "Error: Unable to find tunctl binary in '$NATIVE_SYSROOT_DIR/usr/bin', please bitbake qemu-helper-native"
+ echo "Error: Unable to find tunctl binary in '$STAGING_BINDIR_NATIVE', please bitbake qemu-helper-native"
exit 1
fi
$TUNCTL -d $TAP
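+# If tunctl left the interface behind, remove it with ip(8) as well.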
+IFCONFIG=`which ip 2> /dev/null`
+if [ "x$IFCONFIG" = "x" ]; then
+ # better than nothing...
+ IFCONFIG=/sbin/ip
+fi
+if [ -x "$IFCONFIG" ]; then
+ if $IFCONFIG link show $TAP > /dev/null 2>&1; then
+ $IFCONFIG link del $TAP
+ fi
+fi
# cleanup the remaining iptables rules
IPTABLES=`which iptables 2> /dev/null`
if [ "x$IPTABLES" = "x" ]; then
diff --git a/scripts/runqemu-ifup b/scripts/runqemu-ifup
index d9bd894123..bb661740c5 100755
--- a/scripts/runqemu-ifup
+++ b/scripts/runqemu-ifup
@@ -20,18 +20,8 @@
#
# Copyright (c) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
usage() {
echo "sudo $(basename $0) <uid> <gid> <native-sysroot-basedir>"
@@ -49,11 +39,11 @@ fi
USERID="-u $1"
GROUP="-g $2"
-NATIVE_SYSROOT_DIR=$3
+STAGING_BINDIR_NATIVE=$3
-TUNCTL=$NATIVE_SYSROOT_DIR/usr/bin/tunctl
+TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
if [ ! -x "$TUNCTL" ]; then
- echo "Error: Unable to find tunctl binary in '$NATIVE_SYSROOT_DIR/usr/bin', please bitbake qemu-helper-native"
+ echo "Error: Unable to find tunctl binary in '$STAGING_BINDIR_NATIVE', please bitbake qemu-helper-native"
exit 1
fi
diff --git a/scripts/runqemu.README b/scripts/runqemu.README
index 5908d831a4..da9abd7dfb 100644
--- a/scripts/runqemu.README
+++ b/scripts/runqemu.README
@@ -35,7 +35,7 @@ Notes
run as non root. The runqemu-gen-tapdevs script can also be used by
root to prepopulate the appropriate network devices.
- You can access the host computer at 192.168.7.1 within the image.
- - Your qemu system will be accessible as 192.16.7.2.
+ - Your qemu system will be accessible as 192.168.7.2.
- The script extracts the root filesystem specified under pseudo and sets up a userspace
NFS server to share the image over by default meaning the filesystem can be accessed by
both the host and guest systems.
diff --git a/scripts/send-error-report b/scripts/send-error-report
index 15b5e84911..cfbcaa52cb 100755
--- a/scripts/send-error-report
+++ b/scripts/send-error-report
@@ -6,6 +6,9 @@
# Copyright (C) 2013 Intel Corporation
# Author: Andreea Proca <andreea.b.proca@intel.com>
# Author: Michael Wood <michael.g.wood@intel.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import urllib.request, urllib.error
import sys
@@ -62,7 +65,7 @@ def edit_content(json_file_path):
def prepare_data(args):
# attempt to get the max_log_size from the server's settings
- max_log_size = getPayloadLimit("http://"+args.server+"/ClientPost/JSON")
+ max_log_size = getPayloadLimit(args.protocol+args.server+"/ClientPost/JSON")
if not os.path.isfile(args.error_file):
log.error("No data file found.")
@@ -88,7 +91,7 @@ def prepare_data(args):
log.error("Name needs to be provided either via "+userfile+" or as an argument (-n).")
sys.exit(1)
- while len(args.name) <= 0 and len(args.name) < 50:
+ while len(args.name) <= 0 or len(args.name) > 50:
print("\nName needs to be given and must not more than 50 characters.")
args.name, args.email = ask_for_contactdetails()
@@ -132,18 +135,18 @@ def send_data(data, args):
headers={'Content-type': 'application/json', 'User-Agent': "send-error-report/"+version}
if args.json:
- url = "http://"+args.server+"/ClientPost/JSON/"
+ url = args.protocol+args.server+"/ClientPost/JSON/"
else:
- url = "http://"+args.server+"/ClientPost/"
+ url = args.protocol+args.server+"/ClientPost/"
req = urllib.request.Request(url, data=data, headers=headers)
try:
response = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
- logging.error(e.reason)
+ logging.error(str(e))
sys.exit(1)
- print(response.read())
+ print(response.read().decode('utf-8'))
if __name__ == '__main__':
@@ -187,6 +190,11 @@ if __name__ == '__main__':
help="Return the result in json format, silences all other output",
action="store_true")
+ arg_parse.add_argument("--no-ssl",
+ help="Use http instead of https protocol",
+ dest="protocol",
+ action="store_const", const="http://", default="https://")
+
args = arg_parse.parse_args()
diff --git a/scripts/send-pull-request b/scripts/send-pull-request
index 575549db38..70b5a5cfb2 100755
--- a/scripts/send-pull-request
+++ b/scripts/send-pull-request
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2010-2011, Intel Corporation.
-# All Rights Reserved
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-or-later
#
#
@@ -158,11 +145,16 @@ GIT_EXTRA_CC=$(for R in $EXTRA_CC; do echo -n "--cc='$R' "; done)
unset IFS
# Handoff to git-send-email. It will perform the send confirmation.
+# Mail threading was already handled by git-format-patch in
+# create-pull-request, so we must not allow git-send-email to
+# add In-Reply-To and References headers again.
PATCHES=$(echo $PDIR/*.patch)
if [ $AUTO_CL -eq 1 ]; then
# Send the cover letter to every recipient, both specified as well as
# harvested. Then remove it from the patches list.
- eval "git send-email $GIT_TO $GIT_CC $GIT_EXTRA_CC --confirm=always --no-chain-reply-to --suppress-cc=all $CL"
+ # --no-thread is redundant here (only sending a single message) and
+ # merely added for the sake of consistency.
+ eval "git send-email $GIT_TO $GIT_CC $GIT_EXTRA_CC --confirm=always --no-thread --suppress-cc=all $CL"
if [ $? -eq 1 ]; then
echo "ERROR: failed to send cover-letter with automatic recipients."
exit 1
@@ -172,7 +164,7 @@ fi
# Send the patch to the specified recipients and, if -c was specified, those git
# finds in this specific patch.
-eval "git send-email $GIT_TO $GIT_EXTRA_CC --confirm=always --no-chain-reply-to $GITSOBCC $PATCHES"
+eval "git send-email $GIT_TO $GIT_EXTRA_CC --confirm=always --no-thread $GITSOBCC $PATCHES"
if [ $? -eq 1 ]; then
echo "ERROR: failed to send patches."
exit 1
diff --git a/scripts/sstate-cache-management.sh b/scripts/sstate-cache-management.sh
index 2ab450ab59..f1706a2229 100755
--- a/scripts/sstate-cache-management.sh
+++ b/scripts/sstate-cache-management.sh
@@ -2,18 +2,7 @@
# Copyright (c) 2012 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-only
#
# Global vars
diff --git a/scripts/sstate-diff-machines.sh b/scripts/sstate-diff-machines.sh
index 056aa0a04c..1d721eb87d 100755
--- a/scripts/sstate-diff-machines.sh
+++ b/scripts/sstate-diff-machines.sh
@@ -1,5 +1,7 @@
#!/bin/bash
-
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Used to compare sstate checksums between MACHINES.
# Execute script and compare generated list.M files.
# Using bash to have PIPESTATUS variable.
@@ -118,7 +120,7 @@ for M in ${machines}; do
cp -ra ${tmpdir}/stamps/* ${OUTPUT}/${M}
find ${OUTPUT}/${M} -name \*sigdata\* | sed "s#${OUTPUT}/${M}/##g" | sort > ${OUTPUT}/${M}/list
M_UNDERSCORE=`echo ${M} | sed 's/-/_/g'`
- sed "s/${M_UNDERSCORE}/MACHINE/g; s/${M}/MACHINE/g" ${OUTPUT}/${M}/list | sort > ${OUTPUT}/${M}/list.M
+ sed "s/^${M_UNDERSCORE}-/MACHINE/g" ${OUTPUT}/${M}/list | sort > ${OUTPUT}/${M}/list.M
find ${tmpdir}/stamps/ -name \*sigdata\* | xargs rm -f
else
printf "ERROR: no sigdata files were generated for MACHINE $M in ${tmpdir}/stamps\n";
diff --git a/scripts/sstate-sysroot-cruft.sh b/scripts/sstate-sysroot-cruft.sh
index b7ed8ea846..fbf1ca3c43 100755
--- a/scripts/sstate-sysroot-cruft.sh
+++ b/scripts/sstate-sysroot-cruft.sh
@@ -1,5 +1,7 @@
#!/bin/sh
-
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Used to find files installed in sysroot which are not tracked by sstate manifest
# Global vars
@@ -105,7 +107,9 @@ WHITELIST="${WHITELIST} \
# generated by php
WHITELIST="${WHITELIST} \
+ .*/usr/lib/php5/php/.channels \
.*/usr/lib/php5/php/.channels/.* \
+ .*/usr/lib/php5/php/.registry \
.*/usr/lib/php5/php/.registry/.* \
.*/usr/lib/php5/php/.depdb \
.*/usr/lib/php5/php/.depdblock \
@@ -141,6 +145,18 @@ WHITELIST="${WHITELIST} \
.*/var/cache/fontconfig/ \
"
+# created by oe.utils.write_ld_so_conf, which is used from a few bbclasses and recipes:
+# meta/classes/image-prelink.bbclass: oe.utils.write_ld_so_conf(d)
+# meta/classes/insane.bbclass: oe.utils.write_ld_so_conf(d)
+# meta/classes/insane.bbclass: oe.utils.write_ld_so_conf(d)
+# meta/recipes-gnome/gobject-introspection/gobject-introspection_1.48.0.bb: oe.utils.write_ld_so_conf(d)
+# meta/recipes-gnome/gobject-introspection/gobject-introspection_1.48.0.bb: oe.utils.write_ld_so_conf(d)
+# introduced in oe-core commit 7fd1d7e639c2ed7e0699937a5cb245c187b7c811
+# and more visible since added to gobject-introspection in 10e0c1a3a452baa05d160a92a54b2e33cf0fd061
+WHITELIST="${WHITELIST} \
+ [^/]*/etc/ld.so.conf \
+"
+
SYSROOTS="`readlink -f ${tmpdir}`/sysroots/"
mkdir ${OUTPUT}
diff --git a/scripts/sysroot-relativelinks.py b/scripts/sysroot-relativelinks.py
index e44eba2b11..56e36f3ad5 100755
--- a/scripts/sysroot-relativelinks.py
+++ b/scripts/sysroot-relativelinks.py
@@ -1,4 +1,8 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import sys
import os
@@ -24,7 +28,7 @@ def handlelink(filep, subdir):
os.symlink(os.path.relpath(topdir+link, subdir), filep)
for subdir, dirs, files in os.walk(topdir):
- for f in files:
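+ # Walk directory entries too: symlinks to directories also
+ # need to be converted.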
+ for f in dirs + files:
filep = os.path.join(subdir, f)
if os.path.islink(filep):
#print("Considering %s" % filep)
diff --git a/scripts/task-time b/scripts/task-time
new file mode 100755
index 0000000000..bcd1e25817
--- /dev/null
+++ b/scripts/task-time
@@ -0,0 +1,135 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import os
+import re
+import sys
+
+arg_parser = argparse.ArgumentParser(
+ description="""
+Reports time consumed for one or more tasks in a format similar to the standard
+Bash 'time' builtin. Optionally sorts tasks by real (wall-clock), user (user
+space CPU), or sys (kernel CPU) time.
+""")
+
+arg_parser.add_argument(
+ "paths",
+ metavar="path",
+ nargs="+",
+ help="""
+A path containing task buildstats. If the path is a directory, e.g.
+build/tmp/buildstats, then all tasks found (recursively) in it will be
+processed. If the path is a single task buildstat, e.g.
+build/tmp/buildstats/20161018083535/foo-1.0-r0/do_compile, then just that
+buildstat will be processed. Multiple paths can be specified to process all of
+them. Files whose names do not start with "do_" are ignored.
+""")
+
+arg_parser.add_argument(
+ "--sort",
+ choices=("none", "real", "user", "sys"),
+ default="none",
+ help="""
+The measurement to sort the output by. Defaults to 'none', which means to sort
+by the order paths were given on the command line. For other options, tasks are
+sorted in descending order from the highest value.
+""")
+
+args = arg_parser.parse_args()
+
+# Field names and regexes for parsing out their values from buildstat files
+field_regexes = (("elapsed", ".*Elapsed time: ([0-9.]+)"),
+ ("user", "rusage ru_utime: ([0-9.]+)"),
+ ("sys", "rusage ru_stime: ([0-9.]+)"),
+ ("child user", "Child rusage ru_utime: ([0-9.]+)"),
+ ("child sys", "Child rusage ru_stime: ([0-9.]+)"))
+
+# A list of (<path>, <dict>) tuples, where <path> is the path of a do_* task
+# buildstat file and <dict> maps fields from the file to their values
+task_infos = []
+
+def save_times_for_task(path):
+ """Saves information for the buildstat file 'path' in 'task_infos'."""
+
+ if not os.path.basename(path).startswith("do_"):
+ return
+
+ with open(path) as f:
+ fields = {}
+
+ for line in f:
+ for name, regex in field_regexes:
+ match = re.match(regex, line)
+ if match:
+ fields[name] = float(match.group(1))
+ break
+
+ # Check that all expected fields were present
+ for name, regex in field_regexes:
+ if name not in fields:
+ print("Warning: Skipping '{}' because no field matching '{}' could be found"
+ .format(path, regex),
+ file=sys.stderr)
+ return
+
+ task_infos.append((path, fields))
+
+def save_times_for_dir(path):
+ """Runs save_times_for_task() for each file in path and its subdirs, recursively."""
+
+ # Raise an exception for os.walk() errors instead of ignoring them
+ def walk_onerror(e):
+ raise e
+
+ for root, _, files in os.walk(path, onerror=walk_onerror):
+ for fname in files:
+ save_times_for_task(os.path.join(root, fname))
+
+for path in args.paths:
+ if os.path.isfile(path):
+ save_times_for_task(path)
+ else:
+ save_times_for_dir(path)
+
+def elapsed_time(task_info):
+ return task_info[1]["elapsed"]
+
+def tot_user_time(task_info):
+ return task_info[1]["user"] + task_info[1]["child user"]
+
+def tot_sys_time(task_info):
+ return task_info[1]["sys"] + task_info[1]["child sys"]
+
+if args.sort != "none":
+ sort_fn = {"real": elapsed_time, "user": tot_user_time, "sys": tot_sys_time}
+ task_infos.sort(key=sort_fn[args.sort], reverse=True)
+
+first_entry = True
+
+# Catching BrokenPipeError avoids annoying errors when the output is piped into
+# e.g. 'less' or 'head' and not completely read
+try:
+ for task_info in task_infos:
+ real = elapsed_time(task_info)
+ user = tot_user_time(task_info)
+ sys_time = tot_sys_time(task_info)  # avoid shadowing the sys module
+
+ if not first_entry:
+ print()
+ first_entry = False
+
+ # Mimic Bash's 'time' builtin
+ print("{}:\n"
+ "real\t{}m{:.3f}s\n"
+ "user\t{}m{:.3f}s\n"
+ "sys\t{}m{:.3f}s"
+ .format(task_info[0],
+ int(real//60), real%60,
+ int(user//60), user%60,
+ int(sys_time//60), sys_time%60))
+
+except BrokenPipeError:
+ pass
diff --git a/scripts/test-dependencies.sh b/scripts/test-dependencies.sh
deleted file mode 100755
index 00c50e0d6c..0000000000
--- a/scripts/test-dependencies.sh
+++ /dev/null
@@ -1,286 +0,0 @@
-#!/bin/bash
-
-# Author: Martin Jansa <martin.jansa@gmail.com>
-#
-# Copyright (c) 2013 Martin Jansa <Martin.Jansa@gmail.com>
-
-# Used to detect missing dependencies or automagically
-# enabled dependencies which aren't explicitly enabled
-# or disabled. Using bash to have PIPESTATUS variable.
-
-# It does up to 4 builds of <target>:
-# 1st to populate the sstate-cache directory and sysroot
-# 2nd to rebuild each recipe with every possible
-# dependency found in the sysroot (which stays populated
-# from the 1st build)
-# 3rd to rebuild each recipe only with dependencies defined
-# in DEPENDS
-# 4th (optional) to repeat the 3rd build and make sure that
-# the minimal versions of dependencies defined in DEPENDS
-# are also enough
-
-# Global vars
-tmpdir=
-targets=
-recipes=
-buildhistory=
-buildtype=
-default_targets="world"
-default_buildhistory="buildhistory"
-default_buildtype="1 2 3 c"
-
-usage () {
- cat << EOF
-Welcome to the utility for detecting missing or auto-enabled dependencies.
-WARNING: this utility will completely remove your tmpdir (make sure
- you don't have important buildhistory or persistent dir there).
-$0 <OPTION>
-
-Options:
- -h, --help
- Display this help and exit.
-
- --tmpdir=<tmpdir>
- Specify tmpdir, will use the environment variable TMPDIR if it is not specified.
- Something like /OE/oe-core/tmp-eglibc (no / at the end).
-
- --targets=<targets>
- List of targets separated by space, will use the environment variable TARGETS if it is not specified.
- It will run "bitbake <targets>" to populate sysroots.
- Default value is "world".
-
- --recipes=<recipes>
- File with list of recipes we want to rebuild with minimal and maximal sysroot.
- Will use the environment variable RECIPES if it is not specified.
- Default value will use all packages ever recorded in buildhistory directory.
-
- --buildhistory=<buildhistory>
- Path to buildhistory directory, it needs to be enabled in your config,
- because it's used to detect different dependencies and to create list
- of recipes to rebuild when it's not specified.
- Will use the environment variable BUILDHISTORY if it is not specified.
- Default value is "buildhistory"
-
- --buildtype=<buildtype>
- There are 4 types of build:
- 1: build to populate sstate-cache directory and sysroot
- 2: build to rebuild each recipe with every possible dep
- 3: build to rebuild each recipe with minimal dependencies
- 4: build to rebuild each recipe again with minimal dependencies
- c: compare buildhistory directories from build 2 and 3
- Will use the environment variable BUILDTYPE if it is not specified.
- Default value is "1 2 3 c", order is important, type 4 is optional.
-EOF
-}
-
-# Print error information and exit.
-echo_error () {
- echo "ERROR: $1" >&2
- exit 1
-}
-
-while [ -n "$1" ]; do
- case $1 in
- --tmpdir=*)
- tmpdir=`echo $1 | sed -e 's#^--tmpdir=##' | xargs readlink -e`
- [ -d "$tmpdir" ] || echo_error "Invalid argument to --tmpdir"
- shift
- ;;
- --targets=*)
- targets=`echo $1 | sed -e 's#^--targets="*\([^"]*\)"*#\1#'`
- shift
- ;;
- --recipes=*)
- recipes=`echo $1 | sed -e 's#^--recipes="*\([^"]*\)"*#\1#'`
- shift
- ;;
- --buildhistory=*)
- buildhistory=`echo $1 | sed -e 's#^--buildhistory="*\([^"]*\)"*#\1#'`
- shift
- ;;
- --buildtype=*)
- buildtype=`echo $1 | sed -e 's#^--buildtype="*\([^"]*\)"*#\1#'`
- shift
- ;;
- --help|-h)
- usage
- exit 0
- ;;
- *)
- echo "Invalid arguments $*"
- echo_error "Try '$0 -h' for more information."
- ;;
- esac
-done
-
-# tmpdir directory: use the environment variable TMPDIR
-# if it was not specified on the command line; otherwise, error out.
-[ -n "$tmpdir" ] || tmpdir=$TMPDIR
-[ -n "$tmpdir" ] || echo_error "No tmpdir found!"
-[ -d "$tmpdir" ] || echo_error "Invalid tmpdir \"$tmpdir\""
-[ -n "$targets" ] || targets=$TARGETS
-[ -n "$targets" ] || targets=$default_targets
-[ -n "$recipes" ] || recipes=$RECIPES
-[ -n "$recipes" -a ! -f "$recipes" ] && echo_error "Invalid file with list of recipes to rebuild"
-[ -n "$recipes" ] || echo "All packages ever recorded in buildhistory directory will be rebuilt"
-[ -n "$buildhistory" ] || buildhistory=$BUILDHISTORY
-[ -n "$buildhistory" ] || buildhistory=$default_buildhistory
-[ -d "$buildhistory" ] || echo_error "Invalid buildhistory directory \"$buildhistory\""
-[ -n "$buildtype" ] || buildtype=$BUILDTYPE
-[ -n "$buildtype" ] || buildtype=$default_buildtype
-echo "$buildtype" | grep -v '^[1234c ]*$' && echo_error "Invalid buildtype \"$buildtype\", only some combination of 1, 2, 3, 4, c separated by space is allowed"
-
-OUTPUT_BASE=test-dependencies/`date "+%s"`
-declare -i RESULT=0
-
-build_all() {
- echo "===== 1st build to populate sstate-cache directory and sysroot ====="
- OUTPUT1=${OUTPUT_BASE}/${TYPE}_all
- mkdir -p ${OUTPUT1}
- echo "Logs will be stored in ${OUTPUT1} directory"
- bitbake -k $targets 2>&1 | tee -a ${OUTPUT1}/complete.log
- RESULT+=${PIPESTATUS[0]}
- grep "ERROR: Task.*failed" ${OUTPUT1}/complete.log > ${OUTPUT1}/failed-tasks.log
- cat ${OUTPUT1}/failed-tasks.log | sed 's@.*/@@g; s@_.*@@g; s@\.bb, .*@@g; s@\.bb;.*@@g' | sort -u > ${OUTPUT1}/failed-recipes.log
-}
-
-build_every_recipe() {
- if [ "${TYPE}" = "2" ] ; then
- echo "===== 2nd build to rebuild each recipe with every possible dep ====="
- OUTPUT_MAX=${OUTPUT_BASE}/${TYPE}_max
- OUTPUTB=${OUTPUT_MAX}
- else
- echo "===== 3rd or 4th build to rebuild each recipe with minimal dependencies ====="
- OUTPUT_MIN=${OUTPUT_BASE}/${TYPE}_min
- OUTPUTB=${OUTPUT_MIN}
- fi
-
- mkdir -p ${OUTPUTB} ${OUTPUTB}/failed ${OUTPUTB}/ok
- echo "Logs will be stored in ${OUTPUTB} directory"
- if [ -z "$recipes" ]; then
- ls -d $buildhistory/packages/*/* | xargs -n 1 basename | sort -u > ${OUTPUTB}/recipe.list
- recipes=${OUTPUTB}/recipe.list
- fi
- if [ "${TYPE}" != "2" ] ; then
- echo "!!!Removing tmpdir \"$tmpdir\"!!!"
- rm -rf $tmpdir/deploy $tmpdir/pkgdata $tmpdir/sstate-control $tmpdir/stamps $tmpdir/sysroots $tmpdir/work $tmpdir/work-shared 2>/dev/null
- fi
- i=1
- count=`cat $recipes ${OUTPUT1}/failed-recipes.log | sort -u | wc -l`
- for recipe in `cat $recipes ${OUTPUT1}/failed-recipes.log | sort -u`; do
- echo "Building recipe: ${recipe} ($i/$count)"
- declare -i RECIPE_RESULT=0
- bitbake -c cleansstate ${recipe} > ${OUTPUTB}/${recipe}.log 2>&1;
- RECIPE_RESULT+=$?
- bitbake ${recipe} >> ${OUTPUTB}/${recipe}.log 2>&1;
- RECIPE_RESULT+=$?
- if [ "${RECIPE_RESULT}" != "0" ] ; then
- RESULT+=${RECIPE_RESULT}
- mv ${OUTPUTB}/${recipe}.log ${OUTPUTB}/failed/
- grep "ERROR: Task.*failed" ${OUTPUTB}/failed/${recipe}.log | tee -a ${OUTPUTB}/failed-tasks.log
- grep "ERROR: Task.*failed" ${OUTPUTB}/failed/${recipe}.log | sed 's@.*/@@g; s@_.*@@g; s@\.bb, .*@@g; s@\.bb;.*@@g' >> ${OUTPUTB}/failed-recipes.log
- # and append also ${recipe} in case the failed task was from some dependency
- echo ${recipe} >> ${OUTPUTB}/failed-recipes.log
- else
- mv ${OUTPUTB}/${recipe}.log ${OUTPUTB}/ok/
- fi
- if [ "${TYPE}" != "2" ] ; then
- rm -rf $tmpdir/deploy $tmpdir/pkgdata $tmpdir/sstate-control $tmpdir/stamps $tmpdir/sysroots $tmpdir/work $tmpdir/work-shared 2>/dev/null
- fi
- i=`expr $i + 1`
- done
- echo "Copying buildhistory/packages to ${OUTPUTB}"
- cp -ra $buildhistory/packages ${OUTPUTB}
- # This will be useful for seeing which library is pulling in a new dependency
- echo "Copying do_package logs to ${OUTPUTB}/do_package/"
- mkdir ${OUTPUTB}/do_package
- find $tmpdir/work/ -name log.do_package 2>/dev/null| while read f; do
- # pn is 3 levels back, but we don't know if there is just one log per pn (only one arch and version)
- # dest=`echo $f | sed 's#^.*/\([^/]*\)/\([^/]*\)/\([^/]*\)/log.do_package#\1#g'`
- dest=`echo $f | sed "s#$tmpdir/work/##g; s#/#_#g"`
- cp $f ${OUTPUTB}/do_package/$dest
- done
-}
-
-compare_deps() {
- # you can run just compare task with command like this
- # OUTPUT_BASE=test-dependencies/1373140172 \
- # OUTPUT_MAX=${OUTPUT_BASE}/2_max \
- # OUTPUT_MIN=${OUTPUT_BASE}/3_min \
- # openembedded-core/scripts/test-dependencies.sh --tmpdir=tmp-eglibc --targets=glib-2.0 --recipes=recipe_list --buildtype=c
- echo "===== Compare dependencies recorded in \"${OUTPUT_MAX}\" and \"${OUTPUT_MIN}\" ====="
- [ -n "${OUTPUTC}" ] || OUTPUTC=${OUTPUT_BASE}/comp
- mkdir -p ${OUTPUTC}
- OUTPUT_FILE=${OUTPUTC}/dependency-changes
- echo "Differences will be stored in ${OUTPUT_FILE}, dot is shown for every 100 of checked packages"
- echo > ${OUTPUT_FILE}
-
- [ -d ${OUTPUT_MAX} ] || echo_error "Directory with output from build 2 \"${OUTPUT_MAX}\" does not exist"
- [ -d ${OUTPUT_MIN} ] || echo_error "Directory with output from build 3 \"${OUTPUT_MIN}\" does not exist"
- [ -d ${OUTPUT_MAX}/packages/ ] || echo_error "Directory with packages from build 2 \"${OUTPUT_MAX}/packages/\" does not exist"
- [ -d ${OUTPUT_MIN}/packages/ ] || echo_error "Directory with packages from build 3 \"${OUTPUT_MIN}/packages/\" does not exist"
- i=0
- find ${OUTPUT_MAX}/packages/ -name latest | sed "s#${OUTPUT_MAX}/##g" | while read pkg; do
- max_pkg=${OUTPUT_MAX}/${pkg}
- min_pkg=${OUTPUT_MIN}/${pkg}
- # pkg=packages/armv5te-oe-linux-gnueabi/libungif/libungif/latest
- recipe=`echo "${pkg}" | sed 's#packages/[^/]*/\([^/]*\)/\([^/]*\)/latest#\1#g'`
- package=`echo "${pkg}" | sed 's#packages/[^/]*/\([^/]*\)/\([^/]*\)/latest#\2#g'`
- if [ ! -f "${min_pkg}" ] ; then
- echo "ERROR: ${recipe}: ${package} package isn't created when building with minimal dependencies?" | tee -a ${OUTPUT_FILE}
- echo ${recipe} >> ${OUTPUTC}/failed-recipes.log
- continue
- fi
- # strip version information in parenthesis
- max_deps=`grep "^RDEPENDS = " ${max_pkg} | sed 's/^RDEPENDS = / /g; s/$/ /g; s/([^(]*)//g'`
- min_deps=`grep "^RDEPENDS = " ${min_pkg} | sed 's/^RDEPENDS = / /g; s/$/ /g; s/([^(]*)//g'`
- if [ "$i" = 100 ] ; then
- echo -n "." # cheap progressbar
- i=0
- fi
- if [ "${max_deps}" = "${min_deps}" ] ; then
- # it's annoyingly long, but at least it shows some progress; warnings are grepped at the end
- echo "NOTE: ${recipe}: ${package} rdepends weren't changed" >> ${OUTPUT_FILE}
- else
- missing_deps=
- for dep in ${max_deps}; do
- if ! echo "${min_deps}" | grep -q " ${dep} " ; then
- missing_deps="${missing_deps} ${dep}"
- echo # to get rid of dots on last line
- echo "WARN: ${recipe}: ${package} rdepends on ${dep}, but it isn't a build dependency?" | tee -a ${OUTPUT_FILE}
- fi
- done
- if [ -n "${missing_deps}" ] ; then
- echo ${recipe} >> ${OUTPUTC}/failed-recipes.log
- fi
- fi
- i=`expr $i + 1`
- done
- echo # to get rid of dots on last line
- echo "Found differences: "
- grep "^WARN: " ${OUTPUT_FILE} | tee ${OUTPUT_FILE}.warn.log
- echo "Found errors: "
- grep "^ERROR: " ${OUTPUT_FILE} | tee ${OUTPUT_FILE}.error.log
- RESULT+=`cat ${OUTPUT_FILE}.warn.log | wc -l`
- RESULT+=`cat ${OUTPUT_FILE}.error.log | wc -l`
-}
-
-for TYPE in $buildtype; do
- case ${TYPE} in
- 1) build_all;;
- 2) build_every_recipe;;
- 3) build_every_recipe;;
- 4) build_every_recipe;;
- c) compare_deps;;
- *) echo_error "Invalid buildtype \"$TYPE\""
- esac
-done
-
-cat ${OUTPUT_BASE}/*/failed-recipes.log | sort -u >> ${OUTPUT_BASE}/failed-recipes.log
-
-if [ "${RESULT}" != "0" ] ; then
- echo "ERROR: ${RESULT} issues were found in these recipes: `cat ${OUTPUT_BASE}/failed-recipes.log | xargs`"
-fi
-
-echo "INFO: Output written in: ${OUTPUT_BASE}"
-exit ${RESULT}
diff --git a/scripts/test-reexec b/scripts/test-reexec
index 9eaa96e754..fccdac4da6 100755
--- a/scripts/test-reexec
+++ b/scripts/test-reexec
@@ -3,21 +3,8 @@
# Test Script for task re-execution
#
# Copyright 2012 Intel Corporation
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script is intended to address issues for re-execution of
@@ -38,9 +25,9 @@ mkdir -p $LOGS
function clearsstate {
target=$1
- sstate_dir=`bitbake $target -e | grep "^SSTATE_DIR" | cut -d "\"" -f 2`
- sstate_pkgspec=`bitbake $target -e | grep "^SSTATE_PKGSPEC" | cut -d "\"" -f 2`
- sstasks=`bitbake $target -e | grep "^SSTATETASKS" | cut -d "\"" -f 2`
+ sstate_dir=`bitbake $target -e | grep "^SSTATE_DIR=" | cut -d "\"" -f 2`
+ sstate_pkgspec=`bitbake $target -e | grep "^SSTATE_PKGSPEC=" | cut -d "\"" -f 2`
+ sstasks=`bitbake $target -e | grep "^SSTATETASKS=" | cut -d "\"" -f 2`
for sstask in $sstasks
do
diff --git a/scripts/test-remote-image b/scripts/test-remote-image
index 27b1cae38f..d209d22854 100755
--- a/scripts/test-remote-image
+++ b/scripts/test-remote-image
@@ -1,19 +1,9 @@
#!/usr/bin/env python3
-
-# Copyright (c) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Copyright (c) 2014 Intel Corporation
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# DESCRIPTION
# This script is used to test public autobuilder images on remote hardware.
diff --git a/scripts/tiny/dirsize.py b/scripts/tiny/dirsize.py
index ddccc5a8c8..501516b0d4 100755
--- a/scripts/tiny/dirsize.py
+++ b/scripts/tiny/dirsize.py
@@ -1,22 +1,8 @@
#!/usr/bin/env python3
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# Display details of the root filesystem size, broken up by directory.
# Allows for limiting by size to focus on the larger files.
diff --git a/scripts/tiny/ksize.py b/scripts/tiny/ksize.py
index b9d2b192cf..bc11919f4b 100755
--- a/scripts/tiny/ksize.py
+++ b/scripts/tiny/ksize.py
@@ -1,22 +1,8 @@
#!/usr/bin/env python3
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# Display details of the kernel build size, broken up by built-in.o. Sort
# the objects by size. Run from the top level kernel build directory.
@@ -41,7 +27,7 @@ def usage():
class Sizes:
def __init__(self, glob):
self.title = glob
- p = Popen("size -t " + glob, shell=True, stdout=PIPE, stderr=PIPE)
+ p = Popen("size -t " + str(glob), shell=True, stdout=PIPE, stderr=PIPE)
output = p.communicate()[0].splitlines()
if len(output) > 2:
sizes = output[-1].split()[0:4]
@@ -62,18 +48,18 @@ class Report:
r = Report(filename, title)
path = os.path.dirname(filename)
- p = Popen("ls " + path + "/*.o | grep -v built-in.o",
+ p = Popen("ls " + str(path) + "/*.o | grep -v built-in.o",
shell=True, stdout=PIPE, stderr=PIPE)
glob = ' '.join(p.communicate()[0].splitlines())
- oreport = Report(glob, path + "/*.o")
- oreport.sizes.title = path + "/*.o"
+ oreport = Report(glob, str(path) + "/*.o")
+ oreport.sizes.title = str(path) + "/*.o"
r.parts.append(oreport)
if subglob:
p = Popen("ls " + subglob, shell=True, stdout=PIPE, stderr=PIPE)
for f in p.communicate()[0].splitlines():
path = os.path.dirname(f)
- r.parts.append(Report.create(f, path, path + "/*/built-in.o"))
+ r.parts.append(Report.create(f, path, str(path) + "/*/built-in.o"))
r.parts.sort(reverse=True)
for b in r.parts:
@@ -116,6 +102,13 @@ class Report:
self.deltas["data"], self.deltas["bss"]))
print("\n")
+ def __lt__(this, that):
+ if that is None:
+ return True
+ if not isinstance(that, Report):
+ raise TypeError
+ return this.sizes.total < that.sizes.total
+
def __cmp__(this, that):
if that is None:
return 1
diff --git a/scripts/tiny/ksum.py b/scripts/tiny/ksum.py
new file mode 100755
index 0000000000..8f0e4c0517
--- /dev/null
+++ b/scripts/tiny/ksum.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2016, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# DESCRIPTION 'ksum.py' generates a combined summary of vmlinux and
+# module sizes for a built kernel, as a quick tool for comparing the
+# overall effects of systemic tinification changes. Execute from the
+# base directory of the kernel build you want to summarize. Setting
+# the 'verbose' flag will display the sizes for each file included in
+# the summary.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+__version__ = "0.1.0"
+
+# Python Standard Library modules
+import os
+import sys
+import getopt
+from subprocess import *
+
+def usage():
+ prog = os.path.basename(sys.argv[0])
+ print('Usage: %s [OPTION]...' % prog)
+ print(' -v, display sizes for each file')
+ print(' -h, --help display this help and exit')
+ print('')
+ print('Run %s from the top-level Linux kernel build directory.' % prog)
+
+verbose = False
+
+n_ko_files = 0
+ko_file_list = []
+
+ko_text = 0
+ko_data = 0
+ko_bss = 0
+ko_total = 0
+
+vmlinux_file = ""
+vmlinux_level = 0
+
+vmlinux_text = 0
+vmlinux_data = 0
+vmlinux_bss = 0
+vmlinux_total = 0
+
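+# Only the first file literally named 'vmlinux' is recorded; vmlinux_level
+# guards against counting further copies found later in the walk.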
+def is_vmlinux_file(filename):
+ global vmlinux_level
+ if filename == ("vmlinux") and vmlinux_level == 0:
+ vmlinux_level += 1
+ return True
+ return False
+
+def is_ko_file(filename):
+ if filename.endswith(".ko"):
+ return True
+ return False
+
+def collect_object_files():
+ print("Collecting object files recursively from %s..." % os.getcwd())
+ for dirpath, dirs, files in os.walk(os.getcwd()):
+ for filename in files:
+ if is_ko_file(filename):
+ ko_file_list.append(os.path.join(dirpath, filename))
+ elif is_vmlinux_file(filename):
+ global vmlinux_file
+ vmlinux_file = os.path.join(dirpath, filename)
+ print("Collecting object files [DONE]")
+
+def add_ko_file(filename):
+ p = Popen("size -t " + filename, shell=True, stdout=PIPE, stderr=PIPE)
+ output = p.communicate()[0].splitlines()
+ if len(output) > 2:
+ sizes = output[-1].split()[0:4]
+ if verbose:
+ print(" %10d %10d %10d %10d\t" % \
+ (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])), end=' ')
+ print("%s" % filename[len(os.getcwd()) + 1:])
+ global n_ko_files, ko_text, ko_data, ko_bss, ko_total
+ ko_text += int(sizes[0])
+ ko_data += int(sizes[1])
+ ko_bss += int(sizes[2])
+ ko_total += int(sizes[3])
+ n_ko_files += 1
+
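+# 'size -t' output is assumed to look roughly like this (values illustrative);
+# the parsers above and below slice the first four columns of the final
+# '(TOTALS)' row:
+#    text    data     bss     dec     hex filename
+#   59942    2984     624   63550    f83e foo.ko
+#   59942    2984     624   63550    f83e (TOTALS)
+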
+def get_vmlinux_totals():
+ p = Popen("size -t " + vmlinux_file, shell=True, stdout=PIPE, stderr=PIPE)
+ output = p.communicate()[0].splitlines()
+ if len(output) > 2:
+ sizes = output[-1].split()[0:4]
+ if verbose:
+ print(" %10d %10d %10d %10d\t" % \
+ (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])), end=' ')
+ print("%s" % vmlinux_file[len(os.getcwd()) + 1:])
+ global vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total
+ vmlinux_text += int(sizes[0])
+ vmlinux_data += int(sizes[1])
+ vmlinux_bss += int(sizes[2])
+ vmlinux_total += int(sizes[3])
+
+def sum_ko_files():
+ for ko_file in ko_file_list:
+ add_ko_file(ko_file)
+
+def main():
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "vh", ["help"])
+ except getopt.GetoptError as err:
+ print('%s' % str(err))
+ usage()
+ sys.exit(2)
+
+ for o, a in opts:
+ if o == '-v':
+ global verbose
+ verbose = True
+ elif o in ('-h', '--help'):
+ usage()
+ sys.exit(0)
+ else:
+ assert False, "unhandled option"
+
+ collect_object_files()
+ sum_ko_files()
+ get_vmlinux_totals()
+
+ print("\nTotals:")
+ print("\nvmlinux:")
+ print(" text\tdata\t\tbss\t\ttotal")
+ print(" %-10d\t%-10d\t%-10d\t%-10d" % \
+ (vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total))
+ print("\nmodules (%d):" % n_ko_files)
+ print(" text\tdata\t\tbss\t\ttotal")
+ print(" %-10d\t%-10d\t%-10d\t%-10d" % \
+ (ko_text, ko_data, ko_bss, ko_total))
+ print("\nvmlinux + modules:")
+ print(" text\tdata\t\tbss\t\ttotal")
+ print(" %-10d\t%-10d\t%-10d\t%-10d" % \
+ (vmlinux_text + ko_text, vmlinux_data + ko_data, \
+ vmlinux_bss + ko_bss, vmlinux_total + ko_total))
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc(5)
+ sys.exit(ret)
diff --git a/scripts/verify-bashisms b/scripts/verify-bashisms
index 0741e18447..fb0cc719ea 100755
--- a/scripts/verify-bashisms
+++ b/scripts/verify-bashisms
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys, os, subprocess, re, shutil
@@ -6,7 +9,7 @@ whitelist = (
# type is supported by dash
'if type systemctl >/dev/null 2>/dev/null; then',
'if type systemd-tmpfiles >/dev/null 2>/dev/null; then',
- 'if type update-rc.d >/dev/null 2>/dev/null; then',
+ 'type update-rc.d >/dev/null 2>/dev/null; then',
'command -v',
# HOSTNAME is set locally
'buildhistory_single_commit "$CMDLINE" "$HOSTNAME"',
@@ -22,7 +25,10 @@ def is_whitelisted(s):
return True
return False
-def process(recipe, function, script):
+SCRIPT_LINENO_RE = re.compile(r' line (\d+) ')
+BASHISM_WARNING = re.compile(r'^(possible bashism in.*)$', re.MULTILINE)
+
+def process(filename, function, lineno, script):
import tempfile
if not script.startswith("#!"):
@@ -40,18 +46,38 @@ def process(recipe, function, script):
# TODO check exit code is 1
# Replace the temporary filename with the function and split it
- output = e.output.replace(fn.name, function).splitlines()
- if len(results) % 2 != 0:
- print("Unexpected output from checkbashism: %s" % str(output))
- return
-
- # Turn the output into a list of (message, source) values
+ output = e.output.replace(fn.name, function)
+ if not output or not output.startswith('possible bashism'):
+ # Probably starts with or contains only warnings. Dump verbatim
+ # with one space indentation. Can't do the splitting and whitelist
+ # checking below.
+ return '\n'.join([filename,
+ ' Unexpected output from checkbashisms.pl'] +
+ [' ' + x for x in output.splitlines()])
+
+ # We know that the first line matches and that therefore the first
+ # list entry will be empty - skip it.
+ output = BASHISM_WARNING.split(output)[1:]
+ # Turn the output into a single string like this:
+ # /.../foobar.bb
+ # possible bashism in updatercd_postrm line 2 (type):
+ # if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
+ # ...
+ # ...
result = []
# Check the results against the whitelist
for message, source in zip(output[0::2], output[1::2]):
if not is_whitelisted(source):
- result.append((message, source))
- return result
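+ # Rebase the line number reported by checkbashisms.pl (relative to the
+ # extracted script) onto the function's actual line in the recipe file.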
+ if lineno is not None:
+ message = SCRIPT_LINENO_RE.sub(lambda m: ' line %d ' % (int(m.group(1)) + int(lineno) - 1),
+ message)
+ result.append(' ' + message.strip())
+ result.extend([' %s' % x for x in source.splitlines()])
+ if result:
+ result.insert(0, filename)
+ return '\n'.join(result)
+ else:
+ return None
def get_tinfoil():
scripts_path = os.path.dirname(os.path.realpath(__file__))
@@ -66,51 +92,67 @@ def get_tinfoil():
return tinfoil
if __name__=='__main__':
- import shutil
+ import argparse, shutil
+
+ parser = argparse.ArgumentParser(description='Bashism detector for shell fragments in recipes.')
+ parser.add_argument("recipes", metavar="RECIPE", nargs="*", help="recipes to check (if not specified, all will be checked)")
+ parser.add_argument("--verbose", default=False, action="store_true")
+ args = parser.parse_args()
+
if shutil.which("checkbashisms.pl") is None:
- print("Cannot find checkbashisms.pl on $PATH")
+ print("Cannot find checkbashisms.pl on $PATH, get it from https://anonscm.debian.org/cgit/collab-maint/devscripts.git/plain/scripts/checkbashisms.pl")
sys.exit(1)
+ # The order of defining the worker function,
+ # initializing the pool and connecting to the
+ # bitbake server is crucial, don't change it.
+ def func(item):
+ (filename, key, lineno), script = item
+ if args.verbose:
+ print("Scanning %s:%s" % (filename, key))
+ return process(filename, key, lineno, script)
+
+ import multiprocessing
+ pool = multiprocessing.Pool()
+
tinfoil = get_tinfoil()
# This is only the default configuration and should iterate over
# recipecaches to handle multiconfig environments
pkg_pn = tinfoil.cooker.recipecaches[""].pkg_pn
- # TODO: use argparse and have --help
- if len(sys.argv) > 1:
- initial_pns = sys.argv[1:]
+ if args.recipes:
+ initial_pns = args.recipes
else:
initial_pns = sorted(pkg_pn)
- pns = []
- print("Generating file list...")
+ pns = set()
+ scripts = {}
+ print("Generating scripts...")
for pn in initial_pns:
for fn in pkg_pn[pn]:
# There's no point checking multiple BBCLASSEXTENDed variants of the same recipe
+ # (at least in general - there is some risk that the variants contain different scripts)
realfn, _, _ = bb.cache.virtualfn2realfn(fn)
if realfn not in pns:
- pns.append(realfn)
-
+ pns.add(realfn)
+ data = tinfoil.parse_recipe_file(realfn)
+ for key in data.keys():
+ if data.getVarFlag(key, "func") and not data.getVarFlag(key, "python"):
+ script = data.getVar(key, False)
+ if script:
+ filename = data.getVarFlag(key, "filename")
+ lineno = data.getVarFlag(key, "lineno")
+ # There's no point in checking a function multiple
+ # times just because different recipes include it.
+ # We identify unique scripts by file, name, and (just in case)
+ # line number.
+ attributes = (filename or realfn, key, lineno)
+ scripts.setdefault(attributes, script)
- def func(fn):
- result = []
- data = tinfoil.parse_recipe_file(fn)
- for key in data.keys():
- if data.getVarFlag(key, "func", True) and not data.getVarFlag(key, "python", True):
- script = data.getVar(key, False)
- if not script: continue
- #print ("%s:%s" % (fn, key))
- r = process(fn, key, script)
- if r: result.extend(r)
- return fn, result
print("Scanning scripts...\n")
- import multiprocessing
- pool = multiprocessing.Pool()
- for pn,results in pool.imap(func, pns):
- if results:
- print(pn)
- for message,source in results:
- print(" %s\n %s" % (message, source))
- print()
+ for result in pool.imap(func, scripts.items()):
+ if result:
+ print(result)
+ tinfoil.shutdown()
diff --git a/scripts/wic b/scripts/wic
index fe2c33f0e7..24700f380f 100755
--- a/scripts/wic
+++ b/scripts/wic
@@ -1,22 +1,8 @@
#!/usr/bin/env python3
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION 'wic' is the OpenEmbedded Image Creator that users can
# use to generate bootable images. Invoking it without any arguments
@@ -33,29 +19,61 @@ __version__ = "0.2.0"
# Python Standard Library modules
import os
import sys
-import optparse
+import argparse
import logging
+import subprocess
+
+from collections import namedtuple
from distutils import spawn
# External modules
-scripts_path = os.path.abspath(os.path.dirname(__file__))
+scripts_path = os.path.dirname(os.path.realpath(__file__))
lib_path = scripts_path + '/lib'
sys.path.insert(0, lib_path)
+import scriptpath
+scriptpath.add_oe_lib_path()
+
+# Check whether wic is running within eSDK environment
+sdkroot = scripts_path
+if os.environ.get('SDKTARGETSYSROOT'):
+ while sdkroot != '' and sdkroot != os.sep:
+ if os.path.exists(os.path.join(sdkroot, '.devtoolbase')):
+ # Set BUILDDIR for wic to work within eSDK
+ os.environ['BUILDDIR'] = sdkroot
+ # .devtoolbase only exists within eSDK
+ # If found, initialize bitbake path for eSDK environment and append to PATH
+ sdkroot = os.path.join(os.path.dirname(scripts_path), 'bitbake', 'bin')
+ os.environ['PATH'] += ":" + sdkroot
+ break
+ sdkroot = os.path.dirname(sdkroot)
bitbake_exe = spawn.find_executable('bitbake')
if bitbake_exe:
- bitbake_path = os.path.join(os.path.dirname(bitbake_exe), '../lib')
- sys.path.insert(0, bitbake_path)
- from bb import cookerdata
- from bb.main import bitbake_main, BitBakeConfigParameters
-else:
- bitbake_main = None
-
-from wic.utils.oe.misc import get_bitbake_var, BB_VARS
-from wic.utils.errors import WicError
+ bitbake_path = scriptpath.add_bitbake_lib_path()
+ import bb
+
+from wic import WicError
+from wic.misc import get_bitbake_var, BB_VARS
from wic import engine
from wic import help as hlp
+
+def wic_logger():
+ """Create and convfigure wic logger."""
+ logger = logging.getLogger('wic')
+ logger.setLevel(logging.INFO)
+
+ handler = logging.StreamHandler()
+
+ formatter = logging.Formatter('%(levelname)s: %(message)s')
+ handler.setFormatter(formatter)
+
+ logger.addHandler(handler)
+
+ return logger
+
+logger = wic_logger()
+
def rootfs_dir_to_args(krootfs_dir):
"""
Get a rootfs_dir dict and serialize to string
@@ -66,70 +84,32 @@ def rootfs_dir_to_args(krootfs_dir):
rootfs_dir += '='.join([key, val])
return rootfs_dir.strip()
-def callback_rootfs_dir(option, opt, value, parser):
- """
- Build a dict using --rootfs_dir connection=dir
- """
- if not type(parser.values.rootfs_dir) is dict:
- parser.values.rootfs_dir = dict()
- if '=' in value:
- (key, rootfs_dir) = value.split('=')
- else:
- key = 'ROOTFS_DIR'
- rootfs_dir = value
+class RootfsArgAction(argparse.Action):
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def __call__(self, parser, namespace, value, option_string=None):
+ if not "rootfs_dir" in vars(namespace) or \
+ not type(namespace.__dict__['rootfs_dir']) is dict:
+ namespace.__dict__['rootfs_dir'] = {}
- parser.values.rootfs_dir[key] = rootfs_dir
+ if '=' in value:
+ (key, rootfs_dir) = value.split('=')
+ else:
+ key = 'ROOTFS_DIR'
+ rootfs_dir = value
+
+ namespace.__dict__['rootfs_dir'][key] = rootfs_dir
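+
+# Illustrative behaviour (assumed, mirroring the optparse callback it replaces):
+# '-r /path/to/rootfs' stores {'ROOTFS_DIR': '/path/to/rootfs'}, while
+# '-r rootfs1=/path/to/rootfs' stores {'rootfs1': '/path/to/rootfs'}.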
-def wic_create_subcommand(args, usage_str):
+
+def wic_create_subcommand(options, usage_str):
"""
Command-line handling for image creation. The real work is done
by image.engine.wic_create()
"""
- parser = optparse.OptionParser(usage=usage_str)
-
- parser.add_option("-o", "--outdir", dest="outdir",
- help="name of directory to create image in")
- parser.add_option("-e", "--image-name", dest="image_name",
- help="name of the image to use the artifacts from "
- "e.g. core-image-sato")
- parser.add_option("-r", "--rootfs-dir", dest="rootfs_dir", type="string",
- action="callback", callback=callback_rootfs_dir,
- help="path to the /rootfs dir to use as the "
- ".wks rootfs source")
- parser.add_option("-b", "--bootimg-dir", dest="bootimg_dir",
- help="path to the dir containing the boot artifacts "
- "(e.g. /EFI or /syslinux dirs) to use as the "
- ".wks bootimg source")
- parser.add_option("-k", "--kernel-dir", dest="kernel_dir",
- help="path to the dir containing the kernel to use "
- "in the .wks bootimg")
- parser.add_option("-n", "--native-sysroot", dest="native_sysroot",
- help="path to the native sysroot containing the tools "
- "to use to build the image")
- parser.add_option("-p", "--skip-build-check", dest="build_check",
- action="store_false", default=True, help="skip the build check")
- parser.add_option("-f", "--build-rootfs", action="store_true", help="build rootfs")
- parser.add_option("-c", "--compress-with", choices=("gzip", "bzip2", "xz"),
- dest='compressor',
- help="compress image with specified compressor")
- parser.add_option("-m", "--bmap", action="store_true", help="generate .bmap")
- parser.add_option("-v", "--vars", dest='vars_dir',
- help="directory with <image>.env files that store "
- "bitbake variables")
- parser.add_option("-D", "--debug", dest="debug", action="store_true",
- default=False, help="output debug information")
-
- (options, args) = parser.parse_args(args)
-
- if len(args) != 1:
- logging.error("Wrong number of arguments, exiting\n")
- parser.print_help()
- sys.exit(1)
-
- if options.build_rootfs and not bitbake_main:
- logging.error("Can't build roofs as bitbake is not in the $PATH")
- sys.exit(1)
+ if options.build_rootfs and not bitbake_exe:
+ raise WicError("Can't build rootfs as bitbake is not in the $PATH")
if not options.image_name:
missed = []
@@ -140,9 +120,8 @@ def wic_create_subcommand(args, usage_str):
if not val:
missed.append(opt)
if missed:
- print("The following build artifacts are not specified:")
- print(" " + ", ".join(missed))
- sys.exit(1)
+ raise WicError("The following build artifacts are not specified: %s" %
+ ", ".join(missed))
if options.image_name:
BB_VARS.default_image = options.image_name
@@ -152,15 +131,11 @@ def wic_create_subcommand(args, usage_str):
if options.vars_dir:
BB_VARS.vars_dir = options.vars_dir
- if options.build_check:
- print("Checking basic build environment...")
- if not engine.verify_build_env():
- print("Couldn't verify build environment, exiting\n")
- sys.exit(1)
- else:
- print("Done.\n")
+ if options.build_check and not engine.verify_build_env():
+ raise WicError("Couldn't verify build environment, exiting")
- bootimg_dir = ""
+ if options.debug:
+ logger.setLevel(logging.DEBUG)
if options.image_name:
if options.build_rootfs:
@@ -168,33 +143,38 @@ def wic_create_subcommand(args, usage_str):
if options.debug:
argv.append("--debug")
- print("Building rootfs...\n")
- if bitbake_main(BitBakeConfigParameters(argv),
- cookerdata.CookerConfiguration()):
- sys.exit(1)
+ logger.info("Building rootfs...\n")
+ subprocess.check_call(argv)
rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", options.image_name)
kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE", options.image_name)
- native_sysroot = get_bitbake_var("STAGING_DIR_NATIVE",
- options.image_name)
+ bootimg_dir = get_bitbake_var("STAGING_DATADIR", options.image_name)
+
+ native_sysroot = options.native_sysroot
+ if options.vars_dir and not native_sysroot:
+ native_sysroot = get_bitbake_var("RECIPE_SYSROOT_NATIVE", options.image_name)
else:
if options.build_rootfs:
- print("Image name is not specified, exiting. (Use -e/--image-name to specify it)\n")
- sys.exit(1)
+ raise WicError("Image name is not specified, exiting. "
+ "(Use -e/--image-name to specify it)")
+ native_sysroot = options.native_sysroot
+
+ if not options.vars_dir and (not native_sysroot or not os.path.isdir(native_sysroot)):
+ logger.info("Building wic-tools...\n")
+ subprocess.check_call(["bitbake", "wic-tools"])
+ native_sysroot = get_bitbake_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
- wks_file = args[0]
+ if not native_sysroot:
+ raise WicError("Unable to find the location of the native tools sysroot")
+
+ wks_file = options.wks_file
if not wks_file.endswith(".wks"):
wks_file = engine.find_canned_image(scripts_path, wks_file)
if not wks_file:
- print("No image named %s found, exiting. (Use 'wic list images' "\
- "to list available images, or specify a fully-qualified OE "\
- "kickstart (.wks) filename)\n" % args[0])
- sys.exit(1)
-
- image_output_dir = ""
- if options.outdir:
- image_output_dir = options.outdir
+ raise WicError("No image named %s found, exiting. (Use 'wic list images' "
+ "to list available images, or specify a fully-qualified OE "
+ "kickstart (.wks) filename)" % options.wks_file)
if not options.image_name:
rootfs_dir = ''
@@ -204,17 +184,13 @@ def wic_create_subcommand(args, usage_str):
kernel_dir = options.kernel_dir
native_sysroot = options.native_sysroot
if rootfs_dir and not os.path.isdir(rootfs_dir):
- print("--roofs-dir (-r) not found, exiting\n")
- sys.exit(1)
+ raise WicError("--rootfs-dir (-r) not found, exiting")
if not os.path.isdir(bootimg_dir):
- print("--bootimg-dir (-b) not found, exiting\n")
- sys.exit(1)
+ raise WicError("--bootimg-dir (-b) not found, exiting")
if not os.path.isdir(kernel_dir):
- print("--kernel-dir (-k) not found, exiting\n")
- sys.exit(1)
+ raise WicError("--kernel-dir (-k) not found, exiting")
if not os.path.isdir(native_sysroot):
- print("--native-sysroot (-n) not found, exiting\n")
- sys.exit(1)
+ raise WicError("--native-sysroot (-n) not found, exiting")
else:
not_found = not_found_dir = ""
if not os.path.isdir(rootfs_dir):
@@ -226,13 +202,11 @@ def wic_create_subcommand(args, usage_str):
if not_found:
if not not_found_dir:
not_found_dir = "Completely missing artifact - wrong image (.wks) used?"
- print("Build artifacts not found, exiting.")
- print(" (Please check that the build artifacts for the machine")
- print(" selected in local.conf actually exist and that they")
- print(" are the correct artifacts for the image (.wks file)).\n")
- print("The artifact that couldn't be found was %s:\n %s" % \
- (not_found, not_found_dir))
- sys.exit(1)
+ logger.info("Build artifacts not found, exiting.")
+ logger.info(" (Please check that the build artifacts for the machine")
+ logger.info(" selected in local.conf actually exist and that they")
+ logger.info(" are the correct artifacts for the image (.wks file)).\n")
+ raise WicError("The artifact that couldn't be found was %s:\n %s", not_found, not_found_dir)
krootfs_dir = options.rootfs_dir
if krootfs_dir is None:
@@ -241,10 +215,9 @@ def wic_create_subcommand(args, usage_str):
rootfs_dir = rootfs_dir_to_args(krootfs_dir)
- print("Creating image(s)...\n")
+ logger.info("Creating image(s)...\n")
engine.wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
- native_sysroot, scripts_path, image_output_dir,
- options.compressor, options.bmap, options.debug)
+ native_sysroot, options)
def wic_list_subcommand(args, usage_str):
@@ -252,64 +225,312 @@ def wic_list_subcommand(args, usage_str):
Command-line handling for listing available images.
The real work is done by image.engine.wic_list()
"""
- parser = optparse.OptionParser(usage=usage_str)
- args = parser.parse_args(args)[1]
-
if not engine.wic_list(args, scripts_path):
- logging.error("Bad list arguments, exiting\n")
- parser.print_help()
- sys.exit(1)
+ raise WicError("Bad list arguments, exiting")
+
+
+def wic_ls_subcommand(args, usage_str):
+ """
+ Command-line handling for list content of images.
+ The real work is done by engine.wic_ls()
+ """
+ engine.wic_ls(args, args.native_sysroot)
+
+def wic_cp_subcommand(args, usage_str):
+ """
+ Command-line handling for copying files/dirs to images.
+ The real work is done by engine.wic_cp()
+ """
+ engine.wic_cp(args, args.native_sysroot)
+
+def wic_rm_subcommand(args, usage_str):
+ """
+ Command-line handling for removing files/dirs from images.
+ The real work is done by engine.wic_rm()
+ """
+ engine.wic_rm(args, args.native_sysroot)
+def wic_write_subcommand(args, usage_str):
+ """
+ Command-line handling for writing images.
+ The real work is done by engine.wic_write()
+ """
+ engine.wic_write(args, args.native_sysroot)
-def wic_help_topic_subcommand(args, usage_str):
+def wic_help_subcommand(args, usage_str):
"""
- Command-line handling for help-only 'subcommands'. This is
- essentially a dummy command that does nothing but allow users to
- use the existing subcommand infrastructure to display help on a
- particular topic not attached to any particular subcommand.
+ Command-line handling for help subcommand to keep the current
+ structure of the function definitions.
"""
pass
+def wic_help_topic_subcommand(usage_str, help_str):
+ """
+ Display function for help 'sub-subcommands'.
+ """
+ print(help_str)
+ return
+
+
wic_help_topic_usage = """
"""
-subcommands = {
- "create": [wic_create_subcommand,
- hlp.wic_create_usage,
- hlp.wic_create_help],
- "list": [wic_list_subcommand,
- hlp.wic_list_usage,
- hlp.wic_list_help],
+helptopics = {
"plugins": [wic_help_topic_subcommand,
wic_help_topic_usage,
- hlp.get_wic_plugins_help],
+ hlp.wic_plugins_help],
"overview": [wic_help_topic_subcommand,
wic_help_topic_usage,
hlp.wic_overview_help],
"kickstart": [wic_help_topic_subcommand,
wic_help_topic_usage,
hlp.wic_kickstart_help],
+ "create": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_create_help],
+ "ls": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_ls_help],
+ "cp": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_cp_help],
+ "rm": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_rm_help],
+ "write": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_write_help],
+ "list": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_list_help]
}
-def start_logging(loglevel):
- logging.basicConfig(filename='wic.log', filemode='w', level=loglevel)
+def wic_init_parser_create(subparser):
+ subparser.add_argument("wks_file")
+
+ subparser.add_argument("-o", "--outdir", dest="outdir", default='.',
+ help="name of directory to create image in")
+ subparser.add_argument("-e", "--image-name", dest="image_name",
+ help="name of the image to use the artifacts from "
+ "e.g. core-image-sato")
+ subparser.add_argument("-r", "--rootfs-dir", action=RootfsArgAction,
+ help="path to the /rootfs dir to use as the "
+ ".wks rootfs source")
+ subparser.add_argument("-b", "--bootimg-dir", dest="bootimg_dir",
+ help="path to the dir containing the boot artifacts "
+ "(e.g. /EFI or /syslinux dirs) to use as the "
+ ".wks bootimg source")
+ subparser.add_argument("-k", "--kernel-dir", dest="kernel_dir",
+ help="path to the dir containing the kernel to use "
+ "in the .wks bootimg")
+ subparser.add_argument("-n", "--native-sysroot", dest="native_sysroot",
+ help="path to the native sysroot containing the tools "
+ "to use to build the image")
+ subparser.add_argument("-s", "--skip-build-check", dest="build_check",
+ action="store_false", default=True, help="skip the build check")
+ subparser.add_argument("-f", "--build-rootfs", action="store_true", help="build rootfs")
+ subparser.add_argument("-c", "--compress-with", choices=("gzip", "bzip2", "xz"),
+ dest='compressor',
+ help="compress image with specified compressor")
+ subparser.add_argument("-m", "--bmap", action="store_true", help="generate .bmap")
+ subparser.add_argument("--no-fstab-update" ,action="store_true",
+ help="Do not change fstab file.")
+ subparser.add_argument("-v", "--vars", dest='vars_dir',
+ help="directory with <image>.env files that store "
+ "bitbake variables")
+ subparser.add_argument("-D", "--debug", dest="debug", action="store_true",
+ default=False, help="output debug information")
+ subparser.add_argument("-i", "--imager", dest="imager",
+ default="direct", help="the wic imager plugin")
+ return
+
+
+def wic_init_parser_list(subparser):
+ subparser.add_argument("list_type",
+ help="can be 'images' or 'source-plugins' "
+ "to obtain a list. "
+ "If value is a valid .wks image file")
+ subparser.add_argument("help_for", default=[], nargs='*',
+ help="If 'list_type' is a valid .wks image file "
+ "this value can be 'help' to show the help information "
+ "defined inside the .wks file")
+ return
+
+def imgtype(arg):
+ """
+ Custom type for ArgumentParser
+ Converts path spec to named tuple: (image, partition, path)
+ """
+ image = arg
+ part = path = None
+ if ':' in image:
+ image, part = image.split(':')
+ if '/' in part:
+ part, path = part.split('/', 1)
+ if not path:
+ path = '/'
+
+ if not os.path.isfile(image):
+ err = "%s is not a regular file or symlink" % image
+ raise argparse.ArgumentTypeError(err)
+
+ return namedtuple('ImgType', 'image part path')(image, part, path)
+
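+# Illustrative parse (assumed): 'disk.wic:1/boot' yields
+# ImgType(image='disk.wic', part='1', path='boot'); a bare 'disk.wic' yields
+# part=None and path=None. The image file itself must exist.
+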
+def wic_init_parser_ls(subparser):
+ subparser.add_argument("path", type=imgtype,
+ help="image spec: <image>[:<vfat partition>[<path>]]")
+ subparser.add_argument("-n", "--native-sysroot",
+ help="path to the native sysroot containing the tools")
+
+def imgpathtype(arg):
+ img = imgtype(arg)
+ if img.part is None:
+ raise argparse.ArgumentTypeError("partition number is not specified")
+ return img
+
+def wic_init_parser_cp(subparser):
+ subparser.add_argument("src",
+ help="image spec: <image>:<vfat partition>[<path>] or <file>")
+ subparser.add_argument("dest",
+ help="image spec: <image>:<vfat partition>[<path>] or <file>")
+ subparser.add_argument("-n", "--native-sysroot",
+ help="path to the native sysroot containing the tools")
+
+def wic_init_parser_rm(subparser):
+ subparser.add_argument("path", type=imgpathtype,
+ help="path: <image>:<vfat partition><path>")
+ subparser.add_argument("-n", "--native-sysroot",
+ help="path to the native sysroot containing the tools")
+ subparser.add_argument("-r", dest="recursive_delete", action="store_true", default=False,
+ help="remove directories and their contents recursively, "
+ " this only applies to ext* partition")
+
+def expandtype(rules):
+ """
+ Custom type for ArgumentParser
+ Converts expand rules to the dictionary {<partition>: size}
+ """
+ if rules == 'auto':
+ return {}
+ result = {}
+ for rule in rules.split(','):
+ try:
+ part, size = rule.split(':')
+ except ValueError:
+ raise argparse.ArgumentTypeError("Incorrect rule format: %s" % rule)
+
+ if not part.isdigit():
+ raise argparse.ArgumentTypeError("Rule '%s': partition number must be integer" % rule)
+
+ # validate size
+ multiplier = 1
+ for suffix, mult in [('K', 1024), ('M', 1024 * 1024), ('G', 1024 * 1024 * 1024)]:
+ if size.upper().endswith(suffix):
+ multiplier = mult
+ size = size[:-1]
+ break
+ if not size.isdigit():
+ raise argparse.ArgumentTypeError("Rule '%s': size must be integer" % rule)
+
+ result[int(part)] = int(size) * multiplier
+
+ return result
+
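+# Illustrative (assumed) conversions:
+#   expandtype('auto')        -> {}
+#   expandtype('1:200M,3:1G') -> {1: 209715200, 3: 1073741824}
+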
+def wic_init_parser_write(subparser):
+ subparser.add_argument("image",
+ help="path to the wic image")
+ subparser.add_argument("target",
+ help="target file or device")
+ subparser.add_argument("-e", "--expand", type=expandtype,
+ help="expand rules: auto or <partition>:<size>[,<partition>:<size>]")
+ subparser.add_argument("-n", "--native-sysroot",
+ help="path to the native sysroot containing the tools")
+
+def wic_init_parser_help(subparser):
+ helpparsers = subparser.add_subparsers(dest='help_topic', help=hlp.wic_usage)
+ for helptopic in helptopics:
+ helpparsers.add_parser(helptopic, help=helptopics[helptopic][2])
+ return
+subcommands = {
+ "create": [wic_create_subcommand,
+ hlp.wic_create_usage,
+ hlp.wic_create_help,
+ wic_init_parser_create],
+ "list": [wic_list_subcommand,
+ hlp.wic_list_usage,
+ hlp.wic_list_help,
+ wic_init_parser_list],
+ "ls": [wic_ls_subcommand,
+ hlp.wic_ls_usage,
+ hlp.wic_ls_help,
+ wic_init_parser_ls],
+ "cp": [wic_cp_subcommand,
+ hlp.wic_cp_usage,
+ hlp.wic_cp_help,
+ wic_init_parser_cp],
+ "rm": [wic_rm_subcommand,
+ hlp.wic_rm_usage,
+ hlp.wic_rm_help,
+ wic_init_parser_rm],
+ "write": [wic_write_subcommand,
+ hlp.wic_write_usage,
+ hlp.wic_write_help,
+ wic_init_parser_write],
+ "help": [wic_help_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_help_help,
+ wic_init_parser_help]
+}
+
+
+def init_parser(parser):
+ parser.add_argument("--version", action="version",
+ version="%(prog)s {version}".format(version=__version__))
+ parser.add_argument("-D", "--debug", dest="debug", action="store_true",
+ default=False, help="output debug information")
+
+ subparsers = parser.add_subparsers(dest='command', help=hlp.wic_usage)
+ for subcmd in subcommands:
+ subparser = subparsers.add_parser(subcmd, help=subcommands[subcmd][2])
+ subcommands[subcmd][3](subparser)
+
+class WicArgumentParser(argparse.ArgumentParser):
+ def format_help(self):
+ return hlp.wic_help
+
def main(argv):
- parser = optparse.OptionParser(version="wic version %s" % __version__,
- usage=hlp.wic_usage)
+ parser = WicArgumentParser(
+ description="wic version %s" % __version__)
- parser.disable_interspersed_args()
+ init_parser(parser)
- args = parser.parse_args(argv)[1]
+ args = parser.parse_args(argv)
- if len(args):
- if args[0] == "help":
- if len(args) == 1:
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+
+ if "command" in vars(args):
+ if args.command == "help":
+ if args.help_topic is None:
parser.print_help()
- sys.exit(1)
+ elif args.help_topic in helptopics:
+ hlpt = helptopics[args.help_topic]
+ hlpt[0](hlpt[1], hlpt[2])
+ return 0
+
+ # validate wic cp src and dest parameter to identify which one of it is
+ # image and cast it into imgtype
+ if args.command == "cp":
+ if ":" in args.dest:
+ args.dest = imgtype(args.dest)
+ elif ":" in args.src:
+ args.src = imgtype(args.src)
+ else:
+ raise argparse.ArgumentTypeError("no image or partition number specified.")
return hlp.invoke_subcommand(args, parser, hlp.wic_help_usage, subcommands)
@@ -318,6 +539,6 @@ if __name__ == "__main__":
try:
sys.exit(main(sys.argv[1:]))
except WicError as err:
- print("ERROR:", err, file=sys.stderr)
+ print()
+ logger.error(err)
sys.exit(1)
-
diff --git a/scripts/wipe-sysroot b/scripts/wipe-sysroot
deleted file mode 100755
index 5e6b1a4e2a..0000000000
--- a/scripts/wipe-sysroot
+++ /dev/null
@@ -1,54 +0,0 @@
-#! /bin/sh
-
-# Wipe out all of the sysroots and all of the stamps that populated it.
-# Author: Ross Burton <ross.burton@intel.com>
-#
-# Copyright (c) 2012 Intel Corporation
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-set -e
-
-if [ $# -gt 0 ]; then
- echo "Wipe all sysroots and sysroot-related stamps for the current build directory." >&2
- echo "Usage: $0" >&2
- exit 1
-fi
-
-ENVS=`mktemp --suffix -wipe-sysroot-envs`
-bitbake -p -e > $ENVS
-
-eval `grep -F SSTATE_MANIFESTS= $ENVS`
-eval `grep -F STAGING_DIR= $ENVS`
-eval `grep -F STAMPS_DIR= $ENVS`
-rm -f $ENVS
-
-if [ -z "$SSTATE_MANIFESTS" -o -z "$STAGING_DIR" -o -z "$STAMPS_DIR" ]; then
- echo "Could not determine SSTATE_MANIFESTS/STAGING_DIR/STAMPS_DIR from bitbake, check above for errors"
- exit 1
-fi
-
-echo "Deleting the sysroots in $STAGING_DIR, and selected stamps in $SSTATE_MANIFESTS and $STAMPS_DIR."
-
-# The sysroots themselves
-rm -rf $STAGING_DIR ${STAGING_DIR}-uninative
-
-# The stamps that said the sysroot was populated
-rm -rf $STAMPS_DIR/*/*/*.do_populate_sysroot.*
-rm -rf $STAMPS_DIR/*/*/*.do_populate_sysroot_setscene.*
-rm -rf $STAMPS_DIR/*/*/*.do_packagedata.*
-rm -rf $STAMPS_DIR/*/*/*.do_packagedata_setscene.*
-
-# The sstate manifests
-rm -rf $SSTATE_MANIFESTS/manifest-*.populate_sysroot
diff --git a/scripts/yocto-check-layer b/scripts/yocto-check-layer
new file mode 100755
index 0000000000..010830f842
--- /dev/null
+++ b/scripts/yocto-check-layer
@@ -0,0 +1,217 @@
+#!/usr/bin/env python3
+
+# Yocto Project layer checking tool
+#
+# Copyright (C) 2017 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import sys
+import argparse
+import logging
+import time
+import signal
+import shutil
+import collections
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import scriptutils
+import scriptpath
+scriptpath.add_oe_lib_path()
+scriptpath.add_bitbake_lib_path()
+
+from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_signatures
+from oeqa.utils.commands import get_bb_vars
+
+PROGNAME = 'yocto-check-layer'
+CASES_PATHS = [os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'lib', 'checklayer', 'cases')]
+logger = scriptutils.logger_create(PROGNAME, stream=sys.stdout)
+
+def test_layer(td, layer, test_software_layer_signatures):
+ from checklayer.context import CheckLayerTestContext
+ logger.info("Starting to analyze: %s" % layer['name'])
+ logger.info("----------------------------------------------------------------------")
+
+ tc = CheckLayerTestContext(td=td, logger=logger, layer=layer, test_software_layer_signatures=test_software_layer_signatures)
+ tc.loadTests(CASES_PATHS)
+ return tc.runTests()
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Yocto Project layer checking tool",
+ add_help=False)
+ parser.add_argument('layers', metavar='LAYER_DIR', nargs='+',
+ help='Layer to check')
+ parser.add_argument('-o', '--output-log',
+ help='File to output log (optional)', action='store')
+ parser.add_argument('--dependency', nargs="+",
+ help='Layers to process for dependencies', action='store')
+ parser.add_argument('--machines', nargs="+",
+ help='List of MACHINEs to be used during testing', action='store')
+ parser.add_argument('--additional-layers', nargs="+",
+ help='List of additional layers to add during testing', action='store')
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument('--with-software-layer-signature-check', action='store_true', dest='test_software_layer_signatures',
+ default=True,
+ help='check that software layers do not change signatures (on by default)')
+ group.add_argument('--without-software-layer-signature-check', action='store_false', dest='test_software_layer_signatures',
+ help='disable signature checking for software layers')
+ parser.add_argument('-n', '--no-auto', help='Disable auto layer discovery',
+ action='store_true')
+ parser.add_argument('-d', '--debug', help='Enable debug output',
+ action='store_true')
+ parser.add_argument('-q', '--quiet', help='Print only errors',
+ action='store_true')
+
+ parser.add_argument('-h', '--help', action='help',
+ default=argparse.SUPPRESS,
+ help='show this help message and exit')
+
+ args = parser.parse_args()
+
+ if args.output_log:
+ fh = logging.FileHandler(args.output_log)
+ fh.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
+ logger.addHandler(fh)
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ if not 'BUILDDIR' in os.environ:
+ logger.error("You must source the environment before run this script.")
+ logger.error("$ source oe-init-build-env")
+ return 1
+ builddir = os.environ['BUILDDIR']
+ bblayersconf = os.path.join(builddir, 'conf', 'bblayers.conf')
+
+ layers = detect_layers(args.layers, args.no_auto)
+ if not layers:
+ logger.error("Fail to detect layers")
+ return 1
+ if args.additional_layers:
+ additional_layers = detect_layers(args.additional_layers, args.no_auto)
+ else:
+ additional_layers = []
+ if args.dependency:
+ dep_layers = detect_layers(args.dependency, args.no_auto)
+ dep_layers = dep_layers + layers
+ else:
+ dep_layers = layers
+
+ logger.info("Detected layers:")
+ for layer in layers:
+ if layer['type'] == LayerType.ERROR_BSP_DISTRO:
+ logger.error("%s: Can't be DISTRO and BSP type at the same time."\
+ " The conf/distro and conf/machine folders was found."\
+ % layer['name'])
+ layers.remove(layer)
+ elif layer['type'] == LayerType.ERROR_NO_LAYER_CONF:
+ logger.error("%s: Don't have conf/layer.conf file."\
+ % layer['name'])
+ layers.remove(layer)
+ else:
+ logger.info("%s: %s, %s" % (layer['name'], layer['type'],
+ layer['path']))
+ if not layers:
+ return 1
+
+ shutil.copyfile(bblayersconf, bblayersconf + '.backup')
+ def cleanup_bblayers(signum, frame):
+ shutil.copyfile(bblayersconf + '.backup', bblayersconf)
+ os.unlink(bblayersconf + '.backup')
+ signal.signal(signal.SIGTERM, cleanup_bblayers)
+ signal.signal(signal.SIGINT, cleanup_bblayers)
+
+ td = {}
+ results = collections.OrderedDict()
+ results_status = collections.OrderedDict()
+
+ layers_tested = 0
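+    # Check each valid layer in turn against an otherwise pristine configuration.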
+ for layer in layers:
+ if layer['type'] == LayerType.ERROR_NO_LAYER_CONF or \
+ layer['type'] == LayerType.ERROR_BSP_DISTRO:
+ continue
+
+ logger.info('')
+ logger.info("Setting up for %s(%s), %s" % (layer['name'], layer['type'],
+ layer['path']))
+
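+        # Restore the pristine bblayers.conf before configuring this layer.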
+ shutil.copyfile(bblayersconf + '.backup', bblayersconf)
+
+        missing_dependencies = not add_layer_dependencies(bblayersconf, layer, dep_layers, logger)
+        if not missing_dependencies:
+            for additional_layer in additional_layers:
+                if not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger):
+                    missing_dependencies = True
+                    break
+        if missing_dependencies:
+            logger.info('Skipping %s due to missing dependencies.' % layer['name'])
+            results[layer['name']] = None
+            results_status[layer['name']] = 'SKIPPED (Missing dependencies)'
+            layers_tested += 1
+            continue
+
+        if any(not add_layers(bblayersconf, [additional_layer], logger)
+               for additional_layer in additional_layers):
+            logger.info('Skipping %s due to missing additional layers.' % layer['name'])
+            results[layer['name']] = None
+            results_status[layer['name']] = 'SKIPPED (Missing additional layers)'
+            layers_tested += 1
+            continue
+
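+        # Capture baseline bitbake variables and world signatures before the
+        # layer under test is added, so any signature changes it introduces
+        # can be detected by the test cases.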
+ logger.info('Getting initial bitbake variables ...')
+ td['bbvars'] = get_bb_vars()
+ logger.info('Getting initial signatures ...')
+ td['builddir'] = builddir
+ try:
+ td['sigs'], td['tunetasks'] = get_signatures(td['builddir'])
+ except RuntimeError as e:
+ logger.info(str(e))
+ results[layer['name']] = None
+ results_status[layer['name']] = 'FAIL (Generating world signatures)'
+            layers_tested += 1
+ continue
+ td['machines'] = args.machines
+
+        if not add_layers(bblayersconf, [layer], logger):
+            logger.info('Skipping %s as it could not be added to bblayers.conf.' % layer['name'])
+            results[layer['name']] = None
+            results_status[layer['name']] = 'SKIPPED (Failed to add layer)'
+            layers_tested += 1
+            continue
+
+ result = test_layer(td, layer, args.test_software_layer_signatures)
+ results[layer['name']] = result
+ results_status[layer['name']] = 'PASS' if results[layer['name']].wasSuccessful() else 'FAIL'
+        layers_tested += 1
+
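+    # Summarise the results; exit code 2 means at least one layer failed or
+    # was skipped.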
+ ret = 0
+ if layers_tested:
+ logger.info('')
+ logger.info('Summary of results:')
+ logger.info('')
+ for layer_name in results_status:
+ logger.info('%s ... %s' % (layer_name, results_status[layer_name]))
+ if not results[layer_name] or not results[layer_name].wasSuccessful():
+ ret = 2 # ret = 1 used for initialization errors
+
+ cleanup_bblayers(None, None)
+
+ return ret
+
+if __name__ == '__main__':
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/scripts/yocto-check-layer-wrapper b/scripts/yocto-check-layer-wrapper
new file mode 100755
index 0000000000..2e3b699031
--- /dev/null
+++ b/scripts/yocto-check-layer-wrapper
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+# Yocto Project layer check tool wrapper
+#
+# Creates a temporary build directory in which to run the
+# yocto-check-layer script, so that an existing build
+# environment is not contaminated.
+#
+# Copyright (C) 2017 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+#
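+# Usage: yocto-check-layer-wrapper [-o LOGFILE] LAYER_DIR [LAYER_DIR ...]
+# The -o log path is resolved to an absolute path (the checks run from a
+# temporary directory); all remaining arguments are passed through to
+# yocto-check-layer.
+#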
+
+if [ -z "$BUILDDIR" ]; then
+ echo "Please source oe-init-build-env before run this script."
+ exit 2
+fi
+
+# since we are using a temp directory, use the realpath for output
+# log option
+output_log=''
+while getopts o: name
+do
+    case $name in
+        o) output_log=$(realpath "$OPTARG");;
+    esac
+done
+shift $((OPTIND - 1))
+
+# generate a temporary build directory in which to run the check-layer script
+base_dir=$(realpath "$BUILDDIR/..")
+cd "$base_dir" || exit 1
+
+build_dir=$(mktemp -p "$base_dir" -d -t build-XXXX)
+
+this_dir=$(dirname "$(readlink -f "$0")")
+
+source "$this_dir/../oe-init-build-env" "$build_dir"
+if [ -n "$output_log" ]; then
+    yocto-check-layer -o "$output_log" "$@"
+else
+    yocto-check-layer "$@"
+fi
+retcode=$?
+
+rm -rf "$build_dir"
+
+exit $retcode